Dataset schema (column: type, observed range):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2-616
- content_id: string, length 40
- detected_licenses: list, length 0-69
- license_type: string, 2 classes
- repo_name: string, length 5-118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4-63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k-686M, nullable
- star_events_count: int64, 0-209k
- fork_events_count: int64, 0-110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 220 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2-10.3M
- extension: string, 257 classes
- content: string, length 2-10.3M
- authors: list, length 1
- author_id: string, length 0-212
64abbd79020cfe186e38c100a66432f254b6f63c
|
835e428d1cbe87adf945897ff75f77e93b500d12
|
/demonstrations/tutorial_qnn_module_torch.py
|
b8b5b8ea0148840cf4f468e8203d1730eb4e4f74
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
quantshah/qml
|
9acb3c932610e30a28369fe72ee49683ac301219
|
45533ef6f6d7b9cfa0384302fe52b5ead772b923
|
refs/heads/master
| 2022-11-30T08:26:12.972709 | 2022-11-18T19:59:59 | 2022-11-18T19:59:59 | 218,805,085 | 0 | 0 |
Apache-2.0
| 2019-10-31T16:02:07 | 2019-10-31T16:02:06 | null |
UTF-8
|
Python
| false | false | 11,188 |
py
|
"""
Turning quantum nodes into Torch Layers
=======================================
.. meta::
:property="og:description": Learn how to create hybrid ML models in PennyLane using Torch
:property="og:image": https://pennylane.ai/qml/_images/PyTorch_icon.png
.. related::
tutorial_qnn_module_tf Turning quantum nodes into Keras Layers
*Author: Tom Bromley — Posted: 02 November 2020. Last updated: 28 January 2021.*
Creating neural networks in `PyTorch <https://pytorch.org/>`__ is easy using the
`nn module <https://pytorch.org/docs/stable/nn.html>`__. Models are constructed from elementary
*layers* and can be trained using the PyTorch API. For example, the following code defines a
two-layer network that could be used for binary classification:
"""
import torch
layer_1 = torch.nn.Linear(2, 2)
layer_2 = torch.nn.Linear(2, 2)
softmax = torch.nn.Softmax(dim=1)
layers = [layer_1, layer_2, softmax]
model = torch.nn.Sequential(*layers)
###############################################################################
# **What if we want to add a quantum layer to our model?** This is possible in PennyLane:
# :doc:`QNodes <../glossary/hybrid_computation>` can be converted into ``torch.nn`` layers and
# combined with the wide range of built-in classical
# `layers <https://pytorch.org/docs/stable/nn.html>`__ to create truly hybrid
# models. This tutorial will guide you through a simple example to show you how it's done!
#
# .. note::
#
# A similar demo explaining how to
# :doc:`turn quantum nodes into Keras layers <tutorial_qnn_module_tf>`
# is also available.
#
# Fixing the dataset and problem
# ------------------------------
#
# Let us begin by choosing a simple dataset and problem to allow us to focus on how the hybrid
# model is constructed. Our objective is to classify points generated from scikit-learn's
# binary-class
# `make_moons() <https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html>`__ dataset:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_moons
# Set random seeds
torch.manual_seed(42)
np.random.seed(42)
X, y = make_moons(n_samples=200, noise=0.1)
y_ = torch.unsqueeze(torch.tensor(y), 1) # used for one-hot encoded labels
y_hot = torch.scatter(torch.zeros((200, 2)), 1, y_, 1)
c = ["#1f77b4" if y_ == 0 else "#ff7f0e" for y_ in y] # colours for each class
plt.axis("off")
plt.scatter(X[:, 0], X[:, 1], c=c)
plt.show()
###############################################################################
# Defining a QNode
# ----------------
#
# Our next step is to define the QNode that we want to interface with ``torch.nn``. Any
# combination of device, operations and measurements that is valid in PennyLane can be used to
# compose the QNode. However, the QNode arguments must satisfy additional :doc:`conditions
# <code/api/pennylane.qnn.TorchLayer>` including having an argument called ``inputs``. All other
# arguments must be arrays or tensors and are treated as trainable weights in the model. We fix a
# two-qubit QNode using the
# :doc:`default.qubit <code/api/pennylane.devices.default_qubit.DefaultQubit>` simulator and
# operations from the :doc:`templates <introduction/templates>` module.
import pennylane as qml
n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def qnode(inputs, weights):
qml.AngleEmbedding(inputs, wires=range(n_qubits))
qml.BasicEntanglerLayers(weights, wires=range(n_qubits))
return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]
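###############################################################################
# As a quick check (an illustrative sketch, not part of the original demo), the
# QNode can be evaluated directly with sample arguments; the exact return
# container varies between PennyLane versions, but it holds one expectation
# value per qubit:
sample_inputs = np.random.uniform(0, np.pi, n_qubits)        # hypothetical embedding angles
sample_weights = np.random.uniform(0, np.pi, (6, n_qubits))  # hypothetical weights: one row per entangling layer
print(qnode(sample_inputs, sample_weights))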
###############################################################################
# Interfacing with Torch
# ----------------------
#
# With the QNode defined, we are ready to interface with ``torch.nn``. This is achieved using the
# :class:`~pennylane.qnn.TorchLayer` class of the :mod:`~pennylane.qnn` module, which converts the
# QNode to the elementary building block of ``torch.nn``: a *layer*. We shall see in the
# following how the resultant layer can be combined with other well-known neural network layers
# to form a hybrid model.
#
# We must first define the ``weight_shapes`` dictionary. Recall that all of
# the arguments of the QNode (except the one named ``inputs``) are treated as trainable
# weights. For the QNode to be successfully converted to a layer in ``torch.nn``, we need to provide
# the details of the shape of each trainable weight for them to be initialized. The
# ``weight_shapes`` dictionary maps from the argument names of the QNode to corresponding shapes:
n_layers = 6
weight_shapes = {"weights": (n_layers, n_qubits)}
###############################################################################
# In our example, the ``weights`` argument of the QNode is trainable and has shape given by
# ``(n_layers, n_qubits)``, which is passed to
# :func:`~pennylane.templates.layers.BasicEntanglerLayers`.
#
# Now that ``weight_shapes`` is defined, it is easy to then convert the QNode:
qlayer = qml.qnn.TorchLayer(qnode, weight_shapes)
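###############################################################################
# As a sanity check (an illustrative sketch, not part of the original demo),
# the new layer can already be evaluated like any other ``torch.nn`` module;
# each row of the output holds one expectation value per qubit:
x_check = torch.rand(3, n_qubits)  # hypothetical batch of 3 samples
print(qlayer(x_check).shape)       # expected: torch.Size([3, 2])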
###############################################################################
# With this done, the QNode can now be treated just like any other ``torch.nn`` layer and we can
# proceed using the familiar Torch workflow.
#
# Creating a hybrid model
# -----------------------
#
# Let's create a basic hybrid model consisting of four pieces:
#
# 1. a 2-neuron fully connected classical layer
# 2. our 2-qubit QNode converted into a layer
# 3. another 2-neuron fully connected classical layer
# 4. a softmax activation to convert to a probability vector
#
# A diagram of the model can be seen in the figure below.
#
# .. figure:: /demonstrations/qnn_module/qnn_torch.png
# :width: 100%
# :align: center
#
# We can construct the model using the
# `Sequential <https://pytorch.org/docs/stable/generated/torch.nn.Sequential.html>`__ API:
clayer_1 = torch.nn.Linear(2, 2)
clayer_2 = torch.nn.Linear(2, 2)
softmax = torch.nn.Softmax(dim=1)
layers = [clayer_1, qlayer, clayer_2, softmax]
model = torch.nn.Sequential(*layers)
###############################################################################
# Training the model
# ------------------
#
# We can now train our hybrid model on the classification dataset using the usual Torch
# approach. We'll use the
# standard `SGD <https://pytorch.org/docs/stable/optim.html#torch.optim.SGD>`__ optimizer
# and the mean absolute error loss function:
opt = torch.optim.SGD(model.parameters(), lr=0.2)
loss = torch.nn.L1Loss()
###############################################################################
# Note that there are more advanced combinations of optimizer and loss function, but here we are
# focusing on the basics.
#
# The model is now ready to be trained!
X = torch.tensor(X, requires_grad=True).float()
y_hot = y_hot.float()
batch_size = 5
batches = 200 // batch_size
data_loader = torch.utils.data.DataLoader(
    list(zip(X, y_hot)), batch_size=batch_size, shuffle=True, drop_last=True
)
epochs = 6
for epoch in range(epochs):
running_loss = 0
for xs, ys in data_loader:
opt.zero_grad()
loss_evaluated = loss(model(xs), ys)
loss_evaluated.backward()
opt.step()
        running_loss += loss_evaluated.item()
avg_loss = running_loss / batches
print("Average loss over epoch {}: {:.4f}".format(epoch + 1, avg_loss))
y_pred = model(X)
predictions = torch.argmax(y_pred, dim=1).detach().numpy()
correct = [1 if p == p_true else 0 for p, p_true in zip(predictions, y)]
accuracy = sum(correct) / len(correct)
print(f"Accuracy: {accuracy * 100}%")
###############################################################################
# How did we do? The model looks to have successfully trained and the accuracy is reasonably
# high. In practice, we would aim to push the accuracy higher by thinking carefully about the
# model design and the choice of hyperparameters such as the learning rate.
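###############################################################################
# For example (an illustrative variation, not part of the original demo), one
# could try the Adam optimizer, or a different loss function; since the model
# outputs probabilities, ``BCELoss`` with the one-hot targets is also valid:
opt_alt = torch.optim.Adam(model.parameters(), lr=0.01)  # hypothetical alternative to SGD
loss_alt = torch.nn.BCELoss()  # would be used as loss_alt(model(X), y_hot)
###############################################################################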
#
# Creating non-sequential models
# ------------------------------
#
# The model we created above was composed of a sequence of classical and quantum layers. This
# type of model is very common and is suitable in a lot of situations. However, in some cases we
# may want a greater degree of control over how the model is constructed, for example when we
# have multiple inputs and outputs or when we want to distribute the output of one layer into
# multiple subsequent layers.
#
# Suppose we want to make a hybrid model consisting of:
#
# 1. a 4-neuron fully connected classical layer
# 2. a 2-qubit quantum layer connected to the first two neurons of the previous classical layer
# 3. a 2-qubit quantum layer connected to the second two neurons of the previous classical layer
# 4. a 2-neuron fully connected classical layer which takes a 4-dimensional input from the
# combination of the previous quantum layers
# 5. a softmax activation to convert to a probability vector
#
# A diagram of the model can be seen in the figure below.
#
# .. figure:: /demonstrations/qnn_module/qnn2_torch.png
# :width: 100%
# :align: center
#
# This model can also be constructed by creating a new class that inherits from the
# ``torch.nn`` `Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ and
# overriding the ``forward()`` method:
class HybridModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.clayer_1 = torch.nn.Linear(2, 4)
self.qlayer_1 = qml.qnn.TorchLayer(qnode, weight_shapes)
self.qlayer_2 = qml.qnn.TorchLayer(qnode, weight_shapes)
self.clayer_2 = torch.nn.Linear(4, 2)
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
x = self.clayer_1(x)
x_1, x_2 = torch.split(x, 2, dim=1)
x_1 = self.qlayer_1(x_1)
x_2 = self.qlayer_2(x_2)
        x = torch.cat([x_1, x_2], dim=1)
x = self.clayer_2(x)
return self.softmax(x)
model = HybridModel()
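###############################################################################
# A quick shape check (illustrative only): ``torch.split(x, 2, dim=1)`` cuts
# the 4-dimensional classical output into two 2-dimensional halves, one for
# each quantum layer:
x_demo = torch.rand(3, 4)                 # hypothetical batch of 3 samples
x_a, x_b = torch.split(x_demo, 2, dim=1)
print(x_a.shape, x_b.shape)               # torch.Size([3, 2]) torch.Size([3, 2])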
###############################################################################
# As a final step, let's train the model to check if it's working:
opt = torch.optim.SGD(model.parameters(), lr=0.2)
epochs = 6
for epoch in range(epochs):
running_loss = 0
for xs, ys in data_loader:
opt.zero_grad()
loss_evaluated = loss(model(xs), ys)
loss_evaluated.backward()
opt.step()
        running_loss += loss_evaluated.item()
avg_loss = running_loss / batches
print("Average loss over epoch {}: {:.4f}".format(epoch + 1, avg_loss))
y_pred = model(X)
predictions = torch.argmax(y_pred, dim=1).detach().numpy()
correct = [1 if p == p_true else 0 for p, p_true in zip(predictions, y)]
accuracy = sum(correct) / len(correct)
print(f"Accuracy: {accuracy * 100}%")
###############################################################################
# Great! We've mastered the basics of constructing hybrid classical-quantum models using
# PennyLane and Torch. Can you think of any interesting hybrid models to construct? How do they
# perform on realistic datasets?
##############################################################################
# About the author
# ----------------
# .. include:: ../_static/authors/tom_bromley.txt
|
[
"noreply@github.com"
] |
quantshah.noreply@github.com
|
990c5f495f62b81a018f3378ee04bf67c55c74ba
|
7f77c30aff4d1f11f4c8cd6a2496f1dbb8968e4d
|
/checker.py
|
d0696aaa3d0b74ae3b582fcb1d95ffa27f4acb33
|
[
"MIT"
] |
permissive
|
yanrising/crypto-address-balance-checker
|
5aa32003a6ac2a109c5208f00b3d45b2955d63ee
|
5a6a4e85377550193bb5e9b6a46331431347a66a
|
refs/heads/main
| 2023-02-01T01:56:07.303201 | 2020-12-13T13:57:24 | 2020-12-13T13:57:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,353 |
py
|
#!/usr/bin/python
import sqlite3
import re
import requests
import time
#Config
EXPLORER = "https://explorer.teloscoin.org/"
#Initialize database
db = sqlite3.connect('addresses.db')
c = db.cursor()
c.execute('CREATE TABLE IF NOT EXISTS users(address TEXT, bal INTEGER)')
db.commit()
#Menu
print("What you want to do?")
print("1 - Load file to database")
print("2 - List database")
one = input("Choose option: ")
if one == "1":
#Open txt file with list of addresses
with open("addr.txt", "r") as a_file:
for line in a_file:
stripped_line = line.strip()
args = stripped_line
time.sleep(2)
try:
if len(str(args)) == 34:
bal = requests.get("{}ext/getbalance/{}".format(EXPLORER, args)).json()
db = sqlite3.connect('addresses.db')
c = db.cursor()
c.execute('INSERT INTO users(address, bal) VALUES(?,?)', (str(args), str(bal)))
db.commit()
db.close()
except requests.exceptions.RequestException:
print('error')
if one == "2":
db = sqlite3.connect('addresses.db')
c = db.cursor()
c.execute('SELECT * FROM users')
usr = c.fetchall()
    for row in usr:
        print(row[0], ' = ', row[1])
db.close()
|
[
"noreply@github.com"
] |
yanrising.noreply@github.com
|
184842f3bbee75d2303145e795cb88373757d3ed
|
cff3f4a71859ce0c5fbf4394dd082936153b4d75
|
/aiohttp/backport_cookies.py
|
799025183932ef5555dd866e0932730074a26a5f
|
[
"Apache-2.0"
] |
permissive
|
Krzana/aiohttp
|
6def78bbc5276f7dba4a000b4766516196c3df0f
|
d175e4ec45784aaa2382e6e3647336398e2added
|
refs/heads/master
| 2021-01-22T03:13:14.918834 | 2017-02-06T00:58:41 | 2017-02-06T02:36:15 | 81,104,193 | 0 | 0 | null | 2017-02-06T15:57:06 | 2017-02-06T15:57:06 | null |
UTF-8
|
Python
| false | false | 14,610 |
py
|
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
import re
import string
from http.cookies import CookieError, Morsel
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding backslash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars): # pragma: no cover
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str): # pragma: no cover
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
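# A round-trip illustration (added note, not in the original module): quoting a
# value containing ';' produces an octal escape which _unquote() reverses:
#
#     >>> _quote('a;b')
#     '"a\\073b"'
#     >>> _unquote(_quote('a;b'))
#     'a;b'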
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname): # pragma: no cover
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
_LegalValueChars = _LegalKeyChars + r'\[\]'
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
\s* # Optional whitespace at start of cookie
(?P<key> # Start of group 'key'
[""" + _LegalKeyChars + r"""]+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
[""" + _LegalValueChars + r"""]* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict): # pragma: no cover
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
if isinstance(value, Morsel):
# allow assignment of constructed Morsels (e.g. for pickling)
dict.__setitem__(self, key, value)
else:
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
l.append('%s=%s' % (key, repr(value.value)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.match(str, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if key[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[key[1:]] = value
elif key.lower() in Morsel._reserved:
if M:
if value is None:
if key.lower() in Morsel._flags:
M[key] = True
else:
M[key] = _unquote(value)
elif value is not None:
rval, cval = self.value_decode(value)
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, `SimpleCookie`
calls the builtin `str()` to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)
|
[
"nikolay.kim@affirm.com"
] |
nikolay.kim@affirm.com
|
7d2eaa7c95a52feb3ae21b49f2f02ae05e36fca1
|
7ccef526ce13e545e56cce01a577581a169eba58
|
/python/flask-d3/flask_server.py
|
a6bdb778432349c92eb6ff4a27c04769cc064eac
|
[] |
no_license
|
kdubss/NodeJS-Weather-App
|
e601a008ee72367adbe623816f45760536278902
|
5af4689a2fae009c6197e3b9ee43e4b69b5424be
|
refs/heads/master
| 2020-03-07T02:39:26.243000 | 2018-05-11T16:31:40 | 2018-05-11T16:31:40 | 127,213,508 | 9 | 3 | null | 2018-04-07T23:26:34 | 2018-03-29T00:13:09 |
Python
|
UTF-8
|
Python
| false | false | 6,594 |
py
|
#!/usr/bin/python
# ./weather-app/python/flask-d3/app.py
import json
import pandas as pd
import sys, os
import datetime as dt
from flask import Flask, render_template, url_for
sys.path.insert(0, '../')
import api_requests as api
import weather as w
from local_settings import env
app = Flask(__name__)
@app.errorhandler(404)
def pageNotFound(err):
'''
    Handles 404 client-side errors.
'''
return (
'''
Sorry, 404!
This means it's completely YOUR fault (i.e. wrong url, etc.).
Make sure to get'yo stuff together and do it right!
'''
)
@app.errorhandler(500)
def serverError(err):
'''
    Handles 500 server-side errors.
'''
return (
'''
Okay okay, okay...
This is our fault...we'll do our best to get our stuff right, so you
can get'yo stuff right!
'''
)
@app.route('/test1')
def getTestPage():
'''
Route for testing purposes.
'''
# > Fetching & Organization of data from API:
forecast_request = api.getForecastDataFromDarkSkyAPI('Vancouver')
forecast_json = forecast_request.json()
forecast_hourly_data = forecast_json['hourly']['data']
forecast_series = w.getForecastHourlyTemperatureSeries(forecast_hourly_data)
forecast_df = w.convertSeriesData2DataFrame(forecast_series)
w.saveWeatherData2Csv(forecast_df, 'data', 'forecast-hourly-temp-test.csv')
# > Loading the data & passing it to html template:
fname = 'forecast-hourly-temp-test.csv'
df = pd.read_csv(env['path2data'] + fname)
forecast_data = df.to_dict(orient = 'records')
    forecast_data = json.dumps(forecast_data, indent = 2)
data = { 'forecast_data' : forecast_data }
return render_template('forecast_temperature.html', data = data)
@app.route('/')
def getLandingPage():
'''
View function to fetch the landing page template.
'''
return render_template(
'landing-page.html',
)
@app.route('/index')
def getIndex():
'''
Rendering './templates/index.html'.
'''
return render_template('index.html')
@app.route('/about')
def getAbout():
'''
    Rendering './templates/about.html'
'''
return render_template('about.html')
@app.route('/data')
def getDataParams():
'''
Rendering the template to get weather data parameters.
'''
return render_template('weather_data_parameters.html')
@app.route('/forecast')
def getForecastTemperatureD3():
'''
Parsing data and rendering the forecasted temperature data, then passing
the data to ./templates/forecast_temperature.html, in which the data will
be rendered by D3.
'''
# > Fetching and prepping forecast temperature data:
forecast_request = api.getForecastDataFromDarkSkyAPI('Vancouver')
forecast_hourly_data = forecast_request.json()['hourly']['data']
forecast_hourly_series = w.getForecastHourlyTemperatureSeries(forecast_hourly_data)
forecast_df = w.convertSeriesData2DataFrame(forecast_hourly_series)
w.saveWeatherData2Csv(forecast_df, 'data', 'forecast-hourly-temp.csv')
# > loading and sending forecast temperature data to html template:
path2Data = '~/Documents/node-projects/weather-app/python/flask-d3/data/'
fname = 'forecast-hourly-temp'
fname_fmt = '.csv'
df = pd.read_csv(path2Data + fname + fname_fmt)
forecast_data = df.to_dict(orient = 'records')
forecast_data = json.dumps(forecast_data, indent = 2)
data = { 'forecast_data': forecast_data }
return render_template(
'forecast-temp.html',
data = data,
title = 'Forecasted Hourly Temp (from %s on)' % str(dt.datetime.today())
)
@app.route('/hindcast')
def getHistoricalHindcastTemperatureD3():
'''
Parsing data and rendering the historical hindcast temperature data, then passing
the data to ./templates/historical_temperature.html, in which the data will
be rendered by D3.
'''
hindcast_request = api.getTimeMachineDataFromDarkSkyAPI('Vancouver',
str(dt.datetime.today() - dt.timedelta(1)))
hindcast_json = hindcast_request.json()
hindcast_hourly_data = hindcast_json['hourly']['data']
hindcast_series = w.getTimeMachineHourlyTemperatureSeries(hindcast_hourly_data)
hindcast_df = w.convertSeriesData2DataFrame(hindcast_series)
w.saveWeatherData2Csv(hindcast_df, 'data', 'hindcast-hourly-temp.csv')
path2Data = '~/Documents/node-projects/weather-app/python/flask-d3/data/'
fname = 'hindcast-hourly-temp'
fname_fmt = '.csv'
df = pd.read_csv(path2Data + fname + fname_fmt)
hindcast_data = df.to_dict(orient = 'records')
hindcast_data = json.dumps(hindcast_data, indent = 2)
data = { 'hindcast_data': hindcast_data }
return render_template(
'hindcast-temp.html',
data = data,
title = 'Historical Hourly Temp (from %s)' % str(dt.datetime.today() - dt.timedelta(1))
)
@app.route('/temperature')
def getForecastAndHindcastTemperatureD3():
'''
Function to call when fetching the index endpoint.
'''
# > Making requests to API:
forecast_request = api.getForecastDataFromDarkSkyAPI('Vancouver')
hindcast_request = api.getTimeMachineDataFromDarkSkyAPI('Vancouver', str(dt.datetime.today() - dt.timedelta(1)))
forecast_hourly_data = forecast_request.json()['hourly']['data']
forecast_series = w.getForecastHourlyTemperatureSeries(forecast_hourly_data)
hindcast_hourly_data = hindcast_request.json()['hourly']['data']
hindcast_series = w.getTimeMachineHourlyTemperatureSeries(hindcast_hourly_data)
df = w.combineForecastAndTimemachineSeries2DfAndSave(forecast_series, hindcast_series)
df.to_csv('data/combined-temp-data.csv', index = False)
# > Loading up and passing data to D3:
path2Data = '~/Documents/node-projects/weather-app/python/flask-d3/data/'
fname = 'combined-temp-data.csv'
df = pd.read_csv(path2Data + fname, sep = ',')
temp_data = df.to_dict(orient = 'records')
temp_data = json.dumps(temp_data, indent = 2)
data = { 'temp_data': temp_data }
return render_template(
'forecast-hindcast-temp.html',
data = data
)
@app.route('/inheritance')
def getInheritanceTest():
return render_template('inheritance-test.html')
@app.route('/test2', methods = ['GET', 'POST'])
def getFormInput():
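    # NOTE (added): SearchByCityNameForm is not imported in this file; it
    # presumably comes from a separate forms module (an assumption), so this
    # route would raise a NameError as written.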
form = SearchByCityNameForm()
return render_template('test.html', title = 'Search By City', form = form)
if __name__ == '__main__':
app.run(port = 8080, debug = True)
|
[
"kang.wang@hakai.org"
] |
kang.wang@hakai.org
|
14c0fbeb00123c49b5619983159bc4fcae4f3f8d
|
d089a726faaa79fbb8cf7c0c8bf39f8b53c024fd
|
/china_meta/item/migrations/0001_initial.py
|
7df235555ca2c4a660394d99160d2418772d7d5f
|
[] |
no_license
|
LEESM/china_meta
|
5ae2271a24691d5a90f5de17ca088e27478023e1
|
8736c363ea156d78439aa9ae0b6dd488aafa0c56
|
refs/heads/master
| 2021-01-11T07:47:04.140101 | 2016-09-04T14:02:50 | 2016-09-04T14:02:50 | 66,812,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,302 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-30 07:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('image', models.ImageField(blank=True, upload_to='')),
('pub_date', models.DateTimeField(auto_now_add=True)),
('detail', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='ItemLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('source_name', models.CharField(max_length=100)),
('source_url', models.CharField(max_length=100)),
('price', models.IntegerField(default=0)),
('delivery', models.CharField(max_length=100)),
('additional_benefit', models.CharField(max_length=100)),
],
),
]
|
[
"fast0522@gmail.com"
] |
fast0522@gmail.com
|
d0116d4a239f04f6479599ffb1c93317e8cf427d
|
abbdf7f8404a16b4566924b882504824749a5d3a
|
/MultiChoiceExample.py
|
ea2d6700d6d37fdfbc8ff649840b8102fe47ad60
|
[] |
no_license
|
LZhang2004/Year9DesignCS-PythonLZ
|
58f8f13defec37567d7eb05feee6ec8a5a091526
|
f095e441dbc4c13c86d57c9183fad36a16422c79
|
refs/heads/master
| 2020-03-28T12:58:07.642482 | 2018-11-14T21:49:01 | 2018-11-14T21:49:01 | 148,352,761 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 704 |
py
|
import tkinter as tk
def change(*args):
print("running change")
print(var.get())
root = tk.Tk()
title = tk.Label(root, text = "Welcome to Encryptor", fg = "black", font = ("Courier", 44) )
title.pack()
OPTIONS = [
"Caesar Cipher",
"bunny",
"chicken",
]
var = tk.StringVar(root)
var.set(OPTIONS[0])
var.trace("w",change)
dropDownMenu = tk.OptionMenu(root,var, OPTIONS[0],OPTIONS[1],OPTIONS[2])
dropDownMenu.config(height = 3, width = 20)
dropDownMenu.pack()
MODES = [
("Encrypt", "1"),
("Decrypt", "2"),
]
v = tk.StringVar()
v.set("1")
for r in range(len(MODES)):
    b = tk.Radiobutton(root, text=MODES[r][0], variable=v, value=MODES[r][1], command=change)
    b.pack()
root.mainloop()
|
[
"leo.chris@gmail.com"
] |
leo.chris@gmail.com
|
8e2c3bf85967880b09d3d7ad9fa6c206218021fd
|
ded143fe73523a34219239f896b91639b55fa83a
|
/libs/ParseEvtx/Script/evtx_dump_chunk_slack.py
|
0e147791aae3c49f0e3e4f00afac95d8632aff60
|
[] |
no_license
|
shjvero/teammse
|
aff6a69a9daa31448d0ae9c4cb41624a42dcddba
|
c272395f4e5a8bb7ba133398a24e1970582e1341
|
refs/heads/master
| 2020-03-31T03:15:14.867448 | 2018-10-11T02:20:44 | 2018-10-11T02:20:44 | 151,858,637 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,746 |
py
|
#!c:\users\asdzx\appdata\local\programs\python\python36\python.exe
# This file is part of python-evtx.
#
# Copyright 2015 Willi Ballenthin <william.ballenthin@mandiant.com>
# while at Mandiant <http://www.mandiant.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mmap
import sys
import contextlib
import argparse
from Evtx.Evtx import FileHeader
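# Example invocation (illustrative, not part of the original script):
#
#     python evtx_dump_chunk_slack.py System.evtx > slack.bin
#
# Here "slack space" is the unused region between the end of the last record
# in a 0x10000-byte chunk and the end of that chunk.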
def main():
parser = argparse.ArgumentParser(
description="Dump the slack space of an EVTX file.")
parser.add_argument("evtx", type=str,
help="Path to the Windows EVTX event log file")
args = parser.parse_args()
    with open(args.evtx, 'rb') as f:
with contextlib.closing(mmap.mmap(f.fileno(), 0,
access=mmap.ACCESS_READ)) as buf:
fh = FileHeader(buf, 0x0)
for chunk in fh.chunks():
chunk_start = chunk.offset()
last_allocated_offset = chunk_start
for record in chunk.records():
last_allocated_offset = record.offset() + record.size()
                sys.stdout.buffer.write(buf[last_allocated_offset:chunk_start + 0x10000])
if __name__ == "__main__":
main()
|
[
"gej48443@gmail.com"
] |
gej48443@gmail.com
|
deb5e444db08df24b2d2882fc523d31889cdf3c8
|
ae9c8450d7b656b20d18cf885ca14b6311f39f5a
|
/analysis/_1_process_raw_data/parse_facebook.py
|
bd579b861bc7b3e15dd498d46d32317e48613400
|
[] |
no_license
|
wongjiahau/TCLCPhase2
|
4f9917eb170cec03faba089f322ae8b55ffe6129
|
7b61635842dec98c034d7e69fdd9a5c368fe24cb
|
refs/heads/master
| 2021-05-10T07:54:38.965161 | 2018-01-25T06:42:09 | 2018-01-25T09:13:39 | 118,867,038 | 1 | 0 | null | 2018-01-25T05:32:09 | 2018-01-25T05:32:08 | null |
UTF-8
|
Python
| false | false | 423 |
py
|
import csv
from analysis.Post import Post
def parse_facebook(file_path):
result = []
with open(file_path, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
p = Post()
p.date = row['status_published']
            p.value = (row['status_message'] + row['link_name']).lower()
p.source = 'facebook'
result.append(p)
return result
|
[
"hou32hou@gmail.com"
] |
hou32hou@gmail.com
|
438aff543d82ca32db8f6713c4e6cc93cdec7990
|
a74a8b5852896fef845747172fb7fb230f013f4a
|
/myWebServer-test6.py
|
38e64f7f44030d7b3d6ef65d91d6a4091cf045b8
|
[] |
no_license
|
4220182/prometheus-metrics
|
5c37ad2b6b901db5e2f88b6eaf6e769fd6d1f184
|
5bffa1710bc5df82e7cd104e7123c9a06bf70b0a
|
refs/heads/master
| 2021-07-06T03:55:41.478633 | 2020-07-19T11:39:52 | 2020-07-19T11:39:52 | 134,413,046 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,852 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Uses Python's built-in WSGI server (wsgiref); if performance is a concern you
can use another WSGI server instead. A WSGI server built on green-thread
technology such as gevent or eventlet can support much higher concurrency.
"""
from prometheus_client import start_http_server, Counter, Summary
import random
import time
from flask import Flask, jsonify, render_template
from wsgiref.simple_server import make_server
# Define a Counter-type metric. A Counter only ever increases and is reset to 0
# when the process restarts. The first constructor argument is the metric name,
# the second is the text shown as HELP, and the third (optional) is the list of
# label names attached to the metric.
http_requests_total = Counter("http_requests", "Total request count of the host", ['code', 'method', 'endpoint'])
# Summary type: it records two time series for the decorated function:
# request_processing_seconds_count - number of times the function was called
# request_processing_seconds_sum   - total time spent in the function
request_time = Summary('request_processing_seconds', 'Time spent processing request')
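# For example (illustrative PromQL, not part of this script), the average
# request latency can be derived from those two series:
#   rate(request_processing_seconds_sum[1m]) / rate(request_processing_seconds_count[1m])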
app = Flask(__name__)
# Decorate function with metric.
@app.route("/")
@request_time.time()  # must be placed below the app.route decorator
def process_request():
time.sleep(random.random())
http_requests_total.labels(code="302", method="get", endpoint="/").inc()
return jsonify({"return": "response 302!"}), 302, {"Content-Type": "application/text", "location": "/app"}
# desc
@app.route("/app")
@request_time.time()  # must be placed below the app.route decorator
def process_request_app():
time.sleep(random.random())
http_requests_total.labels(code="302", method="get", endpoint="/app").inc()
return render_template('app.html')
# desc
@app.route("/hello")
@request_time.time()  # must be placed below the app.route decorator
def process_request_hello():
time.sleep(random.random())
http_requests_total.labels(code="200", method="get", endpoint="/hello").inc()
return jsonify({"return": "hello OK!"})
@app.route("/301")
def process_request_301():
time.sleep(random.random())
http_requests_total.labels(code="301", method="get", endpoint="/301").inc()
return jsonify({"return": "response 301!"}), 301, {"Content-Type":"application/text","location":"/"}
@app.route("/302")
def process_request_302():
time.sleep(random.random())
http_requests_total.labels(code="301", method="get", endpoint="/302").inc()
return jsonify({"return": "response 301!"}), 302, {"Content-Type":"application/text","location":"/"}
@app.route("/429")
def process_request_429():
time.sleep(random.random())
http_requests_total.labels(code="429", method="get", endpoint="/429").inc()
return jsonify({"return": "response 429!"}), 429, {"Content-Type":"application/text"}
@app.route("/503")
def process_request_503():
time.sleep(random.random())
http_requests_total.labels(code="503", method="get", endpoint="/503").inc()
return jsonify({"return": "response 503!"}), 503, {"Content-Type":"application/text"}
# Health-check endpoint
@app.route('/healthy')
def healthy():
return "healthy"
if __name__ == '__main__':
# Start up the server to expose the metrics.
start_http_server(9106)
# Generate some requests.
httpd = make_server(
'0.0.0.0', # The host name.
8086, # A port number where to wait for the request.
app # Our application object name, in this case a function.
)
print("started.\n"
"url: 0.0.0.0:8080/\n"
"response 301: 0.0.0.0:8080/301\n"
"response 429: 0.0.0.0:8080/429\n"
"response 503: 0.0.0.0:8080/503\n"
"metrics: 0.0.0.0:9100/metrics\n"
"healthy: 0.0.0.0:9100/healthy")
httpd.serve_forever()
|
[
"4220182@qq.com"
] |
4220182@qq.com
|
b26bc7fe0011c8ea86eb4da2c10d7efe7af6b6ac
|
68de57c8bc62159571d299234a41cc651461b84c
|
/server_TCP.py
|
05e143b8c76c4aab13db4b4721417db6e26cfc1a
|
[] |
no_license
|
lukaszgolojuch/TCP-Client-Server
|
dcbc1bbdb6b75b7fd52d1202b664d7384cf1aa3e
|
2cac839fa70cc6e5519f9d2335abf97e4d2dd72b
|
refs/heads/master
| 2022-12-17T20:43:59.339517 | 2020-09-21T12:48:30 | 2020-09-21T12:48:30 | 297,339,315 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 833 |
py
|
import socket
import threading

bind_ip = "0.0.0.0"
bind_port = 9999

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)

# Thread handler for a single client
def handle_client(client_socket):
    # Print the data sent by the client
    request = client_socket.recv(1024)
    print("[*] Received: %s" % request)
    # Send a response packet back
    client_socket.send(b"Connection Correct!")
    client_socket.close()

while True:
    print("[*] Listening on %s:%d" % (bind_ip, bind_port))
    client, addr = server.accept()
    print("[*] Accepted connection from: %s:%d" % (addr[0], addr[1]))
    # Spawn a client thread to handle the incoming data
    client_handler = threading.Thread(target=handle_client, args=(client,))
    client_handler.start()
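# Quick test (illustrative): run this script, then from another terminal:
#   echo hello | nc 127.0.0.1 9999
# The server prints the received bytes and replies "Connection Correct!".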
|
[
"lukasz.golojuch@gmail.com"
] |
lukasz.golojuch@gmail.com
|
7ab4386359ae261d8503ca3bde63b6e0a0864feb
|
7057d53063720fc498812f37d4b381a33bfdf86f
|
/mr_master.py
|
cf79ec591972192616dd6a9caf28756e2d97ed95
|
[] |
no_license
|
robinbattle/MapReduce
|
79e95c13ccb4379a5a2edea259d4cf3c7906b7c6
|
4c35cdc27e90daa7622489d849baba9beda80bf4
|
refs/heads/master
| 2020-12-24T16:23:36.404538 | 2015-03-25T00:53:23 | 2015-03-25T00:53:23 | 31,989,384 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,865 |
py
|
__author__ = 'blu2, jtanigawa'
import sys
import zerorpc
import gevent
import subprocess
class Master(object):
def __init__(self):
gevent.spawn(self.controller)
self.state = 'Ready'
self.workers = {}
self.map_workers = []
self.reduce_workers = []
self.current_map_works = []
self.current_reduce_works = {}
self.map_works_in_progress = {}
self.input_filename = ""
self.split_size = 10
self.base_filename = ""
self.num_reducers = 1
self.restart = True
self.finished = False
def controller(self):
while True:
print '[Master:%s] ' % (self.state),
for w in self.workers:
print '(%s,%s,%s)' % (w[0], w[1], self.workers[w][0]),
print
print "current map work:" + str(self.current_map_works)
print "current super reduce work:" + str(self.current_reduce_works)
try:
for w in self.workers:
try:
if self.workers[w][0] != 'Die':
worker_status = self.workers[w][1].ping()
map_work_status = worker_status[0]
if self.workers[w][0][0] == 'Working':
if map_work_status == 'Finished':
work_index = worker_status[1]
transmitting_index = worker_status[2]
self.current_reduce_works[work_index[0], work_index[1]] = transmitting_index, w[0], w[1]
map_work_status = "Finished"
new_status = [map_work_status, self.workers[w][0][1]]
self.workers[w] = (new_status, self.workers[w][1])
elif self.workers[w][0][0] == 'Finished':
if w in self.map_works_in_progress:
del self.map_works_in_progress[w]
if map_work_status == 'Ready':
#print "add back to map worker" + str(w)
self.map_workers.append(w)
map_work_status = "Ready"
new_status = [map_work_status, self.workers[w][0][1]]
self.workers[w] = (new_status, self.workers[w][1])
elif self.workers[w][0][0] == 'Ready':
print "I am (map) ready"
reduce_work_status = worker_status[3]
if self.workers[w][0][1] == 'Working':
if reduce_work_status == 'Finished' or reduce_work_status == 'Ready':
new_status = [self.workers[w][0][0], reduce_work_status]
self.workers[w] = (new_status, self.workers[w][1])
elif self.workers[w][0][1] == 'Finished':
if reduce_work_status == 'Ready':
new_status = [self.workers[w][0][0], reduce_work_status]
self.workers[w] = (new_status, self.workers[w][1])
elif self.workers[w][0][1] == 'Ready':
print "I am (reduce) ready"
else:
self.workers[w][0] # do nothing
except zerorpc.TimeoutExpired:
if w in self.map_works_in_progress:
if self.map_works_in_progress[w] not in self.current_map_works:
self.current_map_works.append(self.map_works_in_progress[w])
del self.map_works_in_progress[w]
#print "#######################"
new_w = self.pick_new_reducer()
if new_w is not None:
self.reduce_workers.append(new_w)
else:
print "Read to restart"
self.restart = True
self.num_reducers = self.num_avaliable_reducer()
print "$$$$$$$$$$$$$$$$$$$$"
print "%%%%%%%%%%%%%%%%%%%%"
print "@@@@@@@@@@@@@@@@@@@@"
print "$$$$$$$$$$$$$$$$$$$$"
print "%%%%%%%%%%%%%%%%%%%%"
print "@@@@@@@@@@@@@@@@@@@@"
self.workers[w] = ('Die', self.workers[w][1])
gevent.sleep(0.03)
except RuntimeError:
print "restart controller loop"
def register_async(self, ip, port):
print '[Master:%s] ' % self.state,
print 'Registered worker (%s,%s)' % (ip, port)
c = zerorpc.Client(timeout=5)
c.connect("tcp://" + ip + ':' + port)
self.workers[(ip,port)] = (['Ready', 'Ready'], c, [])
c.ping()
def register(self, ip, port):
gevent.spawn(self.register_async, ip, port)
def pick_new_reducer(self):
for w in self.workers:
if w not in self.reduce_workers:
return w
return None
def num_avaliable_reducer(self):
count = 0
for w in self.reduce_workers:
if self.workers[w][0] == 'Die':
continue
count += 1
return count
def num_avaliable_worker(self):
count = 0
for w in self.workers:
if self.workers[w][0] == 'Die':
continue
count += 1
return count
def reducer_alive(self):
for w in self.reduce_workers:
            if self.workers[w][0] != 'Die':
return w
return None
def reducers_working(self):
for w in self.reduce_workers:
if self.workers[w][0][1] == 'Working':
return True
return False
def clear_work_list(self):
self.current_map_works = []
self.current_reduce_works = {}
self.map_works_in_progress = {}
def reset_status(self):
self.state = 'Ready'
self.map_workers = []
self.reduce_workers = []
self.current_map_works = []
self.current_reduce_works = {}
self.map_works_in_progress = {}
self.restart = True
self.finished = False
self.input_filename = ""
self.split_size = 10
self.base_filename = ""
self.num_reducers = 1
procs = []
for w in self.workers:
proc = gevent.spawn(self.workers[w][1].reset_params)
procs.append(proc)
gevent.joinall(procs)
def map_job(self, type):
while len(self.current_map_works) > 0:
if self.restart:
break
if len(self.map_workers) <= 0:
#print "all mappers are busy"
gevent.sleep(0.03)
continue
map_worker_p = self.map_workers.pop()
map_work_index = self.current_map_works.pop()
self.map_works_in_progress[map_worker_p] = map_work_index
#print "map_worker_p:" + str(map_worker_p)
#print "map_work_index:" + str(map_work_index)
new_status = ["Working", self.workers[map_worker_p][0][1]]
self.workers[map_worker_p] = (new_status, self.workers[map_worker_p][1])
proc = gevent.spawn(self.workers[map_worker_p][1].do_map, data_dir, self.input_filename, map_work_index,
self.num_reducers, type)
gevent.sleep(0.03)
print "##### end of mapping"
def reduce_job(self, type):
print "##### start reducing"
print self.current_map_works
while len(self.current_reduce_works.keys()) > 0 or len(self.current_map_works) > 0 or \
len(self.map_works_in_progress.keys()) > 0:
if self.restart:
break
#print "########## i am in reducing circle"
if len(self.current_reduce_works.keys()) == 0:
gevent.sleep(0.05)
continue
reduce_work_key = self.current_reduce_works.keys()[0]
reduce_work_list = self.current_reduce_works[reduce_work_key]
transitting_index = reduce_work_list[0]
ip = reduce_work_list[1]
port = reduce_work_list[2]
print "Trannsitting index:" + str(transitting_index)
try:
index = 0
procs = []
#print "self.reduce_workers: " + str(self.reduce_workers)
#for w in self.reduce_workers:
# print str(w) + str(self.workers[w])
if self.reducers_working():
print "not all reducers ready, wait"
gevent.sleep(0.03)
continue
for w in self.reduce_workers:
if self.workers[w][0] == 'Die':
continue
if self.workers[w][0][1] == 'Working':
continue
new_status = [self.workers[w][0][0], "Working"]
self.workers[w] = (new_status, self.workers[w][1])
file_index = self.reduce_workers.index(w)
if index > len(transitting_index) - 1:
break
reduce_index = transitting_index[index]
reduce_work = reduce_index, ip, port
print str(w) + " will do " + str(reduce_work)
proc = gevent.spawn(self.workers[w][1].do_reduce, reduce_work, data_dir, self.base_filename, file_index, type)
procs.append(proc)
index += 1
gevent.joinall(procs, raise_error=True)
#print "finished reduce work"
#print "start output"
procs = []
for w in self.reduce_workers:
if self.workers[w][0] == 'Die':
continue
file_index = self.reduce_workers.index(w)
#proc = gevent.spawn(self.workers[w][1].write_to_file, data_dir, self.base_filename + str(file_index) + ".txt")
proc = gevent.spawn(self.receivingReduceFile, w, data_dir, self.base_filename + str(file_index) + ".txt")
procs.append(proc)
gevent.joinall(procs, raise_error=True)
print "***** delete key:" + str(reduce_work_key)
del self.current_reduce_works[reduce_work_key]
#print "finished output"
except zerorpc.TimeoutExpired:
self.current_map_works.append(reduce_work_key)
c = zerorpc.Client(timeout=5)
c.connect("tcp://" + ip + ':' + port)
c.force_reset_to_map_ready()
#print ip + ':' + port + " should be ready"
new_w = self.pick_new_reducer()
if new_w is not None:
self.reduce_workers.append(new_w)
else:
print "HHHHHHHHHHHHHHHHHHHH"
self.restart = True
self.num_reducers = self.num_avaliable_reducer()
print "$$$$$$$$$$$$$$$$$$$$"
print "%%%%%%%%%%%%%%%%%%%%"
print "@@@@@@@@@@@@@@@@@@@@"
print "$$$$$$$$$$$$$$$$$$$$"
print "%%%%%%%%%%%%%%%%%%%%"
print "@@@@@@@@@@@@@@@@@@@@"
#print "######################################"
#print "add " + str(reduce_work_key) + " back to self.current_map_work"
#print "######################################"
gevent.sleep(0.03)
if not self.restart:
self.finished = True
def receivingReduceFile(self, w, data_dir, filename):
c = zerorpc.Client(timeout=5)
c.connect("tcp://" + w[0] + ':' + w[1])
text = c.send_current_reduce_file_to_master()
output = open(data_dir + filename, 'w')
output.write(text)
output.close()
def do_work(self, filename, split_size, num_reducers, base_filename, type):
self.reset_status()
# init params
self.input_filename = filename
self.split_size = int(split_size)
self.num_reducers = int(num_reducers)
self.base_filename = base_filename
#self.map_workers.append(('0.0.0.0', '10001'))
#self.map_workers.append(('0.0.0.0', '10002'))
#self.reduce_workers.append(('0.0.0.0', '10000'))
#self.reduce_workers.append(('0.0.0.0', '10001'))
procs = []
while True:
gevent.sleep(0.03)
if self.restart:
print "#############################################################################"
num_avaliable_worker = self.num_avaliable_worker()
if self.num_reducers > num_avaliable_worker:
self.num_reducers = num_avaliable_worker
# clear map/reduce worker list
self.map_workers = []
self.reduce_workers = []
# create map/reduce worker list
count = 0
for w in self.workers:
if self.workers[w][0] == 'Die':
continue
self.map_workers.append(w)
if count < self.num_reducers:
self.reduce_workers.append(w)
count += 1
print "We have " + str(len(self.map_workers)) + " mappers, and " + str(len(self.reduce_workers)) + " reducers"
print "Mapper: " + str(self.map_workers)
print "Reducers:" + str(self.reduce_workers)
# clear map/reduce work list
self.clear_work_list()
# split file, this will also assign work to current_map_work
self.split_file()
# reset restart
self.restart = False
for proc in procs:
gevent.kill(proc)
procs = []
# spawn map job
procs.append(gevent.spawn(self.map_job, type))
# spawn reduce job
procs.append(gevent.spawn(self.reduce_job, type))
gevent.joinall(procs)
if self.finished:
break
gevent.sleep(0.03)
def split_file(self):
chunk = self.split_size
input = open(data_dir + self.input_filename, 'r').read()
split_list = []
offset = 0
while offset < len(input):
end = input.find(' ', offset + chunk)
if end != -1:
new_chunk = end - offset
work = offset, offset + new_chunk
split_list.append(work)
offset += new_chunk
else:
new_chunk = len(input) - 1 - offset
work = offset, offset + new_chunk
split_list.append(work)
break
self.current_map_works = split_list
return split_list
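    # Illustrative trace (added note, not in the original): with split_size=10
    # and the input "the quick brown fox jumps" (spaces at 3, 9, 15, 19),
    # split_file yields [(0, 15), (15, 24)]: each split ends at the first space
    # found at or after offset + chunk, and the final character is dropped by
    # the len(input) - 1 - offset branch.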
if __name__ == '__main__':
port = sys.argv[1]
data_dir = sys.argv[2]
if data_dir[len(data_dir)-1] != '/':
data_dir += '/'
master_addr = 'tcp://0.0.0.0:' + port
s = zerorpc.Server(Master())
s.bind(master_addr)
s.run()
|
[
"blu2@dons.usfca.edu"
] |
blu2@dons.usfca.edu
|
7ae8b387c43d9fad46026ca984184ad67fc76bb1
|
14f3df564f4fd028be94a44b052aa6843d8535dc
|
/make-model.py
|
0941a9b5857e216a2d330ae6653842d81b1cb5eb
|
[] |
no_license
|
foxmouldy/blib
|
e2d5442b17aa5a47e7ebdf19dff2c9d3e62c5202
|
3cf36655b44eea7a70516f4ddae0a17eecd32cb2
|
refs/heads/master
| 2021-01-10T21:59:54.125431 | 2013-08-20T15:01:33 | 2013-08-20T15:01:33 | 11,581,518 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,506 |
py
|
from tasks import *
from taskinit import *
import casac
import sys
from optparse import OptionParser
usage = "usage: %prog options"
parser = OptionParser(usage=usage)
# O1 for Option
parser.add_option("--ra", type='string', dest='ra', default='10h00m00.0s',
help="Right Ascension of Target [10h00m00.0s]")
parser.add_option("--dec", type='string', dest='dec', default='-30d00m00.0s',
help="Declination of Target [-30d00m00.0s]")
parser.add_option("-f", type='string', dest='f', default="Gaussian",
help = "Name for output files [Gaussian]")
(options, args) = parser.parse_args()
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(0)
direction = "J2000 " + options.ra + " " + options.dec
cl.done()
cl.addcomponent(dir=direction, flux=1.0, fluxunit='Jy', freq='1.420GHz',
shape="Gaussian",
majoraxis="5arcmin", minoraxis='1arcmin',
positionangle='45.0deg')
#
ia.fromshape(options.f+".im",[256,256,1,1],overwrite=True)
cs=ia.coordsys()
cs.setunits(['rad','rad','','Hz'])
cell_rad=qa.convert(qa.quantity("2arcsec"),"rad")['value']
cs.setincrement([-cell_rad,cell_rad],'direction')
cs.setreferencevalue([qa.convert(options.ra,'rad')['value'],qa.convert(options.dec,'rad')['value']],type="direction")
cs.setreferencevalue("1.420GHz",'spectral')
cs.setincrement('1GHz','spectral')
ia.setcoordsys(cs.torecord())
ia.setbrightnessunit("Jy/pixel")
ia.modify(cl.torecord(),subtract=False)
exportfits(imagename=options.f+'.im',fitsimage=options.f+'.fits',overwrite=True)
|
[
"frank@rigel.astron.nl"
] |
frank@rigel.astron.nl
|
bc5233b50a4786f6c05d414551612e05b2399edc
|
d6b0cfbea0c3d3a37bb1f6a6890fc1574bcdf566
|
/src/user_service/service_api/flask_api.py
|
e144758c47419349085f523bca847cd8711bc3a1
|
[
"Apache-2.0"
] |
permissive
|
Forcepoint/fp-bd-microsoft-graph-azure
|
2628bd12068a97f6fffb6bb342086b00b892594a
|
89808dbbacd6f1129a47d74f37e349ba9272259b
|
refs/heads/master
| 2023-01-24T19:43:04.597754 | 2020-12-02T14:31:14 | 2020-12-02T14:37:18 | 317,608,235 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,780 |
py
|
#
# Author: Dlo Bagari
# created Date: 13-11-2019
from flask import Flask, request, jsonify
from user_lib.logger import Logger
from user_lib.entity import Entity
from user_lib.const_values import ConstValues
from microsoft_graph.user_api import UserApi
from microsoft_graph.group_api import GroupApi
from user_lib.access_token import AccessToken
logger = Logger()
entity = Entity()
access_token = AccessToken()
user_api = UserApi(access_token, logger)
group_api = GroupApi(access_token, logger)
app = Flask(__name__)
@app.route("/user/<user_id>/groups")
def get_users_groups(user_id):
error_code, groups = user_api.get_user_groups(user_id)
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", groups["message"])
return jsonify({"error": groups["message"]}), 400
else:
return jsonify(groups), 200
@app.route("/user")
def get_user():
error_code, users = user_api.get_user()
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", users["message"])
return jsonify({"error": users}), 400
else:
return jsonify(users), 200
@app.route("/groups")
def get_groups():
error_code, response = group_api.get_groups()
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", response["message"])
return jsonify({"error": response}), 400
else:
return jsonify(response), 200
@app.route("/user/<user_id>")
def get_user_by_id(user_id):
error_code, users = user_api.get_user(user_id)
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", users["message"])
return jsonify({"error": users}), 400
else:
return jsonify(users), 200
# used
@app.route("/user/filter")
def filter_user():
first_name = request.args.get("first_name", None)
last_name = request.args.get("last_name", None)
error_code, user = user_api.find_user_by_name(first_name, last_name)
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", user["message"])
return jsonify({"error": user}), 400
else:
return jsonify(user), 200
# used
@app.route("/group/filter")
def filter_group_by_name():
name = request.args.get("name", None)
if name is None:
return jsonify({"error": "missing parameter name"}), 400
error_code, group = group_api.filter_group_by_name(name)
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", group["message"])
return jsonify({"error": group["message"]}), 400
else:
return jsonify(group), 200
@app.route("/group/change", methods=["POST"])
def change_group():
user_id = request.args.get("user_id")
if user_id is None:
return jsonify({"error": "missing parameter user_id"}), 400
group_name = request.args.get("group_name")
if group_name is None:
return jsonify({"error": "missing parameter group_name"}), 400
error_code, response = group_api.change_group(user_id, group_name)
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", response["message"])
return jsonify({"error": response["message"]}), 400
else:
return jsonify(response), 200
@app.route("/groups/<group_id>/members")
def get_group_members(group_id):
error_code, users = group_api.get_group_members(group_id)
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", users["message"])
return jsonify({"error": users["message"]}), 400
else:
return jsonify(users), 200
@app.route("/groups/<group_id>/add/<user_id>")
def add_member(group_id, user_id):
error_code, response = group_api.add_member(group_id, user_id)
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", response["message"])
return jsonify({"error": response["message"]}), 400
else:
return jsonify(response), 200
@app.route("/groups/<group_id>/remove/<user_id>")
def remove_member(group_id, user_id):
error_code, response = group_api.remove_member(group_id, user_id)
if error_code != ConstValues.ERROR_CODE_ZERO:
logger.error("API_REQUEST", response["message"])
return jsonify({"error": response["message"]}), 400
else:
return jsonify(response), 200
# TODO: in process
@app.route("/entity", methods=["POST"])
def handle_entity():
data = request.json
error_code, result, entity_id = entity.handle_notification(data)
if error_code == ConstValues.ERROR_CODE_ZERO and result is True:
return jsonify({"entity_id": entity_id}), 201
else:
return jsonify({"error": "Failed in handling the request"}), 400
|
[
"Mindaugas.Rakauskas@forcepoint.com"
] |
Mindaugas.Rakauskas@forcepoint.com
|
00f50413c35210f59461ee60300f036e4ff03104
|
283c3b6d4fac4bf95af9dcc23c384758d8887f8d
|
/crawl.py
|
af8984eb0980c6d7b6f001c6e7851c9cd52a101f
|
[] |
no_license
|
NingyuanXu/Weapon_crawl
|
33e98cd37d352779597de7dd0b057f0770a14748
|
a8a729c22e0b8ec455cad481096995c2a71bde08
|
refs/heads/master
| 2020-07-01T20:51:56.397645 | 2019-08-08T16:34:14 | 2019-08-08T16:34:14 | 201,297,145 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,060 |
py
|
import requests
import os
from bs4 import BeautifulSoup
from docx import Document
from docx.shared import Inches
siteurl = 'http://weapon.huanqiu.com'
proxies = {'http': '210.22.5.117:3128',
'https': '210.22.5.117:3128'
}
def get_data(url, page_type):
    # page_type 1 = detail page, anything else = list page
    heads = {}
    heads['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    response = requests.get(url, headers=heads, proxies=proxies, timeout=6)
    if response.status_code == 200:
        html = response.content.decode('utf-8')
        if page_type == 1:
            parse_content(html)
        else:
            parse_data(html)
def parse_data(html):
bs = BeautifulSoup(html,'html.parser')
metas = bs.find_all(class_='picList')
ul = metas[0]
li = ul.select("li")
i=0
while i < len(li):
href = siteurl + li[i].find(class_="pic").find("a").attrs["href"]
print(li[i].find(class_="name").text)
print(href)
get_data(href,1)
i = i+1
def parse_content(html):
bs = BeautifulSoup(html,'html.parser')
meta = bs.find(class_="detail clearfix")
conMain = meta.find(class_="conMain")
maxPic = conMain.find(class_="maxPic")
intron = conMain.find(class_="intron")
module = intron.find(class_="module").text
side = bs.find(class_="side")
dataInfo = side.find(class_="dataInfo").find_all("li")
name = dataInfo[0].next.next.next
    if maxPic is not None:
img = maxPic.find("img").attrs["src"]
else:
img = side.find(class_="dataInfo").find("img").attrs["src"]
name = name.replace("/","-").strip()
path = "./files/"+name
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
document = Document()
document.add_heading(name,3)
request_download(path,img,name)
document.add_picture(path+'/'+img[img.rindex("/")+1:])
document.add_paragraph(module)
info = conMain.find(class_="info")
    if info is not None:
title_ = info.find_all(class_="title_")
textInfo = info.find_all(class_="textInfo")
for i in range(len(textInfo)):
document.add_heading(title_[i].text,2)
document.add_paragraph(textInfo[i].text)
document.add_heading("技术数据",2)
i = 0
while i < len(dataInfo):
document.add_paragraph(dataInfo[i].text)
i = i+1
document.save(path+'/'+name+'.doc')
def request_download(path,IMAGE_URL,name):
r = requests.get(IMAGE_URL,proxies=proxies,timeout=6)
print(IMAGE_URL[IMAGE_URL.rindex("/")+1:])
with open(path+'/'+IMAGE_URL[IMAGE_URL.rindex("/")+1:],'wb') as f:
f.write(r.content)
def read_file():
    # the 'with' statement closes the file on exit; no explicit close needed
    with open('url.txt', 'r') as f:
        for line in f:
            line = line.strip()
            if line:
                print(line)
                get_data(line, 0)
if __name__ == '__main__':
read_file()
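# Note (inferred from read_file above, not documented in the source): url.txt
# should contain one list-page URL per line, e.g. http://weapon.huanqiu.com/<list-page>;
# each entry found there is crawled into ./files/<name>/<name>.doc.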
|
[
"xu.ningyuan@outlook.com"
] |
xu.ningyuan@outlook.com
|
c4fb08fa830674dbd87a91837f635e684fc58752
|
cf63e3bbb82fe976d38bfc5442f34d87f30edb53
|
/zhihuimohe4py/settings.py
|
eb838681640bd7d94ae5b125e632f631b22f89ce
|
[] |
no_license
|
lovederh/magicbox
|
5ff37d157045d0ff2216173cf64785ae2a812cd7
|
c692d14f13228e29e8d90aa2fb76f2157ee23211
|
refs/heads/master
| 2022-11-23T10:15:15.969273 | 2020-07-14T01:00:27 | 2020-07-14T01:00:27 | 274,322,415 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,311 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Django settings for zhihuimohe4py project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '$vcwin@+8dm4v3)p-xniux8lr9n)ins-74p6g86lsv%5n%8$-2'
DEBUG = True
# DEBUG = False
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'dwebsocket',  # WebSocket support
'base.apps.BaseConfig',
'sr.apps.SrConfig',
'zhkt.apps.ZhktConfig',
]
# allow every individual view to use websockets
WEBSOCKET_ACCEPT_ALL = True
# X-Frame-Options setting; fixes iframes refusing to display
X_FRAME_OPTIONS = 'SAMEORIGIN'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'zhihuimohe4py.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],  # base directory for templates
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'zhihuimohe4py.wsgi.application'
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.mysql',  # database engine
        'NAME': 'hongyang_mh',  # database name
        'HOST': '127.0.0.1',  # database host (local machine: 127.0.0.1)
        'PORT': 3306,  # port
        'USER': 'root',  # database user
        'PASSWORD': 'root',  # database password
        # 'OPTIONS': {
        #     "init_command": "SET sql_mode='STRICT_TRANS_TABLES'",  # strict mode for table creation
        # },
'TEST': {
'NAME': 'test',
},
},
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# path for static files
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filename': BASE_DIR + '/upload/logs/debug.log',
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG else 'ERROR',
'propagate': True,
},
'django.request': {
'handlers': ['file', 'console'],
'level': 'INFO' if DEBUG else 'ERROR',
'propagate': False,
},
},
}
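
# Usage sketch (illustrative, not part of the original settings): application
# code picks up the handlers configured above through the standard logging API:
#   import logging
#   logger = logging.getLogger('django.request')
#   logger.error('request failed')  # routed to both the file and console handlers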
|
[
"lovederh@126.com"
] |
lovederh@126.com
|
cd435e90cd387ae537fffb76deea07be9701be65
|
01e299bd037964da1bc770989c2ad7a7080557fb
|
/Algorithm/binarySearch.py
|
aa48ab094f8865f198efae5470305f265666d466
|
[] |
no_license
|
jngsoo/PS
|
a336e48868c5dfd52ff5e3db3837ad8c7725fb21
|
98a3038a7192e57bc0c3aad7833658e9a97d84b2
|
refs/heads/master
| 2020-05-19T06:07:13.910697 | 2020-01-24T11:45:10 | 2020-01-24T11:45:10 | 184,866,391 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 447 |
py
|
def binarySearch(arr, left, right, target):
if right < left:
return -1
print('bin-search!')
mid = (left + (right + 1)) // 2
print(left, mid, right)
    if arr[mid] == target:
        return arr[mid]
elif arr[mid] < target:
left = mid + 1
else:
right = mid - 1
return binarySearch(arr, left, right, target)
t1 = [2,4,5,9,11,14,19]
print(binarySearch(t1, 0, len(t1)-1, 19))
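# Illustrative trace (not in the source): for t1 and target 19 the interval
# narrows [0,6] -> [4,6] -> [6,6] and returns 19 after three probes, i.e.
# O(log n) comparisons. The upper-middle mid ((left + right + 1) // 2) still
# terminates because both branches strictly shrink the interval.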
|
[
"wt2933@icloud.com"
] |
wt2933@icloud.com
|
92103249322b421545629318572a095a6464b746
|
46bd3e3ba590785cbffed5f044e69f1f9bafbce5
|
/env/lib/python3.8/site-packages/supervisor/tests/test_dispatchers.py
|
3f88376a16df1a07247d1fe031d2147a0cb4d10c
|
[] |
no_license
|
adamkluk/casper-getstarted
|
a6a6263f1547354de0e49ba2f1d57049a5fdec2b
|
01e846621b33f54ed3ec9b369e9de3872a97780d
|
refs/heads/master
| 2023-08-13T11:04:05.778228 | 2021-09-19T22:56:59 | 2021-09-19T22:56:59 | 408,036,193 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:b2039ef9d32ffde70df065c6a333cb150fa31e79786df3f98287dc41938ad1e1
size 53720
|
[
"a.klukowski@live.com"
] |
a.klukowski@live.com
|
7401b94189214c99484961a6a267429cd5e290fb
|
19f27f432b968521c7bee497a96f2b01963da293
|
/manage.py
|
0ff8346ecebe236c0d31d614ad2ceeab700db026
|
[] |
no_license
|
ethanlee6/myw
|
eae3eb751f4b06e06ce1dd2a21adf9272f1bf72f
|
74c60ebea5519c18d7495c2ee8064b4a576b9b89
|
refs/heads/master
| 2021-01-24T18:39:43.481407 | 2017-03-15T12:15:01 | 2017-03-15T12:15:01 | 84,459,667 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 775 |
py
|
import os
# flask.ext.* was removed in Flask 1.0; import the extensions directly
from flask_script import Manager, Server
from flask_script.commands import ShowUrls
from flask_migrate import Migrate, MigrateCommand
from webapp import create_app
from webapp.models import db, User, Post, Tag, Comment
# default to dev config
env = os.environ.get('WEBAPP_ENV', 'dev')
app = create_app('webapp.config.%sConfig' % env.capitalize())
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("server", Server())
#manager.add_command("show-urls", ShowUrls())
manager.add_command('db', MigrateCommand)
@manager.shell
def make_shell_context():
return dict(
app=app,
db=db,
User=User,
Post=Post,
Tag=Tag,
Comment=Comment
)
if __name__ == "__main__":
manager.run()
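
# Typical invocations (Flask-Script conventions, not documented in this file):
#   python manage.py server      # run the development server
#   python manage.py db migrate  # Flask-Migrate commands registered above
#   python manage.py shell       # shell preloaded via make_shell_context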
|
[
"lg_online@hotmail.com"
] |
lg_online@hotmail.com
|
133b5633b2d64eca82739da7b77732b3f48a0ac2
|
602bac701a96754f3bc1312add368c6dbd0c7fc3
|
/Home_Work_6.py
|
5f1c3e84413aaf7065a1ccd318d7b49be2a28425
|
[] |
no_license
|
AwsJup/TMS
|
9f5f404d10b0e9619ad5909de9f35214625a1aff
|
eab7af7c2cac21112cdf179dc2eaca6419b651ed
|
refs/heads/main
| 2023-04-29T04:26:58.503914 | 2021-05-03T13:31:19 | 2021-05-03T13:31:19 | 359,001,485 | 2 | 0 | null | 2021-05-13T14:54:21 | 2021-04-17T23:16:24 |
Python
|
UTF-8
|
Python
| false | false | 1,891 |
py
|
#1.
# The hotel has 3 room types: royal (2-3 rooms), lux (1-2 rooms), standard (1 room).
# Task: add a method that creates the rooms and stores them as a dictionary
# (a hedged sketch follows below).
class Hotel():
    def __init__(self, a, b, c):
        self.royal = a
        self.lux = b
        self.standard = c

rooms = Hotel(3, 2, 1)
print(rooms.__dict__)
print(' ')
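# Hedged sketch (not the author's solution; the method name make_rooms is an
# assumption): one way to satisfy task 1's "store the rooms as a dictionary".
class HotelWithRooms(Hotel):
    def make_rooms(self):
        # map each room type to how many rooms of that type the hotel has
        return {'royal': self.royal, 'lux': self.lux, 'standard': self.standard}

# e.g. HotelWithRooms(3, 2, 1).make_rooms() -> {'royal': 3, 'lux': 2, 'standard': 1}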
#2
# A room has furniture for the bathroom, the bedroom, and the living room (if any).
# Task: add a method for adding and removing any amount of furniture in a room.
class Royal():
def __init__(self, a, b, c):
self.bathroom = a
self.bedroom = b
self.living_room = c
def furniture(self, a, b, c):
print ('for delete: set -x, for add: set +x, for same: set 0')
print ('furniture for bathroom')
i = input()
if i == "0":
print(' ')
else:
self.bathroom = i
print ('furniture for bedroom')
i = input()
if i == "0":
print(' ')
else:
self.bedroom = i
print ('furniture for living_room')
i = input()
if i == "0":
print(' ')
else:
self.living_room = i
total = Royal('total in the bathroom', 'total in the bedroom', 'total in living_room')
total.furniture(1, 1, 1)
print (total.__dict__)
print(' ')
#3
# Task: create a single method that changes any room according to the given
# parameters, including deleting and modifying rooms (a hedged sketch follows).
class StandartRoom():
    pass
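# Hedged sketch (assumption, not the author's code): one method that adds,
# changes, or deletes any room attribute by name, as task 3 asks.
class ConfigurableRoom:
    def __init__(self, **rooms):
        self.rooms = dict(rooms)

    def change(self, name, value=None, delete=False):
        if delete:
            self.rooms.pop(name, None)  # remove the room if it exists
        else:
            self.rooms[name] = value    # add a new room or update an existing one

# e.g. r = ConfigurableRoom(bathroom='tub'); r.change('bedroom', 'double bed')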
|
[
"noreply@github.com"
] |
AwsJup.noreply@github.com
|
24876209f1755a36a8ecfd85f03399473ca3cc19
|
5aec711159e0270b0cb197bb8cbf34203ffbb585
|
/runLive.py
|
f6b03b5a0a53d45c57aa0efa6f5dc700c36ef6b5
|
[] |
no_license
|
korean-fingerspelling-recognition/hand_gesture_recognition
|
b429cc4a6b0429cdb567efdf1fd3ce2e137565b2
|
6fd8401449fcdbcd998ad40f0abe09c4f418bd5e
|
refs/heads/master
| 2020-05-28T00:24:29.870967 | 2019-06-14T13:21:10 | 2019-06-14T13:21:10 | 188,830,986 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,830 |
py
|
import tensorflow as tf
import parameters as par
import cv2
import numpy as np
from PIL import ImageOps, Image
saver = tf.train.import_meta_graph(par.saved_path + '501.meta')
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('./Saved/'))
# Get Operations to restore
graph = sess.graph
# Get Input Graph
X = graph.get_tensor_by_name('Input:0')
#Y = graph.get_tensor_by_name('Target:0')
# keep_prob = tf.placeholder(tf.float32)
keep_prob = graph.get_tensor_by_name('Placeholder:0')
# Get Ops
prediction = graph.get_tensor_by_name('prediction:0')
logits = graph.get_tensor_by_name('logits:0')
accuracy = graph.get_tensor_by_name('accuracy:0')
    # Grab frames from the default camera; open the device once, outside the
    # loop (re-opening it on every iteration is slow and can leak handles).
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
if ret:
cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0), 0)
crop_img = img[100:300, 100:300]
grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
value = (35, 35)
blurred = cv2.GaussianBlur(grey, value, 0)
_, thresh1 = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# cv2.imshow('title',thresh1)
thresh1 = (thresh1 * 1.0) / 255
thresh1 = Image.fromarray(thresh1)
thresh1 = ImageOps.fit(thresh1, [par.image_size, par.image_size])
if par.threshold:
testImage = np.reshape(thresh1, [-1, par.image_size, par.image_size, 1])
else:
testImage = np.reshape(thresh1, [-1, par.image_size, par.image_size, 3])
testImage = testImage.astype(np.float32)
testY = sess.run(prediction, feed_dict={X: testImage, keep_prob: 1.0})
print(testY)
else:
continue
|
[
"alee6868@kaist.ac.kr"
] |
alee6868@kaist.ac.kr
|
e8536703822ccf6a4476cc5edc141e2fc9760d95
|
0b7790afa5a651ffe1664a42befce7ba0c4a3041
|
/FrMaya/core/uimaya.py
|
08b53b9a8ce79d8ef6ef45fe65420b822a41ab17
|
[
"MIT"
] |
permissive
|
muhammadfredo/FrMaya
|
45c16ef00bb5a7512a43e87b4f2eed517927cbe9
|
5e56274e200374a9d3bf16e774e6bf05c075f325
|
refs/heads/master
| 2022-07-22T16:55:08.597522 | 2022-01-15T15:22:23 | 2022-01-15T15:22:23 | 91,210,526 | 10 | 0 |
MIT
| 2022-01-15T15:22:23 | 2017-05-14T00:33:53 |
Python
|
UTF-8
|
Python
| false | false | 9,828 |
py
|
"""
## SCRIPT HEADER ##
Created By : Muhammad Fredo
Email : muhammadfredo@gmail.com
Start Date : 12 May 2017
Refactor Date : 29 Agt 2020
Info :
"""
import os
import re
try:
import shiboken
except ImportError:
import shiboken2 as shiboken
from maya import mel as mel
from maya import OpenMayaUI as omui
from pymel import core as pm
from pymel.core import uitypes as pmui
from pymel.core import windows as pywin
from FrMaya.vendor.Qt import QtCore, QtCompat, QtWidgets
from FrMaya.core import system
SEPARATOR = '__separator__'
def get_menu_name(name):
"""Make any supplied string suitable/nice for menubar name.
:arg name: Name need to make nice for menubar.
:type name: str
:return: Nice menu name for menubar.
:rtype: str
"""
# get menu name and use it for menu identifier
regex = r"^(?:\d{2,3}_|)(\w+)"
result = re.match(regex, name)
menu_name = result.group(0)
if SEPARATOR in menu_name:
menu_name = menu_name.replace(SEPARATOR, '{}')
menu_name = menu_name.replace('_', ' ')
menu_name = menu_name.format(SEPARATOR)
return menu_name
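# Illustrative example (not in the original source):
#   get_menu_name('Edit__separator__More_Tools') -> 'Edit__separator__More Tools'
# plain underscores become spaces while the '__separator__' token survives the
# round-trip through str.format.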
class Menubar(pmui.SubMenuItem):
def __new__(cls, menubar_path, parent = None):
"""
Before initialize of the object,
create the menubar root, convert menubar_path to menubarName,
hook/parent it to maya menubar, and refresh the whole menubar if the menubar root already exist
:param cls: The class of this object
:param menubar_path: Path folder to collection of folder which will be used as menubar root
:param parent: Parent of menubar root, in this case $gMainWindow
"""
# give menubar proper name which is basename instead fullpath name
menubar_name = os.path.basename(menubar_path)
menu_name = get_menu_name(menubar_name)
# delete existing menu if the menu already exist
if pywin.menu(menu_name, ex = 1):
pywin.deleteUI(menu_name)
# build root menu and return it as pmui.SubMenuItem
self = pywin.menu(menu_name, label = menu_name, aob = True, tearOff = True, p = parent)
return pmui.SubMenuItem.__new__(cls, self)
def __init__(self, menubar_path, name = None, parent = None):
"""
An Object that handle building menubar in maya
:param menubar_path: Path folder to collection of folder which will be used as menubar root
:param name: Menubar root name
:param parent: Parent of menubar root, in this case $gMainWindow
"""
super(Menubar, self).__init__()
# Convert input variable to object variable
self.menubarPath = menubar_path
# Refresh menu each time the menuitem will opened
self.postMenuCommand(pm.Callback(self.refresh_menu))
def refresh_menu(self):
"""
Refresh the submenu each time cursor hover to this menubar root
"""
# Delete all submenu
self.deleteAllItems()
# Rebuild all submenu
self.build_sub_menu(self.menubarPath, self)
def build_sub_menu(self, fullpath, parent):
"""
Build submenu, can be recursive
:param fullpath: path folder to collection of folder or file
which will be used to create menuItem or Submenu
:param parent: submenu or menu parent which will be parented to
"""
# list all folder, file on current path(fullpath)
for dir_name in os.listdir(fullpath):
# fullpath of each file/folder inside current path(fullpath)
the_path = os.path.join(fullpath, dir_name).replace("\\", "/")
# separated filename and extension
file_name, ext = os.path.splitext(dir_name)
icon_file = get_icon_file(the_path)
# remove number and underline
# number and underline in folder for sorting purpose :))
try:
int(file_name[:1])
file_name = file_name[3:]
except ValueError:
pass
# get nice name for menu label
menu_name = get_menu_name(file_name)
# check if the path is file or folder
if os.path.isdir(the_path):
# create submenu
submenu = pywin.subMenuItem(
label = menu_name,
subMenu = True,
p = parent,
tearOff = True,
postMenuCommandOnce = 1,
image = icon_file
)
# recursive buildSubMenu
self.build_sub_menu(the_path, submenu)
elif SEPARATOR in menu_name:
pywin.menuItem(
label = menu_name.replace(SEPARATOR, ''),
p = parent,
divider = True,
)
# if file is python
elif ext == '.py' or ext == '.mel':
# command of menuitem
command_script = 'execfile("{0}")'.format(the_path)
if ext == '.mel':
command_script = 'import maya.mel as mel\nmel.eval( "source \\"{0}\\"" )'.format(the_path)
# create menuitem
pywin.menuItem(
label = menu_name,
p = parent,
tearOff = True,
command = command_script,
image = icon_file
)
def build_menubar():
"""Build menubar function"""
menubar_path_list = system.get_menubar_path()
print menubar_path_list
# get all menubar root item
menubar_list = []
for o in menubar_path_list:
menubar_list += o.listdir()
# get maya main window
main_window = mel.eval("$temp=$gMainWindow")
# build all menubar root item
for menubar_root in menubar_list:
Menubar(menubar_root, parent = main_window)
class MyQtWindow(QtWidgets.QWidget):
"""
Pyside base class for dialog window inside maya
"""
@staticmethod
def get_maya_window():
"""
Get maya window
"""
# Get maya main window pointer
maya_window_ptr = omui.MQtUtil.mainWindow()
# Wrap maya main window pointer as QWidget
if maya_window_ptr is not None:
return shiboken.wrapInstance(long(maya_window_ptr), QtWidgets.QWidget)
else:
return False
@staticmethod
def setup_ui(uifile, base_instance=None):
"""Load a Qt Designer .ui file and returns an instance of the user interface
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Returns:
QWidget: the base instance
"""
ui = QtCompat.loadUi(uifile) # Qt.py mapped function
if not base_instance:
return ui
else:
for member in dir(ui):
                if not member.startswith('__') and \
                        member != 'staticMetaObject':
setattr(base_instance, member, getattr(ui, member))
return ui
def __init__(self, ui_file, title_tool = '', *args, **kwargs):
"""
Pyside base class for dialog window inside maya
:param uiFile: UI file as 'BasePath' object
"""
# remove existing tool first
try:
pm.deleteUI(title_tool)
except Exception as e:
print e
# Init parent class
super(MyQtWindow, self).__init__(*args, **kwargs)
self.ui = None
self.mainLayout = None
# Get maya window to parent for current tool
maya_window = self.get_maya_window()
# qtwidgets.QWidget.__init__(self, MayaWindow)
if maya_window:
# Parent current tool to maya window
self.setParent(maya_window)
# Set usual window system frame,
# like title, min, and max bar
self.setWindowFlags(QtCore.Qt.Window)
# Set current window tool name
if not title_tool:
title_tool = ui_file.name
self.setWindowTitle(title_tool)
self.setObjectName(title_tool)
# Build UI tool from UI file
self.build_ui(ui_file)
def build_ui(self, ui_file):
"""
Building Pyside UI from UI file
:param ui_file: UI file as 'BasePath' object
"""
# Set main layout of the window
self.mainLayout = QtWidgets.QVBoxLayout()
self.mainLayout.setContentsMargins(4, 4, 4, 4)
self.setLayout(self.mainLayout)
# Load the UI file
self.ui = self.setup_ui(ui_file.abspath())
# Add loaded UI to main layout
self.mainLayout.addWidget(self.ui)
# Set window size the same as size from UI file
size = self.ui.size()
self.resize(size.width(), size.height())
def docking(self, direction = 'left'):
title_tool = self.objectName()
width_tool = self.size().width()
pm.dockControl(
title_tool,
label = title_tool.replace("_", " "),
area = direction,
content = title_tool,
width = width_tool,
allowedArea = ['right', 'left']
)
def get_icon_file(full_file_path):
file_path, ext = os.path.splitext(full_file_path)
icon_type_list = ['svg', 'ico', 'png']
for each in icon_type_list:
icon_file = '{}.{}'.format(file_path, each)
if os.path.exists(icon_file):
return icon_file
return ''
def get_maya_window():
maya_window = next(o for o in QtWidgets.QApplication.instance().topLevelWidgets() if o.objectName() == "MayaWindow")
return maya_window
|
[
"muhammadfredo@gmail.com"
] |
muhammadfredo@gmail.com
|
f2930b4534b8fe5e0354c0b127f12fe0f96cd0c1
|
aeb6a66a70271cea4c724885b857fd07358ec6e5
|
/vfp_web_server/datasets_4_min.py
|
2c51bda6c8e7b7060e75034d2064dc66b830e850
|
[] |
no_license
|
pdMM11/Tese
|
b900febed04e9d6b863affa16eea3190e5f5867c
|
41219be91e0df8b66d8a95fe408c6339c6fcb71b
|
refs/heads/master
| 2022-11-15T01:58:01.160873 | 2020-07-10T18:49:02 | 2020-07-10T18:49:02 | 278,682,542 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,572 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 15:19:37 2020
@author: pedro
"""
import pandas as pd
import numpy as np
import os
def size_families_4_min(filename="Seqs_prots.csv"):
dataset = pd.read_csv(filename)
dataset['Family'] = dataset['Family'].str.strip()
dataset_3_in_family = pd.DataFrame({'Family' : [],
'Sequence_fusogenic': [],
'Sequence': []})
families_multiple_align = []
for i in dataset.Family.unique():
dataset_family = dataset.loc[dataset['Family'] == i]
if dataset_family.shape[0] > 3:
families_multiple_align.append((i, dataset_family.shape[0]))
return families_multiple_align
def vfp_seqs(filename="dataset1_usar.csv", families = "Seqs_prots.csv"):
colnames=['meta', 'fp']
dataset = pd.read_csv(filename, names=colnames, header=None)
dataset_family = pd.read_csv(families)
dataset_family['Family'] = dataset_family['Family'].str.strip()
families = [x.lower() for x in list(dataset_family['Family'].unique())]
for i in range(len(dataset['meta'])):
for j in families:
if j in str.lower(dataset['meta'][i]):
dataset.loc[i, 'meta'] = j
break
        if '_' in dataset['meta'][i]:
            temp = dataset['meta'][i].split('_')
            dataset.loc[i, 'meta'] = temp[len(temp)-1].lower()
dataset = dataset.sort_values(by=['meta'])
return dataset
def read_weblogo(weblogoDf, fusionpeptide, max_align):
best_score = 0.0
pos = 0
for i in range(weblogoDf.shape[0]-len(fusionpeptide)):
current_score = 0.0
for j in range(len(fusionpeptide)):
#current_score += (weblogoDf.loc[i+j,str(fusionpeptide[j])]/max_align
# )*weblogoDf.loc[i+j,'Entropy']
current_score += ((np.log2(20) - weblogoDf.loc[i+j,'Entropy'])
* weblogoDf.loc[i+j,str(fusionpeptide[j])]/max_align)
if current_score + np.log2(20) * (len(fusionpeptide) - j) < best_score:
break
if current_score > best_score:
best_score = current_score
pos = i
return best_score
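# In symbols (matching the loop above): a window starting at column i scores
#   score(i) = sum_j (log2(20) - H[i+j]) * f[i+j](aa_j) / max_align,
# i.e. each fusion-peptide residue aa_j is weighted by the information content
# of its alignment column; read_weblogo returns the best window's score.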
def weblogo_sequence(sequence, filename='weblogo.txt', n_seqs=5, window_size = 15):
weblogoDf = pd.read_csv(filename, skiprows=7, sep='\t')
weblogoDf = weblogoDf[:-1]
columns = []
for i in weblogoDf.columns:
j = i.replace(' ','')
columns.append(j)
weblogoDf.columns = columns
# max_align = weblogoDf[columns[1:len(columns)-4]].max().max()
results = {}
for i in range(len(sequence)-window_size + 1):
results[str(i)+'-'+str(i+window_size-1)] = read_weblogo(weblogoDf, sequence[i:i+window_size], n_seqs) /window_size
return results
def weblogo_family(sequence, window_size=15):
families = size_families_4_min()
seq_dict = {}
print(sequence)
for i in families:
seq_dict[i[0]] = weblogo_sequence(sequence, i[0] + '_weblogo.txt', i[1], window_size)
return seq_dict
def scores_dataset():
dataset = vfp_seqs()
results_output = {}
for index, row in dataset.iterrows():
results_output[row['meta']+ str(index)]=weblogo_family(row['fp'], len(row['fp']))
return results_output
if __name__ == '__main__':
results_output=scores_dataset()
|
[
"46798433+pdMM11@users.noreply.github.com"
] |
46798433+pdMM11@users.noreply.github.com
|
92d96fdbf6933afe9c592bf0775c56eb914262e7
|
b011755718e66729e3e8b90b2dc4155a29db3ea6
|
/autoTagAnalyze.py
|
4a57c4c477b2d4cae267249c2073931de1c81197
|
[] |
no_license
|
Shbinging/Project-RSS
|
c4a8e08dd1a572abe349a077366538d29a1fb5a4
|
73b0efc849b4d3fe12c08ce12af87aa10b8cacc0
|
refs/heads/master
| 2022-08-01T22:56:48.748600 | 2020-05-31T10:37:35 | 2020-05-31T10:37:35 | 265,760,791 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,245 |
py
|
from adeq import makeConfig;
from adeq import adeqSql;
import jieba
import re
from timeAnalyze import timesplit;
from audienceAnalyze import audiencesplit;
class tagAnalyze:
    def __init__(self, trainName, fromData=0, fromDataTag=0, Config=0, wordbagName=0, isToNewSet=0):  # source data table, tag column to read, training DB config, analysis DB, create-new flag
if (trainName == "time" or trainName == "audience"):
return;
self.fromData = fromData;
self.fromDataTag = fromDataTag;
self.trainName = trainName;
self.wordBagName = wordbagName;
self.wordBagData = adeqSql(Config, wordbagName);
if (isToNewSet):
self.trainData = adeqSql(Config, "train_"+trainName);
self.trainData.createTable("train_"+trainName);
self.analyzeData = adeqSql(Config, "analyze_"+trainName);
self.analyzeData.createTable("analyze_"+trainName);
self.sumData = adeqSql(Config, "sum_"+ trainName);
self.sumData.createTable("sum_"+trainName);
self.prepareContextTable();
self.prepareAnalyzeTable();
self.prepareSumTable();
else:
self.trainData = adeqSql(Config, "train_"+trainName);
self.analyzeData = adeqSql(Config, "analyze_"+trainName);
self.sumData = adeqSql(Config, "sum_"+ trainName);
def clearContextTable(self):
self.trainData.clearTable();
def clearAnalyzeTable(self):
self.analyzeData.clearTable();
def clearSumTable(self):
self.sumData.clearTable();
def setContextTableNotAnalyze(self):
n = self.trainData.getTableSize();
for i in range(1, n):
self.trainData.edit("id", i, "isAnalyze", 0);
    def prepareContextTable(self, tagName = "@", value = ""):  # isNew: 1 = rebuild everything by hand, 0 = mark already-trained tags as untrained
isNew = 0;
if (isNew):
self.trainData.clearTable();
self.trainData.createColumn("isAnalyze", "int DEFAULT 0");
self.trainData.createColumn("title", "VARCHAR(200)");
self.trainData.createColumn("tag", "VARCHAR(200)");
self.trainData.createColumn("number", "int");
        # import the titles from the source table
n = self.fromData.getTableSize();
for i in range(1, n):
if (tagName != "@"):
tag = self.fromData.queryXY("id", i, tagName)[0];
if (tag != value):
continue;
title = self.fromData.queryXY("id", i, self.fromDataTag)[0];
self.trainData.insertKey("title", title);
self.trainData.edit("id", i, "isAnalyze", 0);
def prepareSumTable(self):
a = self.getwordBag();
print(a);
self.sumData.insertKey(a[0], 0);
for tagWord in a:
self.sumData.createColumn(tagWord, "int default 0");
            self.sumData.edit("id", 1, tagWord, 0);
def getwordBag(self):
a = self.wordBagData.queryXY("tag", self.trainName, "word")[0].split(',');
return a;
def getwordFilter(self):
a = self.wordBagData.queryXY("tag", self.trainName, "filter")[0].split(',');
return a;
    def analyzeByHand(self, l, r, isOverload = 0):  # label rows l..r by hand; isOverload: 1 = overwrite existing tags, 0 = keep them (ids start at 1)
for i in range(l, r):
title = self.trainData.queryXY("id", i, "title")[0];
status = self.trainData.queryXY("id", i, "tag")[0];
            if (status is not None and isOverload == 0):  # skip titles that were already labelled
continue;
            # print the title and the tag choices; only hand-label titles the
            # current model classifies as "考试" (exam)
            if (self.refind(title)[0] != "考试"):
                continue;
            print(title);
a = self.getwordBag();
for j in range(0, len(a)):
print(j, a[j], end = ' ');
print();
            # record the chosen tag
id = int(input());
if (id == -1):
continue;
self.trainData.edit("id", i ,"tag", a[id]);
self.trainData.edit("id", i, "isAnalyze", 0);
#self.trainData.printTable();
    def prepareAnalyzeTable(self, isNew = 1):  # isNew: 1 = rebuild from scratch
self.analyzeData.clearTable();
self.analyzeData.createColumn("word", "VARCHAR(200)");
a = self.getwordBag();
print(a);
for tagWord in a:
self.analyzeData.createColumn(tagWord, "int default 0");
def filterWord(self, title):
a = self.getwordFilter();
for word in a:
tmpSt = '';
while (tmpSt != title):
tmpSt = title;
title = title.replace(word, "");
return title;
def analyzeTrain(self):
n = self.trainData.getTableSize();
ss = 0;
for i in range(1, n):
title = self.trainData.queryXY("id", i, "title")[0];
            ## filter out time expressions
            timer = timesplit();
            title = timer.analyze(title);
            ## filter out grammar and noise words
            title = self.filterWord(title);
            ## word-frequency counting
seg_list = jieba.lcut(title);
tmp = self.trainData.queryXY("id", i, "tag")[0];
if (tmp == None): continue;
#if (tmp == "其他"):
# continue;
if (tmp == "课程"):
ss += 1;
s1 = self.sumData.queryXY("id", 1, tmp)[0];
s1 += 1;
self.sumData.edit("id",1, tmp, s1);
seg_list = list(set(seg_list));
for word in seg_list:
if (self.analyzeData.hasKey("word", word)):#如果该词语存在就加1
s = self.analyzeData.queryXY("word", word, tmp)[0];
s += 1;
self.analyzeData.edit("word", word, tmp, s);
                else:  # otherwise insert the word
self.analyzeData.insertKey("word", word);
self.analyzeData.edit("word", word, tmp, 1);
print(ss);
def sum(self, b):
s = 0;
for i in b:
s += i;
return s;
def sum1(self, b):
s = 0;
for i in b:
s += i[0];
return s;
def f(self, word, pattern):
a = self.getwordBag();
d = self.analyzeData.queryH("word", word);
if (not self.analyzeData.hasKey("word", word)):
return 0;
d = d[0][2:];
ans = 0;
e = self.analyzeData.queryXY("word", word, pattern)[0];
if (self.sum(d) < 2):
return 0;
if (self.sum(d) != 0):
ans = e/ self.sum(d);
d = self.analyzeData.queryL(pattern);
if (self.sum1(d) != 0):
ans = ans * e /self.sum1(d);
return ans*100;
    def fBayes(self, segList, pattern):  # tag weights via naive Bayes with Laplace smoothing
a = self.getwordBag();
# d = self.analyzeData.queryH("word", word);
b = self.sumData.queryH("id", 1);
        tagSum = len(b[0][1:]);  # number of tag categories
        s1 = self.sum(b[0][1:]);  ## total occurrences across all tags
        s2 = 0;  ## total occurrences across all words
s5 = self.sumData.queryXY("id", 1, pattern)[0];
for word in segList:
if (not self.analyzeData.hasKey("word", word)):
continue;
s2 += self.sum(self.analyzeData.queryH("word", word)[0][2:]);
#for word in segList:
        pPattern = (self.sumData.queryXY("id", 1, pattern)[0]+1) / (s1+2);  # smoothed prior of this tag
        ss = pPattern;  # running Bayes score
        tmp = self.analyzeData.queryL(pattern);
        s3 = self.sum1(tmp);  # occurrences of all words under this tag
wordSum = len(tmp);
for word in segList:
if (not self.analyzeData.hasKey("word", word)):
continue;
ss = ss*1/(s5+2);
ss = ss/(1/(wordSum));
#ss = ss/(1/(s1+2));
else:
tmp1 = self.analyzeData.queryXY("word", word, pattern)[0];
s4 = self.sum(self.analyzeData.queryH("word", word)[0][2:]);##某个单词出现总次数;
#if (tmp1 == 0 or s4 == 0):continue;
ss = ss*(tmp1+1)/(s5+2);
#ss = ss/((s4+1)/(s1 + 2));
ss = ss/((s4+1)/(s3 + wordSum));
#print(word, tmp1, s3, (tmp1+1)/(s3+2));
#print(word, 1/((tmp1+1)/(s4 + tagSum)));
return ss;
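    # For reference (textbook naive Bayes with Laplace smoothing, which the
    # arithmetic above approximates):
    #   P(tag | w1..wn) is proportional to P(tag) * prod_i P(w_i | tag),
    #   with P(w_i | tag) = (count(w_i, tag) + 1) / (count(tag) + |V|),
    # where |V| is the vocabulary size.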
def refind1(self, title):
a = self.getwordBag();
timer = timesplit();
title = timer.analyze(title);
title = self.filterWord(title);
seg_list = jieba.lcut(title);
mx = -1;
ans = '';
for pattern in a:
#s = 1;
#for word in seg_list:
# s *= self.f(word, pattern);
#if (pattern == "交流"):
#print(pattern , word, self.f(word, pattern));
s = self.fBayes(seg_list, pattern);
if (s > mx):
mx = s;
ans = pattern;
list = [];
list.append(ans);
list.append(1);
return list;
def refind(self, title):
a = self.getwordBag();
timer = timesplit();
title = timer.analyze(title);
title = self.filterWord(title);
seg_list = jieba.lcut_for_search(title);
mx = -1;
ans = '';
for pattern in a:
s = 0;
for word in seg_list:
s += self.f(word, pattern);
#if (pattern == "交流"):
#print(pattern , word, self.f(word, pattern));
#print(pattern, s, sep = ' ');
if (s > mx):
mx = s;
ans = pattern;
list = [];
list.append(ans);
ans1 = self.refind1(title)[0];
if (ans1 == ans):
list.append(0);
else:
list.append(1);
return list;
def rfindTime(self, rst, backTime):#2012-12-11
timeana = timesplit();
timer = timeana.getTime(rst);
if (timer[1] != -1):
st = '';
year = 0;
if (timer[1] == 1):
st = "秋季学期";
year = timer[2][0];
if (timer[1] == 2):
st = "春季学期";
year = timer[2][0] - 1;
return str(year)+"-"+str(year+1)+"学年"+st;
st = '';
if ("暑假" in rst):
st = "暑假";
if ("寒假" in rst):
st = "寒假";
a = list(map(int, backTime.split('-')));
if (7 <=a[1] and a[1] <=8 and st ==''):
st = "暑假";
if (9 <= a[1] and a[1] <=12 or a[1] == 1):
st = "秋季学期";
if (2 <= a[1] and a[1] <= 6):
st = "春季学期";
year = 0;
if (timer[2][0] != -1):
year = timer[2][0];
else:
year = a[0];
if (st == '暑假' or st == '寒假'):
return str(year) + st;
if (st == "秋季学期"):
return str(year) + "-" + str(year+1) + "学年" +st;
if (st == "春季学期"):
return str(year-1) + "-" + str(year) + "学年" + st;
def rfindAudience(self, rst, backTime):
a = audiencesplit();
return a.find(rst, backTime);
if (__name__ == "__main__"):
config = makeConfig("notification");
fromData = adeqSql(config, "test2");
config1 = makeConfig("tag");
name = "column";
a = tagAnalyze(name, fromData, "title", config1, "wordbag", 0);
st = "关于“悦读经典计划”悦读学分认定的说明(2019-2020学年第一学期)";
print(a.refind(st));
print(a.refind1(st));
#print(a.analyzeData.queryXY("id", 1, "网络考试"));
######################################################
    # add per-tag classification statistics
"""
name = "课程_activity";
a = tagAnalyze(name, fromData, "title", config1, "wordbag", 0);
a.clearSumTable();
a.clearAnalyzeTable();
a.prepareSumTable();
a.analyzeTrain();
a.sumData.printTable();
a.analyzeData.printTable();
"""
######################################################
|
[
"1436775971@qq.com"
] |
1436775971@qq.com
|
950f460f7d6a093c23d6a461d86389f6b4d95872
|
fc22bd2ff1791ef1203ab3f0bb3a92fc25744b0f
|
/scripts/smms_merge.py
|
a34ea8b0715f9e523e0025b9a8601a9be0e4f3af
|
[] |
no_license
|
ndrubins/harvard-informatics
|
7c460856483ce679cc51c3a818e26f2708ae4fff
|
917e6f6746cd041a6f69aa58e5f67e7716af3306
|
refs/heads/master
| 2021-01-15T11:56:34.638972 | 2013-02-14T21:14:15 | 2013-02-14T21:14:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,162 |
py
|
import os, sys, re, operator, xlrd, string, time
import settings
from django.core import management
from django.db import models
from django.db.models import *
from django.contrib.auth.models import User
from spinal_website.apps.reservations.models import ResourceReservation
from spinal_website.apps.resources.models import *
from spinal_website.apps.auth_active_directory.helper_classes import *
from spinal_website.apps.auth_active_directory.ldap_connection import *
from spinal_website.apps.auth_active_directory.models import ActiveDirectoryDomainInfo
from datetime import datetime
def email_to_user(email):
ldap_filter_str = '(&(objectClass=person)(mail=%s*))' % (email)
conn = LdapConnection(RC_DOMAIN) # from apps.auth_active_directory.ldap_connection.py
#conn = LdapConnection(NUCLEUS_DOMAIN) # from apps.auth_active_directory.ldap_connection.py
result = conn.search(filter=ldap_filter_str, search_fields_to_retrieve=MEMBER_ATTRIBUTE_LIST)
conn.unbind()
    if result is None or len(result) == 0:
return
for user_entry in result:
if len(user_entry) < 2:
msgx('user entry missing member info data')
continue
mi = MemberInfo(user_entry[1])
if mi.sAMAccountName:
return mi.sAMAccountName
def parse_user_file(filename):
if not os.path.isfile(filename):
print "User file [%s] not found" % filename
return
file = open(filename,'r')
flines = file.readlines()
flines = map(lambda x: x.strip(), flines)
users = {}
count = 0
for line in flines:
f = line.split('\t');
if (len(f) > 3):
if len(f) > 1:
login = f[1].lower()
users[login] = {}
users[login]['login'] = login
else:
print "No login name - skipping line %s" % line
if len(f) > 0:
group = f[0]
users[login]['group'] = group
if len(f) > 2:
name = f[2]
users[login]['name'] = name
if len(f) > 3:
email = f[3]
users[login]['email'] = email
if len(f) > 4:
groupid = f[4]
users[login]['groupid'] = groupid
if len(f) > 5:
pi = f[5]
users[login]['pi'] = pi
else:
print "Wrong number of fields %d in line[%s]" % (len(f),line)
count = count+1
return users
def parse_file(filename,start_time,end_time,users):
if not os.path.isfile(filename):
print "ESI-TOF usage file [%s] not found" % filename
return
log = {}
file = xlrd.open_workbook(filename)
sh = file.sheet_by_index(0)
print sh.name, sh.nrows, sh.ncols
for rx in range(sh.nrows):
login = sh.cell_value(rx,2).lower()
group = sh.cell_value(rx,3)
sample = sh.cell_value(rx,4)
runtype = sh.cell_value(rx,5)
file1 = sh.cell_value(rx,6)
file2 = sh.cell_value(rx,8)
wiff = sh.cell_value(rx,9)
tmp1 = sh.cell_value(rx,10)
time1 = sh.cell_value(rx,11)
time2 = sh.cell_value(rx,12)
time3 = sh.cell_value(rx,13)
runstart= sh.cell_value(rx,14)
runend = sh.cell_value(rx,15)
tmp2 = sh.cell_value(rx,16)
tmp3 = sh.cell_value(rx,17)
tmp4 = sh.cell_value(rx,18)
notes = sh.cell_value(rx,20)
email = ""
user = ""
if users.has_key(login) and users[login].has_key('email'):
email = users[login]['email']
tmp = email_to_user(email)
if tmp != None:
user = tmp
if log.has_key(runtype) == False:
log[runtype] = list()
tmp = {}
tmp['login'] = login
tmp['group'] = group
tmp['sample'] = sample
tmp['runtype'] = runtype
tmp['file1'] = file1
tmp['file2'] = file2
tmp['wiff'] = wiff
tmp['time1'] = time1
tmp['time2'] = time2
tmp['time3'] = time3
tmp['email'] = email
tmp['user'] = user
p = re.compile("\d+/\d+/\d+ \d+:\d+:\d+")
if p.search(runstart):
tmp['start_time'] = datetime.strptime(runstart,"%m/%d/%y %H:%M:%S")
else:
tmp['start_time'] = None
if p.search(runend):
tmp['end_time'] = datetime.strptime(runend,"%m/%d/%y %H:%M:%S")
else:
tmp['end_time'] = None
tmp['notes'] = notes
log[runtype].append(tmp)
return log
def get_reservations(start_time,end_time):
res = Resource.objects.filter(name__icontains="ESI-TOF")
reserv = list()
for r in res:
tmpreserv = ResourceReservation.objects.filter(resource=r,start_time__gte=start_time,end_time__lt=end_time)
for t in tmpreserv:
tmp = {}
tmp['start_time'] = t.start_time
tmp['end_time'] = t.end_time
tmp['reservation'] = t
reserv.append(tmp)
return reserv
def cluster_log_and_reservations(log,res):
clus = list()
current_clus = None
print ""
#############################################################################
# 1 ) These are the log entries without a timestamp - we can't cluster these
#############################################################################
for l in log:
for i in range(len(log[l])):
if log[l][i]['start_time'] == None:
print "no_timestamp\t" + "%s"%log[l][i]['login'] + "\t" + "%s"%log[l][i]['runtype'] + "\t" + "%s"%log[l][i]['sample']
else:
res.append(log[l][i])
print ""
#############################################################################
# 2 ) Now we sort the log entries and reservations by start time and cluster
#############################################################################
res = sorted(res, key = lambda tmp: tmp['start_time'])
for r in res:
if current_clus == None:
current_clus = {}
current_clus['entries'] = list()
current_clus['start_time'] = r['start_time']
current_clus['end_time'] = r['end_time']
current_clus['entries'].append(r)
clus.append(current_clus)
else:
if r['start_time'] < current_clus['end_time']:
current_clus['entries'].append(r)
if (r['start_time'] < current_clus['start_time']):
current_clus['start_time'] = r['start_time']
if (r['end_time'] > current_clus['end_time']):
current_clus['end_time'] = r['end_time']
else:
current_clus = {}
current_clus['entries'] = list()
current_clus['start_time'] = r['start_time']
current_clus['end_time'] = r['end_time']
current_clus['entries'].append(r)
clus.append(current_clus)
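    # Illustration of the merge rule above (not from the source): entries at
    # 09:00-10:00, 09:30-11:00 and 12:00-13:00 collapse into two clusters:
    # [09:00-11:00] (the first two overlap) and [12:00-13:00].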
count = 1
######################################################
# 3 )We loop over the clusters and print to the screen
######################################################
for c in clus:
cluslen = c['end_time']-c['start_time']
s = "%d" % len(c['entries'])
s += '\t'
s += "%s" % (c['start_time'])
s += '\t'
s += "%s" % (c['end_time']) + "\t"
s += "Length " + "%s" % (cluslen) + "\n"
has_reservation = False
has_log = False
log_user = None
res_user = None
user = None
email = None
clus_type = "None"
cstr = ""
c['entries'] = sorted(c['entries'], key = lambda tmp: tmp['start_time'])
head_str = "{0:15s}".format("Entry type")
head_str += "\t" + "{0:15s}".format("Length")
head_str += "\t" + "{0:19s}".format("Start")
head_str += "\t" + "{0:19s}".format("End")
head_str += "\t" + "{0:20s}".format("User")
head_str += "\t" + "Resource"
string_val = "=" * 132
        for entry in c['entries']:
            user = "-"
            entry_type = "-"
            resource = ""
            start_time = "%s" % (entry['start_time'])
            end_time = "%s" % (entry['end_time'])
            cluslen = "{0:15s}".format(entry['end_time'] - entry['start_time'])
            if entry.has_key('reservation'):
                entry_type = 'reserv'
                has_reservation = True
                user = entry['reservation'].lab_user.user.username
                user = re.sub("^rc_", "", user)
                user = "{0:20s}".format(user)
                resource = "%s" % (entry['reservation'].resource)
            elif entry.has_key('runtype'):
                entry_type = 'log'
                has_log = True
                resource = entry['runtype']
                if entry.has_key('user') and entry['user'] != None and entry['user'] != "":
                    user = "{0:20s}".format(entry['user'])
                elif entry.has_key('email'):
                    user = "{0:20s}".format(entry['email'])
                elif entry.has_key('login'):
                    user = "{0:20s}".format(entry['login'])
            entry_type = "{0:15s}".format(entry_type)
            cstr += entry_type + "\t" + cluslen + "\t" + start_time + "\t" + end_time + "\t" + user + "\t" + "%s" % (resource) + "\n"
if has_reservation and has_log:
clus_type = "both"
elif has_reservation:
clus_type = "reserv"
elif has_log:
clus_type = "log"
print "\n"+string_val+ "\nCluster number " + "%s"%(count) + "\tType: " + clus_type + "\tNumber of Entries " + s + string_val
print head_str + "\n" + cstr
count = count+1
def get_first_day_of_next_month(year,month):
tmpmonth = month+1
tmpyear = year
if tmpmonth == 13:
tmpmonth = 1
tmpyear = year+1
return datetime(tmpyear, tmpmonth, 1)
def help():
print "This script takes the usage log from the ESI-TOF machine and compares it to the reservations in SPINAL."
print "Both the log entries and the reservations are clustered by time and displayed."
print "Each log file is expected to contain one month's usage and the year and month need to be input on the command line"
print "\nUsage: python smms_merge.py <userfile> <logfile.xls> <year> <month>"
print "\nThe userfile contains the mapping from ESI-TOF username to email. It is tab-delimited and of the format :\n"
print "Group Login Name Full Name Email Group ID PI"
print "AEC AECohen Adam E Cohen cohen@chemistry.harvard.edu AEC Adam E Cohen"
print "AEC APFields Alexander P Fields fields@fas.harvard.edu AGM Andrew Myers"
print "\nThe logfile is the output logfile from the ESI-TOF machine"
print "The year is the full year e.g. 2012"
print "The month is the number i.e. 1-12\n"
if __name__ == '__main__':
if len(sys.argv) != 5:
help()
sys.exit(0)
userfile = sys.argv[1]
filename = sys.argv[2]
year = int(sys.argv[3])
month = int(sys.argv[4])
start_time = datetime(year,month,1)
end_time = get_first_day_of_next_month(year,month)
str = "Parsing user file [%s]" % userfile
users = parse_user_file(userfile)
str += " - found %s users" % len(users)
print str
str = "Parsing log file [%s] from %s - %s" % (filename,start_time,end_time)
log = parse_file(filename,start_time,end_time,users)
numlog = 0;
    for l in log:
        numlog += len(log[l])  # number of log entries for this run type
str += " - found %d log entries" % numlog
print str;
str = "Fetching reservations"
res = get_reservations(start_time,end_time)
str += " - found %d reservations" % len(res)
print str
clus = cluster_log_and_reservations(log,res)
|
[
"michele.clamp@gmail.com"
] |
michele.clamp@gmail.com
|
f69489224cc04d36c7466b095f812c528f824242
|
9d2a3d18a0674154ced275ef5e1981f087b5ed02
|
/fifthWeek/python_30.py
|
95350d6c6ee16ed71543c6e1baee435096135e66
|
[] |
no_license
|
EthAlenazi/python_learning
|
b8720f1774b7f5465508fc38abdb174a1159e7a5
|
6eb08319a412953efe1f2f6f07b7f7b9021c34a6
|
refs/heads/master
| 2020-07-08T13:45:48.403867 | 2019-11-25T19:34:19 | 2019-11-25T19:34:19 | 203,692,725 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,281 |
py
|
Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 19:29:22) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> for X in range(20,2):
print(X)
>>> for X in range(0,20,2):
print(X)
0
2
4
6
8
10
12
14
16
18
>>> A =20
>>> for X in range(A):
print(X)
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
>>> for X in range(5,A):
print(X)
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
>>> for X in range(5,A,3):
print(X)
5
8
11
14
17
>>> B=['A','B','C','D','E','F']
>>> for x in A:
for y in B:
print(A,B)
Traceback (most recent call last):
File "<pyshell#16>", line 1, in <module>
for x in A:
TypeError: 'int' object is not iterable
>>> A=[1,2,3,4,5,6]
>>> for x in A:
for y in B:
print(A,B)
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
[1, 2, 3, 4, 5, 6] ['A', 'B', 'C', 'D', 'E', 'F']
>>> A=[1,2,3]
>>> B=[4.5.6]
SyntaxError: invalid syntax
>>> B=[4,5,6]
>>> for x in A:
for y in B:
print(A,B)
[1, 2, 3] [4, 5, 6]
[1, 2, 3] [4, 5, 6]
[1, 2, 3] [4, 5, 6]
[1, 2, 3] [4, 5, 6]
[1, 2, 3] [4, 5, 6]
[1, 2, 3] [4, 5, 6]
[1, 2, 3] [4, 5, 6]
[1, 2, 3] [4, 5, 6]
[1, 2, 3] [4, 5, 6]
>>> for x in A:
for y in B:
print(x,y)
1 4
1 5
1 6
2 4
2 5
2 6
3 4
3 5
3 6
>>> A=[1,2,3,4,5,6]
>>> B=['A','B','C','D','E','F']
>>> for x in A:
for y in B:
print(x,y)
1 A
1 B
1 C
1 D
1 E
1 F
2 A
2 B
2 C
2 D
2 E
2 F
3 A
3 B
3 C
3 D
3 E
3 F
4 A
4 B
4 C
4 D
4 E
4 F
5 A
5 B
5 C
5 D
5 E
5 F
6 A
6 B
6 C
6 D
6 E
6 F
>>> for D in range (0,15,5):
print(D)
else:
print('done')
0
5
10
done
>>>
|
[
"QoQ1213.1417@gmail.com"
] |
QoQ1213.1417@gmail.com
|
8fc3df572aa8224634a3036713c4a33e193207a7
|
7154eaaeba8bb85663fb3be2fe87bff02afb9f92
|
/app.py
|
f981c6b29a3001db5fd0b0c18697dea32737e686
|
[
"MIT"
] |
permissive
|
Lynxgsm/nvidia-bot
|
a3dfafeff774ee3e9c07f9ad7633df30ff3b47c5
|
6f26946532ffd607eb505996773e4dcda0df853a
|
refs/heads/master
| 2022-12-23T01:35:57.717623 | 2020-09-22T09:54:08 | 2020-09-22T09:54:08 | 297,620,499 | 1 | 0 |
MIT
| 2020-09-22T10:55:36 | 2020-09-22T10:55:35 | null |
UTF-8
|
Python
| false | false | 64 |
py
|
from cli import cli
if __name__ == "__main__":
cli.main()
|
[
"hari@nagarajan.io"
] |
hari@nagarajan.io
|
bb4411845beac8ed6a855d3894786bb21f41fa05
|
5179b07b8d1a31df18612ce55d35c56b851cead8
|
/tools/train.py
|
b0290aace7813a3edf21acd4895698b235e05300
|
[
"Apache-2.0"
] |
permissive
|
hamidehkerdegari/VFS
|
3e9c427c4a8ae0a6b66a3a1378bac5c6f9daaf51
|
8e055cc191578706f05b7484facf44be6fb1525a
|
refs/heads/master
| 2023-08-24T09:40:46.678233 | 2021-09-26T18:24:38 | 2021-09-26T18:24:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,658 |
py
|
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import init_dist, set_random_seed
from mmaction import __version__
from mmaction.apis import train_model
from mmaction.datasets import build_dataset
from mmaction.models import build_model
from mmaction.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a recognizer')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--load-from', help='the checkpoint file to load weights from')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--auto-resume',
action='store_true',
help='automatically resume training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--validate',
        action='store_true',
        help='whether to evaluate the checkpoint during training')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--suffix', type=str, help='work_dir suffix')
parser.add_argument(
'--disable-wandb', action='store_true', help='disable wandb')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
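
# Example launch (illustrative; the config path and work dir are placeholders):
#   python tools/train.py configs/some_config.py --work-dir work_dirs/demo --seed 0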
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
print('cudnn_benchmark=True')
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority:
# CLI > config file > default (base filename)
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.suffix is not None:
cfg.work_dir = f'{cfg.work_dir}-{args.suffix}'
for i, h in enumerate(cfg.log_config.hooks):
if h.type == 'WandbLoggerHook':
if args.disable_wandb:
cfg.log_config.hooks.pop(i)
break
if args.suffix is not None:
wandb_dir = cfg.log_config.hooks[i].init_kwargs.dir
cfg.log_config.hooks[i].init_kwargs.dir = f'{wandb_dir}-' \
f'{args.suffix}'
mmcv.mkdir_or_exist(cfg.log_config.hooks[i].init_kwargs.dir)
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
elif args.auto_resume:
if osp.exists(osp.join(cfg.work_dir, 'latest.pth')):
cfg.resume_from = osp.join(cfg.work_dir, 'latest.pth')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config: {cfg.text}')
logger.info(f'Config.pretty_text: {cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info('Set random seed to {}, deterministic: {}'.format(
args.seed, args.deterministic))
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
logger.info(f'Model: {str(model)}')
datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        # note: this parser defines no --validate flag, so guard the lookup
        if getattr(args, 'validate', False):
            warnings.warn('val workflow is duplicated with `--validate`, '
                          'it is recommended to use `--validate`. see '
                          'https://github.com/open-mmlab/mmaction2/pull/123')
val_dataset = copy.deepcopy(cfg.data.val)
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmaction version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmaction_version=__version__, config=cfg.text)
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=False,
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
|
[
"xvjiarui0826@gmail.com"
] |
xvjiarui0826@gmail.com
|
9fe34c793ca1599d35af65ee91e407e0892adffb
|
d041d67aabef027c8f53e6d357a1af67e6b6149b
|
/ecommerce_project/api/serializers.py
|
957efac448caf9ad8408c4aab88c3bf0943693fc
|
[] |
no_license
|
ajeena-joseph/2restapi
|
5c7e06bd29293ad3c34a214668170612eede4235
|
b1f325eba195185dd9fdbbc5a468c62d8dc2636f
|
refs/heads/master
| 2023-07-15T20:44:12.616252 | 2021-08-27T15:06:46 | 2021-08-27T15:06:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
from rest_framework import serializers
from api.models import User, Category, Product
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = '__all__'
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__'
|
[
"salomyajeena@gmail.com"
] |
salomyajeena@gmail.com
|
80dcbb07b4eae97a3da6754b53fad8a053aa294d
|
079acf54f514ae0ee5c1b6605df81f7936c27f23
|
/rewyndapp/pull_scripts/by_term.py
|
20b2da8a69f19a9f87e25d8f5dcb8af234aeba8f
|
[
"MIT"
] |
permissive
|
c-o-89/RewyndT
|
cc65bed83b454ac070d43545a58445dfa01a1b88
|
09fe6b6f2124d705470306ed2a56035c60e4aa53
|
refs/heads/master
| 2020-03-30T00:32:24.955548 | 2018-10-10T04:34:50 | 2018-10-10T04:34:50 | 150,529,371 | 0 | 1 |
MIT
| 2018-10-04T07:04:42 | 2018-09-27T04:36:49 |
Python
|
UTF-8
|
Python
| false | false | 1,699 |
py
|
import subprocess
import json
import urllib
import urllib.parse
# twurl must have already been authorized using the "authorize" subcommand
def encode_url(url, qs):
    # guard the empty case first; indexing qs[last-1] on an empty list
    # would raise IndexError
    if not qs:
        return url
    qs_str = ''
    last = len(qs)
    for i in range(last-1):
        qs_str += qs[i][0]+'='+qs[i][1]+'"&"'
    qs_str += qs[last-1][0]+'='+qs[last-1][1]
    return "{}?{}".format(url, qs_str)
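# For example (the quoted "&" keeps the shell from backgrounding the twurl
# command, since it is run with shell=True):
#   encode_url('/1.1/search/tweets.json', [('q', 'foo'), ('count', '5')])
#   returns '/1.1/search/tweets.json?q=foo"&"count=5'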
def search(url, qs):
statuses = []
counter = 0
tweetcount = 0
while True:
counter += 1
print(counter)
full_url = encode_url(url, qs)
command = "twurl " + full_url
print(command)
obj = json.loads(subprocess.check_output(command, shell=True))
if "statuses" in obj:
statuses.extend(obj["statuses"])
search_metadata = obj["search_metadata"]
tweets = search_metadata.get("count")
tweetcount += int(tweets)
next_results = search_metadata.get("next_results")
if next_results is None:
print("End of pagination")
msg = "Parsed {} tweets".format(str(tweetcount))
print(msg)
break
else:
print("Parsing page")
qs = urllib.parse.parse_qsl(next_results[1:])
print(next_results)
print(qs)
else:
print("Statuses not found")
break
return statuses
statuses = search('/1.1/search/tweets.json', [
('q', '#insecurehbo'),
('count', '500'),
('result_type', 'recent')
])
out_file = open('output.json', 'a+')
out_file.write(json.dumps(statuses, indent=2))
print("Alright, all done.")
out_file.close()
|
[
"chukwumaokpalugo@hotmail.com"
] |
chukwumaokpalugo@hotmail.com
|
968b121b2d3bd581090efdc594f6eb784540a2d4
|
e30e8d488c8e0a123840e1ae57fde098dc5402da
|
/Lectures/Lecture 5 Files - Call By Object and Parameters/parameters7.py
|
55bc4f5e773348a85a052ff7dd7326a2e91f806d
|
[] |
no_license
|
donaldjvillarreal/Python
|
7c064fea0d9359eeaeb390ddf9aaf6ae186cbfe5
|
68a05b2b61ac4224ad7ea7af8583932b0a5fd0b3
|
refs/heads/master
| 2022-11-19T12:11:34.546655 | 2015-05-26T15:05:05 | 2015-05-26T15:05:05 | 280,269,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
def another_function(**kwargs):
for key, value in kwargs.items():
print("The key %s contains the number %d" % (key, value))
if 'x' in kwargs and 'y' in kwargs:
print("The sum of x and y is %d" % (kwargs['x']+kwargs['y']))
print("")
another_function(z=5, y=9, x=10)
my_dict = {'x':4, 'y':5, 'z':6}
another_function(**my_dict)
my_dict = {'apple':7, 'banana':8}
another_function(**my_dict)
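# For the first call, kwargs == {'z': 5, 'y': 9, 'x': 10}, so the sum branch
# prints 19; the final call has no 'x'/'y' keys, so only the key/value
# lines are printed.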
|
[
"donaldjvillarreal@Donalds-MacBook-Pro.local"
] |
donaldjvillarreal@Donalds-MacBook-Pro.local
|
8a73c785a44ece6263c3e40dfde840832bed6655
|
65c03709b91ce8f006641b30d481b4fda651520e
|
/Coding/3_indexing_slicing.py
|
a52c46b665b5ac657b828965eb9a307d71a3bd84
|
[] |
no_license
|
ahad-emu/python-code
|
332121ad289b169ca8099c88bde13d7121be1030
|
135805c78de38eaf1bd5500b44625b36b7b653c0
|
refs/heads/master
| 2020-09-09T01:01:41.313964 | 2020-07-04T16:31:37 | 2020-07-04T16:31:37 | 221,296,928 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 502 |
py
|
#indexing....
my_string = "hello World"
print(my_string)
print(my_string[0]) #index zero
print(my_string[7]) #index seven
print(my_string[8]) #index eight
print(my_string[-1]) #last index
print(my_string[-2]) #second last index
#slicing....
my_string = "ABCDEFGHIJKL"
print(my_string)
print(my_string[2:]) #index two to last
print(my_string[:3]) #index zero to two
print(my_string[2:6]) #index 2 to 5
print(my_string[::2]) #every second character
print(my_string[::-1]) #reverse
|
[
"ahademu99@gmail.com"
] |
ahademu99@gmail.com
|
f54b7d59d1864cda1ea6a4cd32a98d9cab063635
|
6ca10769948e2eb678f24f37500e5df5aa27b02a
|
/tests/src/gretel_client/unit/transformers/transformers/test_redact_with_char.py
|
5e7daae19170ea84e21c2ca92c26dfbe8abb57a3
|
[
"Python-2.0",
"Apache-2.0"
] |
permissive
|
markanethio/gretel-python-client
|
cc04a61ee04b674be4293baa89589b95c3c86843
|
f910a64550fd6fba75f6d347f2a1251694dbde80
|
refs/heads/master
| 2023-06-25T01:36:54.985590 | 2021-06-17T16:13:06 | 2021-06-17T16:13:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,081 |
py
|
from gretel_client.transformers import RedactWithCharConfig
from gretel_client.transformers.base import factory
from gretel_client.transformers.string_mask import StringMask
def test_redact_with_char():
mask_last_name = StringMask(mask_after=' ')
mask_first_name = StringMask(mask_until=' ')
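    # mask_after=' ' masks every character after the first space (the last
    # name); mask_until=' ' masks everything up to that space (the first
    # name); the assertions below illustrate both behaviors.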
redact_with_char_config = RedactWithCharConfig(labels=['ip_address'], mask=[mask_last_name])
xf = factory(redact_with_char_config)
record = xf.transform_field("person_name", "John Doe", None)
assert record == {'person_name': 'John XXX'}
redact_with_char_config = RedactWithCharConfig(labels=['ip_address'], mask=[mask_first_name])
xf = factory(redact_with_char_config)
record = xf.transform_field("person_name", "John Doe", None)
assert record == {'person_name': 'XXXX Doe'}
redact_with_char_config = RedactWithCharConfig(labels=['ip_address'], mask=[mask_first_name, mask_last_name])
xf = factory(redact_with_char_config)
record = xf.transform_field("person_name", "John Doe", None)
assert record == {'person_name': 'XXXX XXX'}
|
[
"noreply@github.com"
] |
markanethio.noreply@github.com
|
c7c22f8b63c323b6da21a1eedb6146f61c738cc2
|
94f1cef5daa1dcfd65b7fc5180e61ffaad73346b
|
/main.py
|
3ebd3586a5233a93c7c8388ee51a1e502a4fe3e9
|
[] |
no_license
|
LaZyRaifur/Chicken-escape-py
|
e7ab7ec5d5eb9ffa69f75a4bf8aa65ea278f327f
|
3d48313f7513b841d57e6a4e19aac8ca30e53be7
|
refs/heads/main
| 2023-03-31T16:33:56.265698 | 2021-04-12T08:08:23 | 2021-04-12T08:08:23 | 357,107,950 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,858 |
py
|
import pygame
import random
screen_size = [360, 600]
screen = pygame.display.set_mode(screen_size)
pygame.font.init()
background = pygame.image.load('background.png')
user = pygame.image.load('user.png')
chicken = pygame.image.load('chicken.png')
user_x = 150
user_y = 520
score = 0
def display_score(score):
font = pygame.font.SysFont('Comic Sans MS',30)
score_text = 'Score: '+ str(score)
text_img = font.render(score_text,True, (0,255,0))
screen.blit(text_img, [20,10])
def random_offset():
return -1*random.randint(100, 2000)
chicken_y = [random_offset(),random_offset(),random_offset()]
def update_chicken_position(idx):
    global score
    if chicken_y[idx] > 600:
        chicken_y[idx] = random_offset()
        score = score + 5
        print('score', score)
    else:
        chicken_y[idx] = chicken_y[idx] + 5
def crashed(idx):
    global score
    global keep_alive
    score = score - 5
    chicken_y[idx] = random_offset()
    if score < 0:
        keep_alive = False
keep_alive = True
clock = pygame.time.Clock()
while keep_alive:
pygame.event.get()
keys = pygame.key.get_pressed()
if keys[pygame.K_RIGHT] and user_x < 300:
user_x += 10
elif keys[pygame.K_LEFT] and user_x > 0:
user_x -= 10
update_chicken_position(0)
update_chicken_position(1)
update_chicken_position(2)
screen.blit(background, [0,0])
screen.blit(user, [user_x,user_y])
screen.blit(chicken, [0,chicken_y[0]])
screen.blit(chicken, [150,chicken_y[1]])
screen.blit(chicken, [280,chicken_y[2]])
if chicken_y[0] > 500 and user_x < 70:
crashed(0)
if chicken_y[1] > 500 and user_x > 80 and user_x < 200:
crashed(1)
if chicken_y[2] > 500 and user_x > 220:
crashed(2)
display_score(score)
pygame.display.update()
clock.tick(20)
|
[
"raifurrahimcse13@gmail.com"
] |
raifurrahimcse13@gmail.com
|
ca1587a2f58e1798a90ae2c3a6ff57057ec9e10c
|
d9d8d3eeecfc22ca951a7feeca0e591edeba27a2
|
/tests/test_virtual_assistant.py
|
7ff513920fbcbc8fafc3afded3c3ddafc9ec8b2f
|
[
"BSD-3-Clause"
] |
permissive
|
john-james-ai/virtual-assistant
|
a5222854e82032f9922988800e4477791b7e6ac5
|
3e0aceeb82b284e7c19d18ae19e5ee778c058702
|
refs/heads/master
| 2022-12-07T23:45:49.980429 | 2020-09-05T14:26:08 | 2020-09-05T14:26:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 583 |
py
|
#!/usr/bin/env python
"""Tests for `virtual_assistant` package."""
import pytest
from virtual_assistant import virtual_assistant
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
|
[
"john.james@nov8.ai"
] |
john.james@nov8.ai
|
4a5a21bca80596cb5bea44e1aef99e00fcf93c2c
|
4c7d5602910e3b929ed127549210be6fd746302b
|
/practical-python/Solutions/9_5/porty-app/print-report.py
|
afe330af94326f125eaf352041d47a2775eea058
|
[
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jianchengwang/todo-python
|
8b57d5ce45ea855c8c1413024e82add2267bc9c2
|
36bdaf6fae714531946047ececca995d60f86e4a
|
refs/heads/master
| 2023-03-07T06:22:22.301555 | 2021-11-26T07:08:02 | 2021-11-26T07:08:02 | 199,950,188 | 0 | 0 |
MIT
| 2023-03-05T16:12:56 | 2019-08-01T00:55:21 |
Python
|
UTF-8
|
Python
| false | false | 98 |
py
|
#!/usr/bin/env python3
# print-report.py
import sys
from porty.report import main
main(sys.argv)
|
[
"jiancheng_wang@yahoo.com"
] |
jiancheng_wang@yahoo.com
|
25a234f2613dc95192ae0c914d933f8ad33937c8
|
e7fc555f547a2f311aec3f7371d8c0d55a05a9eb
|
/Auto_hydro_breaklines/connected_component_analysis.py
|
a058797dec0a204815e48c8d9a3bc27398a4bb28
|
[] |
no_license
|
JHusefest/Auto-hydro-breaklines-LIDAR
|
d3b8e5359f815edd771ec3b4b30bc1a19c759a9d
|
dcca25cea4ec326c412f665128826e65bfa9c2ff
|
refs/heads/master
| 2020-05-26T05:06:49.509354 | 2017-03-03T21:12:22 | 2017-03-03T21:12:22 | 82,465,066 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,776 |
py
|
import matplotlib.pyplot as plt
import numpy as np
from skimage.segmentation import clear_border
from skimage.measure import label, find_contours, regionprops
from skimage.morphology import remove_small_holes
from skimage.restoration import denoise_bilateral, denoise_tv_chambolle
from analyze_elevation import range_peaks
from load_dem_hist import Loaddems
from lidar_prep import Preperation
from skimage.external.tifffile import imshow
class Analyze_Elevation:
def __init__(self, elevation_data, int_data, scale):
self.elevation_data = elevation_data.astype(float)
self.int_data = int_data
self.scale = scale
def without_threshold_with_contours(self):
#remove artifacts connected to image border
        cleared = clear_border(self.elevation_data.copy())
        #Insert peaks_left1 from analyze peaks
        range_peaks1, range_peaks2, range_peaks3, range_peaks4 = range_peaks(self.elevation_data, self.scale)
for peak in range_peaks1:
print(peak)
for k in peak[1]:
try:
hey = l.search_coords_by_key(k)
for i, j in hey:
cleared[i, j] = 0
except KeyError:
                    print('Elevation does not exist', peak)
#Denoising DEM
denoised = denoise_bilateral(cleared, win_size=20, multichannel=False)
# Finds countours within 6 meteres elevation.
contours = find_contours(denoised, level=6, fully_connected='high')
# Put all of the countours as HIGH VALUE. This will make threshold in elev_reomve_spikes() better. #
for i in contours:
for j, k in i.astype(int):
denoised[j, k] = 100
labeled = label(denoised)
remove_holes = remove_small_holes(labeled, 1000)
new_labeled = label(remove_holes, background=1)
return new_labeled
### Removes artifacts like spikes and small holes in image ###
def elev_remove_spikes(self):
region_labeled = self.without_threshold_with_contours()
regions = regionprops(region_labeled, self.elevation_data)
for region in regions:
area = region.area * 2 * 2
if (area < 4026):
region_labeled = np.where(region_labeled == region.label, 0, region_labeled)
zero_area = np.unique(region_labeled)
if len(zero_area) == 1:
return None
else:
final_elev = remove_small_holes(region_labeled, 50)
return final_elev
class Analyze_Int:
def __init__(self, int_data):
self.int_data = int_data
def show_int(self):
        cleared = clear_border(self.int_data.copy())
hey = np.where(cleared < 20, 0, cleared)
h = np.where(cleared > 200, 800, hey)
champ = denoise_tv_chambolle(h, weight=0.002, multichannel=False)
yeah = np.where(champ < 0.002, 0, champ)
contours = find_contours(yeah, level=0.001, fully_connected='high')
for i in contours:
for j, k in i.astype(int):
hey[j, k] = 100
region_labeled = label(hey)
regions = regionprops(region_labeled, intensity_image=self.int_data)
for region in regions:
area = region.area * 2 * 2
if area < 2026/3:
region_labeled = np.where(region_labeled == region.label, 0, region_labeled)
zero_area = np.unique(region_labeled)
if len(zero_area) == 1:
return None
else:
imshow(region_labeled)
return region_labeled
def show_keep_double(self):
        cleared = clear_border(self.int_data.copy())
contours = find_contours(cleared, level=0.1, fully_connected='high')
for i in contours:
for j, k in i.astype(int):
cleared[j, k] = 1000
labeled = label(cleared)
remove_holes = remove_small_holes(labeled, 300, connectivity=2)
labeled2 = label(remove_holes, background=1)
rm_inner_holes = remove_small_holes(labeled2, 400, connectivity=2)
plt.show()
return rm_inner_holes
class CombineData:
    def __init__(self, int_regions, elev_regions):
        # check the both-None case first; after a plain elev_regions-is-None
        # branch it would be unreachable
        if elev_regions is None and int_regions is None:
            print('Neither region set worked..')
        elif elev_regions is None:
            self.elev_regions = int_regions
        else:
            self.elev_regions = elev_regions
        self.int_regions = int_regions
def show_regions(self):
imshow(self.elev_regions, 'Elevation Regions')
imshow(self.int_regions, 'Intensity Regions')
plt.show()
if __name__ == "__main__":
p = Preperation("32-1-503-109-11.laz", "/Users/joakimtveithusefest/Documents/master_env/kragero_drangeland/422/data")
#p.run_all()
elev_file, int_file = p.return_dems()
l = Loaddems(elev_file, int_file)
try:
elevation_array, int_data_array, scale = l.return_data()
a = Analyze_Elevation(elevation_array, int_data_array, scale)
elev_regions = a.elev_remove_spikes()
j = Analyze_Int(int_data_array)
int_regions = j.show_keep_double()
combined = CombineData(int_regions, elev_regions)
combined.show_regions()
except TypeError:
        print('There is ONLY water here', str(elev_file))
#a = Analyze_Elevation(elevation_array, int_data_array, scale)
#elev_regions = a.elev_remove_spikes()
#j = Analyze_Int(int_data_array)
#int_regions = j.show_keep_double()
#combined = CombineData(int_regions, elev_regions)
#combined.show_regions()
|
[
"joakim.tveit.husefest@nmbu.no"
] |
joakim.tveit.husefest@nmbu.no
|
d1194035877ccf46cd000542fa0cb83f128378d8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2847/60900/255175.py
|
9f7f8927fa27a8621f5be9e8716e364de835126c
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 192 |
py
|
n = input()
str1 = input()
nums = str1.split(" ")
str2 = input()
nums2 = str2.split(" ")
count = 0
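# sums the 1-based positions nums2[0] .. nums2[1]-1 of nums
# (the upper bound is exclusive as written)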
for i in range(int(nums2[0]),int(nums2[1])):
count = count + int(nums[i-1])
print(count)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
57f42bd7a79058202025336c929b0d848371477c
|
6b41f96990a528867c84e10c5f1d1299e56cbfa3
|
/python/move-subscriber
|
bba56ff1a55474e0e4c151f192c757cacd6dcf3a
|
[
"BSD-3-Clause"
] |
permissive
|
fancychimp/public-api-examples
|
16c977e6ecd533513d87dd9ea537c932e1d2fcbe
|
fe593a51402dd13469fb5decfefa6cc06dcbb31d
|
refs/heads/master
| 2020-04-15T17:49:13.265093 | 2019-01-04T18:13:11 | 2019-01-04T18:13:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,646 |
#!/usr/bin/env python
from __future__ import print_function
import json
import sys
import requests
from requests_oauthlib import OAuth1Session
with open('credentials.json', 'rt') as f:
credentials = json.load(f)
session = OAuth1Session(
credentials['consumer_key'],
client_secret=credentials['consumer_secret'],
resource_owner_key=credentials['access_token'],
resource_owner_secret=credentials['token_secret'])
def get_collection(url):
collection = []
while url:
response = session.get(url)
response.raise_for_status()
body = response.json()
collection.extend(body.pop('entries'))
# if there is a next link, there are more pages to retrieve
next_link = body.get('next_collection_link')
url = next_link if next_link else None
return collection
# get all the accounts entries
account_url = 'https://api.aweber.com/1.0/accounts'
accounts = get_collection(account_url)
# get all the list entries for the first account
lists = get_collection(accounts[0]['lists_collection_link'])
if len(lists) < 2:
print('You must have 2 lists to move a subscriber!')
sys.exit()
# pick the list to move the subscriber from and to
origin_list = lists[0]
destination_list = lists[1]
subscribers = get_collection(origin_list['subscribers_collection_link'])
# pick the subscriber we want to move (guard against an empty list first,
# since indexing would raise IndexError before the check)
if not subscribers:
    print('You must have a subscriber on list: {}!'.format(
        origin_list['name']))
    sys.exit()
subscriber = subscribers[0]
data = {'ws.op': 'move', 'list_link': destination_list['self_link']}
try:
# attempt to move the subscriber to the second list
move_response = session.post(subscriber['self_link'], json=data)
move_response.raise_for_status()
print('Moved subscriber {} from list: {} to list: {}'.format(
subscriber['email'], origin_list['name'], destination_list['name']))
except requests.RequestException as e:
error = e.response.json()['error']
status = e.response.status_code
# An error will contain the following information:
# type, message, documentation_url and status
# The error type is most often an HTTP reason.
# The error message gives you the most detail about an error.
# The documentation_url is a link to the error explanation.
# The status is the error HTTP status code.
# For more info see: https://api.aweber.com/#tag/Troubleshooting
print('Could not move subscriber!')
print('( {} {} ) {}'.format(status, error['type'], error['message']))
print('Follow this link for more information: {}'.format(
error['documentation_url']))
|
[
"amberh@aweber.com"
] |
amberh@aweber.com
|
|
568ba07dd81a56bf1728395122d6d1f372265fca
|
906476efa1f015cd287d30569fdfacead6570646
|
/tensorflow_model/example/example1.py
|
d0fcdbe002fcf1f1772fc2f1ef0ffae0112a8050
|
[] |
no_license
|
NewDolphin/deep_model
|
173218eee9ff0efcde4867533749500ffa87d234
|
a6e697ad3620c74112df11e5fc8a2332cc183f11
|
refs/heads/master
| 2022-04-18T15:17:55.068840 | 2020-04-14T12:00:57 | 2020-04-14T12:00:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 368 |
py
|
import math
import numpy as np
import random as rn
import sklearn.metrics as skm
import tensorflow as tf
# reductions and an elementwise square on a small matrix
x = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
y = tf.reduce_mean(x, axis=1, keepdims=True)
z = tf.reduce_sum(x, 1)
u = tf.square(x)
with tf.Session() as sess:
# print(sess.run(y))
# print(sess.run(z))
print(sess.run(u))
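    # sess.run(u) yields the elementwise squares:
    # [[ 1.  4.]
    #  [ 9. 16.]
    #  [25. 36.]]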
|
[
"yqisong1992@163.com"
] |
yqisong1992@163.com
|
5412884383a24527b37ea1598d51e24ea48eaf2c
|
b5fa57919ce7d71284106505c88d0d438195049e
|
/equipos/admin.py
|
34de715f36f9c29173a2e3580404bf6793f3484a
|
[] |
no_license
|
ninja2410/final_django
|
aac9217970bf83071bfa500859bbb67281bc949b
|
eb6dc6fe274b889d8aef044177d9aed329de0b10
|
refs/heads/master
| 2020-04-04T22:51:23.746479 | 2018-11-07T06:09:15 | 2018-11-07T06:09:15 | 156,338,309 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 216 |
py
|
from django.contrib import admin
from equipos.models import Jugador, JugadorAdmin, Equipo, EquipoAdmin
# Register your models here.
admin.site.register(Jugador, JugadorAdmin)
admin.site.register(Equipo, EquipoAdmin)
|
[
"pablo.felg1996@gmail.com"
] |
pablo.felg1996@gmail.com
|
efce3becadd80a8ff880f40a1c7f9c87dd5e9bab
|
e00350c05fb573a6987fdbabe249c168cd063654
|
/Projects/Milestone 2/BlackJack.py
|
cad52db98d9030a9930aad44c55e46cd0401f892
|
[] |
no_license
|
david13pod/Python-Development
|
0417919432627ec061da2c5e550593f9a90a5060
|
5ecbc8d381cd93c574ab1e6342fea47af17d263a
|
refs/heads/main
| 2023-03-23T02:44:48.008692 | 2021-03-15T23:25:27 | 2021-03-15T23:25:27 | 337,376,448 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 16,483 |
py
|
from IPython.display import clear_output
clear_output()
class Gamerules():
def carddeck (self):
from random import shuffle
        #define three standard 52-card decks: 13 ranks x 4 suits x 3 decks
        ranks = [2,3,4,5,6,7,8,9,10,'Jack','Queen','King','Ace']
        self.deck4 = ranks * 12
shuffle(self.deck4)
def instructions (self):
        print(f'{self.name} welcome to Blackjack game \nThis is a card game, you will be playing against the '
'dealer. \nThese are the rules of the game: \n\n1) You are given 2 random cards by the dealer and the dealer has 2 cards too\n'
'The decisions you can make after receiving your cards: \nHit - collect more cards from the dealer \nStand'
' - refuse to collect more cards from the dealer \nSplit - only possible if you have cards of same values'
' only after the dealer has given you your 2 cards, your bet is doubled and you receive another'
' card \nDouble - your bet is doubled and you are given another card by the dealer on each split \nSurrender -'
' the game ends immediately with you only getting half your bet back \n\n2) You win if you have Blackjack'
            '(if the sum of your cards is 21) or if the total value of your cards is greater than the dealer\'s'
            '\n\n3) You lose if the dealer\'s total is greater \n\n4) You draw if you have the same value as the dealer, however'
            ' if the number of cards is not the same, the person with the fewest cards wins\n\n')
def winn (self):
self.winnings= self.pbet * 1.5
if self.splita != 'checkcheck':
self.funddep += self.winnings
print(f'You won your bet, balance is {self.funddep}')
elif self.splita == 'checkcheck':
self.funddep += self.winnings *0.5
print(f'You won your bet, balance is {self.funddep}')
def half (self):
self.surrend = self.pbet * 0.5
self.funddep += self.surrend
print(f'You have lost half of your bet, balance is {self.funddep}')
def dcredit (self):
if self.splita != 'checkcheck':
self.funddep += self.pbet
            print(f'You drew, your bet is returned, balance is {self.funddep}')
elif self.splita == 'checkcheck':
self.funddep += self.pbet
print(f'You drew, a hand bet is returned, balance is {self.funddep}')
def gbet(self):
if self.funddep >= self.pbet:
self.funddep -= self.pbet
            print(f'you have placed a bet of {self.pbet}')
            print(f'you still have {self.funddep} available for betting')
else:
            print(f'you cannot place any bet, you are low on funds. Your balance is {self.funddep}')
def details (self):
print(f'Player: {self.name}')
return f'Account balance: {self.funddep}'
def blackjack (self):
if self.playersum == 21 and 'blackjack' == self.decision:
print (f'{self.name} has blackjack')
Gamerules.winn(self)
Gameplay.endgame(self)
else:
print ('you do not have blackjack as sum of your card is not 21. Make another decision')
Gameplay.decisions (self)
def surrender (self):
if 'surrender' == self.decision:
print (f'{self.name} has surrendered')
Gamerules.half(self)
def split (self):
if 'split' == self.decision and self.cardn[0] == self.cardn[1] and len(self.cardn1) == 0 :
if self.funddep >= 2*self.pbet:
self.funddep -= self.pbet
                print (f'{self.name} has split cards')
self.cardn1.append(self.cardn[0])
self.cardn2.append(self.cardn[1])
                print(f'you have placed an extra bet of {self.pbet}')
                print(f'you still have {self.funddep} available for betting')
self.cardn1.append(self.deckcards[self.gamecount])
self.gamecount +=1
self.cardn2.append(self.deckcards[self.gamecount])
self.gamecount +=1
print(f'{self.name} here are your 2 sets of cards: Hand 1 {self.cardn1} and Hand 2 {self.cardn2}')
choice = int(input('which hand do you want to focus on 1 or 2 ? '))
for op in [1,2]:
if choice == 1:
                        print('You are now focusing on hand 1')
self.splita='checkcheck'
self.playersum = sum(self.cardn1)
self.cardn = self.cardn1
Gameplay.cvalues(self)
Gameplay.checkstatus1 (self)
if self.splitas == 'roll':
choice = 2
continue
Gameplay.decisions (self)
Gameplay.cvalues(self)
choice = 2
self.cardnh1 = self.cardn
elif choice ==2:
                        print('You are now focusing on hand 2')
                        self.splita='checkcheck'
                        # mirror the hand-1 branch: downstream code reads
                        # self.playersum and self.cardn
                        self.playersum = sum(self.cardn2)
                        self.cardn = self.cardn2
Gameplay.cvalues(self)
Gameplay.checkstatus1 (self)
if self.splitas == 'roll':
choice = 1
continue
Gameplay.decisions (self)
Gameplay.cvalues(self)
choice = 1
self.cardnh2 = self.cardn
else:
                print(f'You cannot split, you are low on funds. Your balance is {self.funddep}, make another decision')
Gameplay.decisions (self)
else:
            print('Your two cards do not have the same value - also, you can only split or double on the first two cards. Make another decision')
Gameplay.decisions (self)
def double (self):
if 'double' == self.decision and len(self.cardn) <=2 :
if self.funddep >= 2*self.pbet:
self.funddep -= self.pbet
                print(f'you have placed an extra bet of {self.pbet}')
                print(f'you still have {self.funddep} available for betting')
self.cardn.append(self.deckcards[self.gamecount])
print (f'you have drawn card: {self.deckcards[self.gamecount]}')
self.gamecount +=1
else:
                print(f'You cannot place an extra bet, you are low on funds. Your balance is {self.funddep}. Make another decision')
Gameplay.decisions (self)
else:
print('You can only split or double the first two cards. Make another decision')
Gameplay.decisions (self)
def stand(self):
if self.decision == 'stand':
print (f'{self.name} has decided to stand ')
def hit (self):
if self.decision == 'hit':
self.cardn.append(self.deckcards[self.gamecount])
print (f'you have drawn card: {self.deckcards[self.gamecount]}')
self.gamecount +=1
class Gameplay(Gamerules):
def __init__(self):
self.cardn =[]
self.dealern =[]
self.cardn1=[]
self.cardn2=[]
self.gamecount=0
self.dealersum = sum(self.dealern)
self.playersum = sum(self.cardn)
self.end =''
self.name = input('Please enter your name: ')
def start (self):
self.decision=''
self.cardnh1 =[]
self.cardnh2 = []
self.splita='notcheck'
self.splitas = 'ntrolls'
Gamerules.instructions(self)
Gamerules.carddeck(self)
self.deckcards=self.deck4
self.funddep = int(input('Enter the total amount you have available for playing the game: '))
def bet (self):
self.pbet = int(input('Enter your bet for this round: '))
from IPython.display import clear_output
clear_output()
Gamerules.gbet(self)
def distribute (self):
self.cardn =[]
self.dealern =[]
self.cardn1=[]
self.cardn2=[]
self.gamecount=0
for i in [1,2]:
self.cardn.append(self.deckcards[self.gamecount])
self.gamecount +=1
self.dealern.append(self.deckcards[self.gamecount])
self.gamecount +=1
print(f'{self.name} here are your cards: {self.cardn}')
print(f'This is one of the dealers card: {self.dealern[0]}')
def distributedealer (self):
dist = True
self.dealersum = sum(self.dealern)
while dist:
if self.dealersum <= 16:
self.dealern.append(self.deckcards[self.gamecount])
Gameplay.cvalues(self)
self.gamecount +=1
self.dealersum = sum(self.dealern)
if self.dealersum >= 16:
dist= False
break
else:
dist= False
break
Gameplay.cvalues(self)
def cvalues (self):
self.carddict = {'Jack':10, 'Queen': 10, 'King': 10, 'Ace':[1,11]}
facecard =['Jack','Queen','King','Ace']
dealercard=0
for ii in range(0,len(self.cardn)):
for fc in facecard:
if fc == 'Ace' and fc in self.cardn:
adecide= int(input('What value do you want your Ace card to be 1 or 11 ? '))
self.cardn.remove(fc)
self.cardn.append(adecide)
elif fc != 'Ace' and fc in self.cardn:
self.cardn.remove(fc)
self.cardn.append(self.carddict[fc])
for jj in range(0,len(self.dealern)):
for kk in self.dealern:
if isinstance(kk, int):
dealercard +=kk
for fc in facecard:
if fc == 'Ace' and fc in self.dealern:
if dealercard <= 10:
self.dealern.remove(fc)
self.dealern.append(11)
elif dealercard > 10:
self.dealern.remove(fc)
self.dealern.append(1)
elif fc != 'Ace' and fc in self.dealern:
self.dealern.remove(fc)
self.dealern.append(self.carddict[fc])
def checkstatus1 (self):
self.end =''
self.dealersum = sum(self.dealern)
self.playersum = sum(self.cardn)
if self.dealersum == 21 and self.playersum != 21:
print (f'Dealer has blackjack 21, Dealer wins the game with {self.dealersum}')
self.end = 'ender'
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif self.splita == 'checkcheck':
self.splitas = 'roll'
elif self.playersum == 21 and self.dealersum != 21:
print (f'Blackjack {self.name} wins the game with {self.playersum}')
self.end = 'ender'
Gamerules.winn(self)
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif self.splita == 'checkcheck':
self.splitas = 'roll'
elif self.playersum == 21 and self.dealersum == 21:
print (' Game is a draw ')
self.end = 'ender'
Gamerules.dcredit(self)
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif self.splita == 'checkcheck':
self.splitas = 'roll'
def checkstatus (self):
self.end =''
self.dealersum = sum(self.dealern)
self.playersum = sum(self.cardn)
if self.dealersum > 21 and self.playersum <21:
            print (f'Dealer busted, {self.name} wins the game with {self.playersum}')
self.end = 'ender'
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif self.playersum > 21 and self.dealersum <21:
            print (f'{self.name} busted, Dealer wins the game with {self.dealersum}')
self.end = 'ender'
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif self.dealersum > self.playersum and self.dealersum <= 21:
print (f'Dealer wins the game with {self.dealersum}')
self.end = 'ender'
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif self.dealersum < self.playersum and self.playersum <= 21:
print (f'{self.name} wins the game with {self.playersum}')
self.end = 'ender'
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif self.dealersum == self.playersum and self.playersum <= 21:
if len(self.dealern) > len(self.cardn):
print (f'{self.name} wins the game with {self.playersum}')
self.end = 'ender'
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif len(self.dealern) < len(self.cardn):
print (f'Dealer wins the game with {self.dealersum}')
self.end = 'ender'
if self.splita != 'checkcheck':
Gameplay.endgame(self)
elif len(self.dealern) == len(self.cardn):
print ('The game is a draw!')
self.end = 'ender'
if self.splita != 'checkcheck':
Gameplay.endgame(self)
def decisions (self):
print ('Here are your options')
print ('blackjack, surrender, split, double, stand, hit ... Note you cannot split more than once ')
        self.decision = input('Please input your decision here: ')
if 'blackjack' == self.decision:
Gamerules.blackjack(self)
elif 'surrender' == self.decision:
Gamerules.surrender(self)
self.end = 'ender'
Gameplay.endgame(self)
elif 'split' == self.decision:
Gamerules.split(self)
Gameplay.distributedealer(self)
self.hand = 1
for sp in [1,2]:
if self.hand == 1:
self.cardn = self.cardnh1
Gameplay.checkstatus(self)
self.hand+=1
elif self.hand == 2:
self.cardn = self.cardnh2
Gameplay.checkstatus(self)
self.hand+=1
self.end = 'ender'
Gameplay.endgame(self)
elif 'double' == self.decision:
Gamerules.double(self)
Gameplay.cvalues(self)
elif 'stand' == self.decision:
Gamerules.stand(self)
elif 'hit' == self.decision:
Gamerules.hit(self)
Gameplay.cvalues(self)
else:
            print ("wrong input, make sure it's the correct word in lower case")
Gameplay.decisions(self)
#def splitgame(self):
# if self.decision == 'split':
def endgame (self):
self.endt=True
if self.end == 'ender':
            lasttime=input('Do you want to end the game Y/N : ')
            if lasttime =='Y':
                print('Thanks, goodbye')
return
elif lasttime == 'N':
Gameplay.play(self)
def play (self):
self.endt=False
Gameplay.start (self)
Gameplay.bet (self)
Gameplay.distribute (self)
Gameplay.cvalues (self)
Gameplay.checkstatus1 (self)
Gameplay.decisions (self)
if self.endt== False:
Gameplay.distributedealer (self)
Gameplay.checkstatus (self)
elif self.endt== True:
return
blckjck=Gameplay()
blckjck.play()
|
[
"odedereoluwapelumi13@gmail.com"
] |
odedereoluwapelumi13@gmail.com
|
fbf24e42c6d7e8f22c1daee7c96ee466bdb31af8
|
7dc05dc9ba548cc97ebe96ed1f0dab8dfe8d8b81
|
/branches/0.4/pida/core/application.py
|
94dcd60a715f1aa4cab7fa59b29e7d1b46b9eb49
|
[] |
no_license
|
BackupTheBerlios/pida-svn
|
b68da6689fa482a42f5dee93e2bcffb167a83b83
|
739147ed21a23cab23c2bba98f1c54108f8c2516
|
refs/heads/master
| 2020-05-31T17:28:47.927074 | 2006-05-18T21:42:32 | 2006-05-18T21:42:32 | 40,817,392 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,003 |
py
|
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
#Copyright (c) 2005 Ali Afshar aafshar@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
# system import(s)
import os
import sys
import optparse
import warnings
def die(message):
"""Die in a command line way."""
print message
print 'Exiting. (this is fatal)'
sys.exit(1)
# First gtk import, let's check it
try:
import gtk
major, minor, rev = gtk.pygtk_version
    # compare as a tuple; "major < 2 or minor < 6" would wrongly reject
    # a later major version such as 3.2
    if (major, minor) < (2, 6):
die('PIDA requires PyGTK >= 2.6. It only found %s.%s'
% (major, minor))
except ImportError:
die('PIDA requires Python GTK bindings. They were not found.')
# the threads evilness
gtk.threads_init()
def die_gui(message):
"""Die in a GUI way."""
mu = ('<b>There was an error starting PIDA</b>\n\n'
'%s\n\n<i>This is fatal</i>' % message)
dlg = gtk.MessageDialog(parent=None,
flags=0,
type=gtk.MESSAGE_ERROR,
buttons=gtk.BUTTONS_CLOSE)
dlg.set_markup(mu)
dlg.run()
die(message)
# Python 2.4
major, minor = sys.version_info[:2]
# compare as a tuple so a later major version is not rejected by the minor check
if (major, minor) < (2, 4):
die_gui('Python 2.4 is required to run PIDA. Only %s.%s was found' %
(major, minor))
# Setuptools is needed to run PIDA
try:
import setuptools
import pkg_resources
pkg_resources.require('pida')
except ImportError:
    # a stray bare `raise` here made the die_gui call unreachable
    die_gui('PIDA requires setuptools to be installed.')
# This can test if PIDA is installed
try:
import pida.core.boss as boss
import pida.pidagtk.debugwindow as debugwindow
except ImportError:
die_gui('PIDA could not import itself.')
# Start lock threads here because an exception may be raised
# and the dialog would be frozen
gtk.threads_enter()
# Now we can use a gui exception hook
old_excepthook = sys.excepthook
sys.excepthook = debugwindow.show
def get_version():
from pkg_resources import resource_string
try:
version_file = resource_string('pida', 'data/version')
except:
version_file = 'unversioned'
return version_file
pida_version = get_version()
class environment(object):
"""Handle environment variable and command line arguments"""
def __init__(self):
self.__editorname = None
self.__parseargs()
def __parseargs(self):
home_dir_option = None
default_home = os.path.join(os.path.expanduser('~'), '.pida')
if default_home == os.path.join('~', '.pida'):
# When on win32
from win32com.shell import shell, shellcon
default_home = shell.SHGetSpecialFolderLocation(
0,
shellcon.CSIDL_APPDATA
)
default_home = shell.SHGetPathFromIDList(default_home)
default_home = os.path.join(default_home, "Pida")
del shell
del shellcon
op = optparse.OptionParser()
op.add_option('-d', '--home-directory', type='string', nargs=1,
action='store',
help=('The location of the pida home directory. '
'If this directory does not exist, it will be created. '
'Default: %s' % default_home),
default=default_home)
op.add_option('-o', '--option', type='string', nargs=1,
action='append',
help=('Set an option. Options should be in the form: '
'servicename/group/name=value. '
'For example (without quotes): '
'"pida -o editormanager/general/editor_type=Vim". '
'More than one option can be set by repeated use of -o.'))
op.add_option('-v', '--version', action='store_true',
help='Print version information and exit.')
op.add_option('-D', '--debug', action='store_true',
help=('Run PIDA with added debug information. '
'This merely sets the environment variables: '
'PIDA_DEBUG=1 and PIDA_LOG_STDERR=1, '
'and so the same effect may be achieved by setting them.'))
op.add_option('-r', '--remote', action='store_true',
help=('Run PIDA remotely to open a file in an existing instance '
'of PIDA. Usage pida -r <filename>.'))
op.add_option('-F', '--first-run-wizard', action='store_true',
help='Run the PIDA first time wizard')
op.add_option('-t', '--testing-mode', action='store_true',
        help='Run the PIDA self test')
opts, args = op.parse_args()
envhome = self.__parseenv()
if envhome is not None:
home_dir_option = envhome
else:
home_dir_option = opts.home_directory
self.__home_dir = home_dir_option
self.__create_home_tree(self.__home_dir)
self.__args = args
self.opts = opts
def __parseenv(self):
if 'PIDA_HOME' in os.environ:
return os.environ['PIDA_HOME']
def __create_home_tree(self, root):
dirs = {}
self.__mkdir(root)
for name in ['conf', 'log', 'run', 'vcs', 'sockets', 'data',
'projects', 'library']:
path = os.path.join(root, name)
self.__mkdir(path)
dirs[name] = path
return dirs
def __mkdir(self, path):
if not os.path.exists(path):
os.mkdir(path)
def get_positional_args(self):
return self.__args
positional_args = property(get_positional_args)
def get_home_dir(self):
return self.__home_dir
home_dir = property(get_home_dir)
def get_version(self):
return pida_version
version = property(get_version)
def override_configuration_system(self, services):
if self.__editorname:
svc = services.get('editormanager')
svc.set_option('general', 'type', self.__editorname)
#svc.options.save()
if not self.opts.option:
return
for opt in self.opts.option:
if '=' in opt:
name, value = opt.split('=', 1)
if '/' in name:
parts = name.split('/', 3)
if len(parts) == 3:
service, group, option = parts
try:
svc = services.get(service)
svc.options.get(group).get(option).load(value)
except:
pass
def override_editor_option(self, editorname):
self.__editorname = editorname
class application(object):
"""The pIDA Application."""
def __init__(self,
bosstype=boss.boss,
mainloop=gtk.main,
mainstop=gtk.main_quit,
environment=environment()):
self.__mainloop = mainloop
self.__mainstop = mainstop
self.__env = environment
self.__boss = bosstype(application=self, env=self.__env)
self.boss = self.__boss
self.env = self.__env
def start(self):
"""Start PIDA."""
self.__boss.start()
self.__mainloop()
def stop(self):
"""Stop PIDA."""
self.__mainstop()
def run_pida(env, bosstype, mainloop, mainstop):
if run_firstrun(env):
app = application(bosstype, mainloop, mainstop, env)
app.start()
return 0
else:
return 1
def run_version(env, *args):
print 'PIDA, version %s' % pida_version
return 0
def run_remote(env, *args):
import pida.utils.pidaremote as pidaremote
pidaremote.main(env.home_dir, env.positional_args)
return 0
def run_firstrun(env, *args):
    first_filename = os.path.join(env.home_dir, '.firstrun')
    if not os.path.exists(first_filename) or env.opts.first_run_wizard:
        import pida.utils.firstrun as firstrun
        ftw = firstrun.FirstTimeWindow()
        response, editor = ftw.run(first_filename)
if response == gtk.RESPONSE_ACCEPT:
if editor is None:
raise RuntimeError('No Working Editors')
else:
env.override_editor_option(editor)
return True
else:
return False
else:
return True
def main(bosstype=boss.boss, mainloop=gtk.main, mainstop=gtk.main_quit):
warnings.filterwarnings("ignore", category=DeprecationWarning)
env = environment()
if env.opts.debug:
os.environ['PIDA_DEBUG'] = '1'
os.environ['PIDA_LOG_STDERR'] = '1'
if env.opts.testing_mode:
sys.excepthook = old_excepthook
if env.opts.version is not None:
run_func = run_version
elif env.opts.remote:
run_func = run_remote
else:
run_func = run_pida
exit_val = run_func(env, bosstype, mainloop, mainstop)
gtk.threads_leave()
sys.exit(exit_val)
if __name__ == '__main__':
main()
|
[
"aafshar@ef0b12da-61f9-0310-ba38-b2629ec279a7"
] |
aafshar@ef0b12da-61f9-0310-ba38-b2629ec279a7
|
abcc870c063bb7e215a35e1713cb35a8f84a57e0
|
129a2f54817ca246b53631d669ffd4bb0330f03f
|
/mysite/settings.py
|
ac82f40fee3fb513591e6d9c77cbb9b58e1cb104
|
[] |
no_license
|
MDYLL/BARS
|
9460335d1ea03266f8058e0f1e4571f5621b5bdd
|
1441e5a6c074e54968d4e6bafa406f68312d0160
|
refs/heads/master
| 2020-09-05T18:52:18.948287 | 2019-11-07T08:21:41 | 2019-11-07T08:21:41 | 220,185,151 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,310 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6_3!vt#9*dqup5m&c-1$_od2&9kk375&0_e9g_dg(i0pumpmo7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.heroku.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'bars.sith1@gmail.com'
EMAIL_HOST_PASSWORD = 'bars11111'
EMAIL_PORT = 587
|
[
"mdylll@mail.ru"
] |
mdylll@mail.ru
|
b240e41159b4ff5033205b868e3fcfc8f48df0bd
|
d85a3cc3466b33fe7edd4198b367c2de6c45d13b
|
/SnakeAi/test.py
|
fbc612b6f8289feb1cdc997b494faf33698a5887
|
[] |
no_license
|
Ahmetf1/Machine-Learning
|
3b5faaca699828e9a5582461a72624fb67539164
|
7396ccff574e89c20ff9bb089cbd718459f02657
|
refs/heads/master
| 2023-06-30T12:54:11.975452 | 2021-08-04T23:35:43 | 2021-08-04T23:35:43 | 321,272,337 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 700 |
py
|
from ai_game import GAME, SNAKE, FRUIT
from network import Network, Agent
import torch
gamma = 0.99
epsilon = 0.01
batch_size = 512
n_action = 4
input_dims = [2, 20, 20]
mem_size = 100000
epsilon_end = 0.01
epsilon_dec = 1e-4
network = torch.load('model_2000')
agent = Agent(network, gamma, epsilon, batch_size, n_action, input_dims, mem_size, epsilon_end, epsilon_dec)
game = GAME()
game.set_timer(150)
while True:
observation = game.get_states()
done = 0
while not done:
action = agent.choose_action(observation)
observation = game.get_states()
observation_, reward, done, score = game.spin_once(action)
game.draw()
observation = observation_
|
[
"ahmetfakinci@gmail.com"
] |
ahmetfakinci@gmail.com
|
cc8f3b6012f30c1bdad4f411f454e6e816b04bde
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02549/s176160941.py
|
bdc2e8e51f883ca3eca69259dc2774ce9724f789
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 491 |
py
|
N, K = map(int, input().split())
L = [0] * K
R = [0] * K
for i in range(0, K):
L[i], R[i] = map(int, input().split())
moves = [0] * N
moves[0] = 1
rui_wa = [0] * N
rui_wa[0] = 1
for i in range(1, N):
for j in range(0, K):
l = max(i - L[j], 0)
r = max(i - R[j], 0)
if i - L[j] < 0:
continue
moves[i] += (rui_wa[l] - rui_wa[r - 1]) % 998244353
rui_wa[i] = (moves[i] + rui_wa[i - 1]) % 998244353
print(moves[N - 1] % 998244353)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
25fa45f71853b5403b82a98d2fcc0438234355ac
|
32b7903477752f7a1bc1f656b024527b9884d919
|
/Style Transfer/run.py
|
53a58003db0b4c44ea824733009d5cd5d7e5db25
|
[
"MIT"
] |
permissive
|
fuxiao-zhang/PyTorch
|
5f37eeb5daf2ae04330747f5fcc33c86613cd0dd
|
114537ebbae878a5522adcfe262f936fd9c2368d
|
refs/heads/master
| 2023-01-25T01:08:43.338520 | 2020-12-07T07:18:39 | 2020-12-07T07:18:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,151 |
py
|
import torch
from tqdm import tqdm
import torch.nn as nn
import torchvision
import numpy as np
from PIL import Image
import albumentations as alb
from torchvision.utils import save_image
class model(nn.Module):
def __init__(self):
super(model, self).__init__()
self.out_features_number = [0, 5, 10, 19, 28]
self.base_model = torchvision.models.vgg19(pretrained=True).features[:29]
def forward(self, x):
features = []
for num, layer in enumerate(self.base_model):
x = layer(x)
if num in self.out_features_number:
features.append(x)
return features
def augmentation(data):
imagenet_mean = (0.5071, 0.4867, 0.4408)
image_net_std = (0.2675, 0.2565, 0.2761)
transforms = alb.Compose(
[
alb.Normalize((0, 0, 0), (1, 1, 1), max_pixel_value=255, always_apply=True),
alb.Resize(420, 420, always_apply=True),
],
additional_targets={"image2" : "image"}
)
image = transforms(**data)
return image
def image_loader(real_image_path, style_image_path):
image = np.array(Image.open(real_image_path).convert("RGB"))
image2 = np.array(Image.open(style_image_path).convert("RGB"))
aug_input = {
'image': image,
'image2' : image2
}
image = augmentation(aug_input)
return image
image_name = 'henry.jpg'
style_name = 'starry_night.jpg'
real_image_path = f'Input/{image_name}'
style_image_path = f'Input/{style_name}'
steps = 4000
alpha = 0.98
beta = 0.02
if torch.cuda.is_available():
compute = 'cuda'
torch.backends.cudnn.benchmark=True
else:
compute = 'cpu'
device = torch.device(compute)
image = image_loader(real_image_path, style_image_path)
real_img = torch.tensor(image['image'].transpose(2, 0, 1)).unsqueeze(0).to(device)
style_img = torch.tensor(image['image2'].transpose(2, 0, 1)).unsqueeze(0).to(device)
generated_img = real_img.clone().requires_grad_(True)
optimizer = torch.optim.Adam([generated_img], lr=0.001)
model = model().to(device).eval()
for step in tqdm(range(steps)):
generated_img.data.clamp_(0, 1)
style_features = model(style_img)
generated_features = model(generated_img)
original_features = model(real_img)
style_loss, orig_loss = 0, 0
for generated_feature, original_feature, style_feature in zip(
generated_features, original_features, style_features
):
batch_size, channel, height, width = generated_feature.shape
orig_loss += torch.mean((generated_feature- original_feature)**2)
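        # style is compared via Gram matrices G = F @ F.T, where F is the
        # (channel x height*width) feature map; G captures channel
        # co-activations independent of spatial layout (the classic
        # Gatys-style loss this script appears to follow)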
gen_gram_matrix = torch.mm(generated_feature.view(channel, height*width), generated_feature.view(channel, height*width).t())
style_gram_matrix = torch.mm(style_feature.view(channel, height*width), style_feature.view(channel, height*width).t())
style_loss += torch.mean((gen_gram_matrix - style_gram_matrix)**2)
total_loss = alpha*orig_loss + beta*style_loss
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
if step%100 == 0:
save_image(generated_img, f'Output/generated_{step}.png')
|
[
"noreply@github.com"
] |
fuxiao-zhang.noreply@github.com
|
49082f777a4c3f494a628aa8a98f1e3a9039e35e
|
5ea521e5cd24c8c49d62f23864d754aeda0bf2e6
|
/game/libs/jsonpickle/tags.py
|
c4ced32502372278c191ee7705263df421384123
|
[] |
no_license
|
bravelittlescientist/asteroid-lander
|
fba45e40b7af96b4faf5fdd8d854e241bc62e81c
|
a9d03148d8a50d0db726a796b83b56a5536b90eb
|
refs/heads/master
| 2016-09-06T13:09:06.000935 | 2013-04-12T03:33:00 | 2013-04-12T03:33:00 | 7,905,313 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 572 |
py
|
"""The jsonpickle.tags module provides the custom tags
used for pickling and unpickling Python objects.
These tags are keys into the flattened dictionaries
created by the Pickler class. The Unpickler uses
these custom key names to identify dictionaries
that need to be specially handled.
"""
from jsonpickle.compat import set
ID = 'py/id'
OBJECT = 'py/object'
TYPE = 'py/type'
REPR = 'py/repr'
REF = 'py/ref'
TUPLE = 'py/tuple'
SET = 'py/set'
SEQ = 'py/seq'
STATE = 'py/state'
# All reserved tag names
RESERVED = set([OBJECT, TYPE, REPR, REF, TUPLE, SET, SEQ, STATE])
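# Illustrative sketch (not part of this module): encoding a simple class
# instance yields a flattened dict such as
#   {'py/object': 'mymodule.Point', 'x': 1, 'y': 2}
# and the Unpickler keys off 'py/object' to locate the class to rebuild.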
|
[
"vaibps17@gmail.com"
] |
vaibps17@gmail.com
|
5f9c762d06042b8c296c2b16f570b7c112d50b38
|
c55615a3638317e71f966cbde3a81c61ca7beee1
|
/Vadym_Bilestkyi/0/Task1.py
|
d0f418ee908b2443ee1e900c979856f759fa56a2
|
[] |
no_license
|
SmischenkoB/campus_2018_python
|
8b139f4ad2b7f8476f31ee1bb96fbbce0040ec8c
|
291592e97b6d8fe9f9e6627dc0023875918d3463
|
refs/heads/master
| 2020-05-01T05:13:59.107471 | 2018-12-14T13:18:55 | 2018-12-14T13:18:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 317 |
py
|
def gen_range(start, stop, step=1):
if step == 0:
raise ValueError('step must be nonzero value')
while step > 0 and start < stop or step < 0 and start > stop:
yield start
start += step
print(list(gen_range(0, 5, 1)))
print(list(gen_range(0, 5, -1)))
print(list(gen_range(5, 0, -2)))
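# Expected output:
#   [0, 1, 2, 3, 4]
#   []           (step -1 never moves 0 toward 5)
#   [5, 3, 1]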
|
[
"umpire333@gmail.com"
] |
umpire333@gmail.com
|
c0283e4aa9ef77fcad4f242bf2de3af0b18ec5b3
|
cee0fac1c0807f671b1ca499d6b16011f1cfd705
|
/fetchEstimoteData.py
|
397005520c4647e1ed10bfe4512ec7f4a6908e28
|
[] |
no_license
|
siman4457/ProximityBeacons
|
f85eb446f5581d1d139d77fb7aa989f854c8e611
|
7ddebe69712aed494489eb186df7d3600f16a2c6
|
refs/heads/master
| 2020-03-17T20:13:19.195376 | 2018-05-18T03:56:10 | 2018-05-18T03:56:10 | 133,894,936 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,534 |
py
|
#A short program that fetches data from the Estimote cloud and exports it into a csv file.
#HUBBS Labs
#By: Siman Shrestha
import requests
import csv
import ast
#---Get visit duration for BLUEBERRY BEACON ---
url_1 = 'https://cloud.estimote.com/v2/analytics/devices/cc95d1d6b8587f69bb2d353a599fcd00/duration?from=1520812800&to=1523232000&granularity=hourly'
response_1 = requests.get(url_1,auth = ('hubbs-beacon-1g4','9dd89caad492d2a7d4b4c023d591cc17'))
# print(response_3.text) #Prints data to terminal
# print("\n")
# Converts the dictionary string output of response.text into an actual dictionary
allData = ast.literal_eval(response_1.text)
data = allData['data']
print("-------------------------------------------------------------")
print("EXPORTING CSV: Get Visit Duration for Blueberry Beacon: \n")
print("-------------------------------------------------------------")
try:
    keys = data[0].keys()  # keys: ['date', 'count']
    # 'w' with newline='' is the Python 3 way to write CSV files.
    with open('BlueberryProximityData.csv', 'w', newline='') as output_file:
        thewriter = csv.DictWriter(output_file, ['date', 'count'])
        thewriter.writeheader()
        for row in data:
            thewriter.writerow(row)
    print("Success! :]")
except IOError:
    print("Could not open file! Please close Excel!")
#---Get visit duration for Mint BEACON---
url_2 = 'https://cloud.estimote.com/v2/analytics/devices/193fde558e5f12386ab392df7761900a/duration?from=1520812800&to=1523232000&granularity=hourly'
response_1 = requests.get(url_2,auth = ('hubbs-beacon-1g4','9dd89caad492d2a7d4b4c023d591cc17'))
# print(response_3.text) #Prints data to terminal
# print("\n")
# Converts the dictionary string output of response.text into an actual dictionary
allData = ast.literal_eval(response_1.text)
data = allData['data']
print("-------------------------------------------------------------")
print("EXPORTING CSV: Get Visit Duration for Mint Beacon: \n")
print("-------------------------------------------------------------")
try:
    keys = data[0].keys()  # keys: ['date', 'count']
    with open('MintProximityData.csv', 'w', newline='') as output_file:
        thewriter = csv.DictWriter(output_file, ['date', 'count'])
        thewriter.writeheader()
        for row in data:
            thewriter.writerow(row)
    print("Success! :]")
except IOError:
    print("Could not open file! Please close Excel!")
|
[
"siman4457@gmail.com"
] |
siman4457@gmail.com
|
39d2ad8c72be7e579b00d0f7f038bb5dc74229a5
|
1070dd709a69aa5c6d0dc162e2206912f8067b98
|
/src/Interface/forward_kinematics.py
|
36dba4fe09874c5faeca885e314be26d6d8559b6
|
[] |
no_license
|
quarkytale/YouBotVRepROSInterface
|
6f180bebbd7af9ceba0e0dbc755b91f836d431c0
|
22830009d79f261ea74c5bbfaaa53eab0ed70615
|
refs/heads/master
| 2021-09-13T02:10:45.601701 | 2018-04-23T20:51:16 | 2018-04-23T20:51:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,507 |
py
|
from sympy import *
import numpy as np
a = 10 #not too sure about this
b = 72
c = 150.5
d = 75
e = 155
f = 135
g = 113
h = 105
def dh_transform(theta, d_link, a_link, alpha_link):
    """One Denavit-Hartenberg link transform: Rotz(theta) * Transz(d) * Transx(a) * Rotx(alpha)."""
    Rotz = np.matrix([[cos(theta), -sin(theta), 0, 0],
                      [sin(theta), cos(theta), 0, 0],
                      [0, 0, 1, 0],
                      [0, 0, 0, 1]])
    Transz = np.matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, d_link], [0, 0, 0, 1]])
    Transx = np.matrix([[1, 0, 0, a_link], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
    Rotx = np.matrix([[1, 0, 0, 0],
                      [0, cos(alpha_link), -sin(alpha_link), 0],
                      [0, sin(alpha_link), cos(alpha_link), 0],
                      [0, 0, 0, 1]])
    return Rotz * Transz * Transx * Rotx
def forward_kinematics(q1, q2, q3, q4, q5, x, y, theta):
    global a, b, c, d, e, f, g, h
    # DH parameters for the eight joint/frame transforms of the arm.
    t = [0, np.deg2rad(q1), np.deg2rad(q2 - 90), np.deg2rad(q3), np.deg2rad(q4), np.deg2rad(90), 0, np.deg2rad(q5)]
    dr = [b, d, 0, 0, 0, 0, 0, h]
    ar = [-c, a, e, f, g, 0, 0, 0]
    alpha = [0, np.deg2rad(-90), 0, 0, 0, 0, np.deg2rad(90), 0]
    # Chain the link transforms: robot base -> ... -> tool tip.
    TRT = np.matrix(np.identity(4))
    for i in range(8):
        TRT = TRT * dh_transform(t[i], dr[i], ar[i], alpha[i])
    # World-to-robot transform from the base pose (x, y, theta).
    TWR = np.matrix([[cos(np.deg2rad(theta)), sin(np.deg2rad(theta)), 0, x],
                     [-sin(np.deg2rad(theta)), cos(np.deg2rad(theta)), 0, y],
                     [0, 0, 1, 0],
                     [0, 0, 0, 1]])
    TWT = TWR * TRT
    tipPos_robot = TRT[0:3, 3]   # tip position in the robot frame
    tipPos_world = TWT[0:3, 3]   # tip position in the world frame
    return tipPos_world
print(forward_kinematics(0, 0, 0, 0, 0, 0, 0, 0))  # simple test example, should output: [-140.5, 0, 655]
|
[
"noreply@github.com"
] |
quarkytale.noreply@github.com
|
caec2e5729e08c77d3b01126a32f794a0c4931f2
|
db241b5457dd8df4239214bdc31a221d7ffa7e25
|
/opencv/utils/__init__.py
|
1d5656e97542599a2b0c334e9630007b7844a84d
|
[] |
no_license
|
TaoistQu/AI
|
cd8c26bf73aef20e5c48bd928a2067fdfef6241f
|
1f14dd5c202d3291a8867c53110f78f33c5ce316
|
refs/heads/main
| 2023-06-01T08:15:04.435754 | 2023-05-16T13:16:18 | 2023-05-16T13:16:18 | 263,840,509 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 217 |
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (C) 2022 #
# @Time : 2022/12/23 0:12
# @Author : TaoistQu
# @Email : qulei_20180331@163.com
# @File : __init__.py
# @Software: PyCharm
# description:
|
[
"qulei_20180331@163.com"
] |
qulei_20180331@163.com
|
a4ee93d69cb5b32fecf72f51bad2601bc913f577
|
e25e7f0d944d302c2fd13b7517d97c5e0b5558ec
|
/FixTree_TBCNN/03-ConstructCandW/nn/helper.py
|
613ea1a4c7ecd2d0c925d83a482a3faa4aa99346
|
[] |
no_license
|
NizhenJenny/FixTree
|
06702a0d529d861e34b045aac286434b0ce3d86f
|
be30a2cdeb6cc0aa13f29d2cd4d4ce325f00f2a0
|
refs/heads/master
| 2020-05-24T21:33:04.030992 | 2019-08-19T09:52:10 | 2019-08-19T09:52:10 | 187,477,281 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,733 |
py
|
# -*- coding: utf-8 -*-
import numpy as np
import copy
def addlist(l, e):
if not l :
l = []
l.append(e)
else:
l.append(e)
return l
def main():
a = np.array([[-0.4527861 ],
[-1.49480242]])
#print dummySigmoidPrime(a,a)
#print sigmoid(1, np.array([1,2,3]))
#print sigmoid(3)
# l = None
# e = 3
# l = addlist(l, e)
# print l
def computeMSE(output, target):
numOutput, numData = target.shape
    # Error = .5 * (y - t)^2, averaged over the data points
y_t = output - target
Error = .5 * np.sum( y_t * y_t )
return Error / numData
def numericalGradient(f, x, theta):
    delta = 0.01
    gradTheta = np.zeros_like(theta)
    # Central difference for each component:
    # df/dtheta_i ~ (f(theta + delta*e_i) - f(theta - delta*e_i)) / (2*delta)
    for idx in range(len(theta)):
        tmpTheta = copy.copy(theta)
        tmpTheta[idx] += delta
        fplus = f(x, tmpTheta)
        tmpTheta[idx] -= 2*delta
        fminus = f(x, tmpTheta)
        gradTheta[idx] = (fplus - fminus) / 2 / delta
    return gradTheta
def checkGradient(numGrad, anaGrad, THRES = 0.01, verbose=True):
    # Compare only components whose numerical gradient is non-zero.
    toselect = abs(numGrad) > 1e-8
    numGrad = numGrad[toselect]
    anaGrad = anaGrad[toselect]
    ratio = numGrad / anaGrad
    if verbose:
        print(ratio)
    ratio = ratio[~np.isnan(ratio)]
    print('The max ratio is', max(ratio), 'and the min ratio is', min(ratio))
    if max(ratio) < 1 + THRES and min(ratio) > 1 - THRES:
        print('Numerical gradient checking passed with non-zero gradient # =', toselect.sum())
        return True
    else:
        print('Numerical gradient checking failed')
        return False
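# Illustrative sketch (not in the original file): pairing numericalGradient with
# checkGradient on a toy objective f(x, theta) = sum(theta^2 * x). The names
# below are hypothetical.
def _demo_gradient_check():
    f = lambda x, theta: np.sum(theta ** 2 * x)
    x0 = np.array([3.0, 4.0])
    theta0 = np.array([1.0, 2.0])
    numGrad = numericalGradient(f, x0, theta0)  # central differences
    anaGrad = 2 * theta0 * x0                   # analytic gradient w.r.t. theta
    checkGradient(numGrad, anaGrad)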
if __name__== '__main__':
main()
|
[
"noreply@github.com"
] |
NizhenJenny.noreply@github.com
|
9aed10d3c3a078ed98141ea7813a5f41d2a2e1d4
|
a2742344744180054dd37cd5f565dc56e166d600
|
/main.py
|
afc85ecc47408102b06eff1773941b6c36a6b4db
|
[] |
no_license
|
Kaisaurus/uda-blog
|
b409ae318b494026c79354090ca756adb549d7dc
|
35b7f8144f350ad8e3d97150fdae6f35298b5a7b
|
refs/heads/master
| 2020-04-18T05:35:18.631590 | 2016-08-22T07:10:07 | 2016-08-22T07:10:07 | 66,248,267 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,569 |
py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import webapp2
import jinja2
import re
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)
class Handler(webapp2.RequestHandler):
"""docstring for Handler"""
def write(self, *a, **kw):
self.response.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **params):
self.write(self.render_str(template, **params))
class Blog(db.Model):
subject = db.StringProperty(required=True)
blog = db.TextProperty(required=True)
created = db.DateTimeProperty(auto_now_add=True)
class MainHandler(Handler):
def get(self):
blog = db.GqlQuery("SELECT * FROM Blog "
"ORDER BY created DESC")
self.render("home.html", blog=blog)
class NewPostHandler(Handler):
def render_front(self, subject="", blog="", errors=""):
self.render('newpost.html', subject=subject, blog=blog, errors=errors)
def get(self):
self.render_front()
def post(self):
subject = self.request.get("subject")
blog = self.request.get("blog")
if subject and blog:
entry = Blog(subject = subject, blog = blog)
entry.put()
self.redirect('/%s' % str(entry.key().id()))
else:
error = "Insert both subject and content please"
self.render_front(subject, blog, error)
class PermalinkHandler(Handler):
def get(self, entry_id):
entry = Blog.get_by_id(int(entry_id))
if entry:
self.render("single_entry.html", subject=entry.subject, blog=entry.blog, error="")
else:
self.render("single_entery.html", subject="", body="", error="Blog post %s not found" % entry_id)
app = webapp2.WSGIApplication([
('/', MainHandler),
('/newpost', NewPostHandler),
('/(\d+)', PermalinkHandler)
], debug=True)
|
[
"kaigotoh@gmail.com"
] |
kaigotoh@gmail.com
|
1fca02c281ca3e871dc4a6d6211ce9438fa335cd
|
071998f813d7fd54e4629cc4625c9c2c6325d0e3
|
/src/test/lp/atividade_continua_2/range_of_primes/main_test.py
|
685ec19bd339f1f948404b55609b67018a777317
|
[
"Unlicense"
] |
permissive
|
shirayukikitsune/python-code
|
d43c110b77a7fa0f778a8904b2554a0ca0e6dd18
|
bec56b92510470e54db1eebafcf06b9877aed5fc
|
refs/heads/main
| 2023-08-21T20:03:29.499242 | 2021-09-15T10:35:16 | 2021-09-15T10:35:16 | 399,742,530 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 392 |
py
|
import unittest
from os.path import abspath, dirname
from src.library.commons.testing.utils import run_tests_with_io
from src.main.lp.atividade_continua_2.range_of_primes import main
class ExtremelyBasicTestCase(unittest.TestCase):
@run_tests_with_io(abspath(dirname(__file__)) + '/cases')
def test_inputs(self):
main.run()
if __name__ == '__main__':
unittest.main()
|
[
"shirayukikitsune@gmail.com"
] |
shirayukikitsune@gmail.com
|
5de0b81f7eb9ffcb6f37c172ee267011003055f3
|
8a03b8459902d1bf0806f8d3387fb962bb57cf58
|
/User_create/Negative_changepwd.py
|
b654fadc05e357cbb963c843646791c0392766c4
|
[] |
no_license
|
chetandg123/cQube
|
f95a0e86b1e98cb418de209ad26ae2ba463cfcbc
|
a862a1cdf46faaaff5cad49d78c4e5f0454a6407
|
refs/heads/master
| 2022-07-18T12:43:06.839896 | 2020-05-22T13:23:52 | 2020-05-22T13:23:52 | 258,089,042 | 0 | 0 | null | 2020-05-08T16:28:26 | 2020-04-23T03:55:52 |
HTML
|
UTF-8
|
Python
| false | false | 1,828 |
py
|
import time
import unittest
from selenium import webdriver
from Data.Paramters import Data
class Click_ChangePwd(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(Data.Path)
self.driver.maximize_window()
self.driver.implicitly_wait(10)
self.driver.get(Data.URL)
self.driver.find_element_by_xpath(Data.email).send_keys(Data.username)
self.driver.find_element_by_xpath(Data.pwd).send_keys(Data.password)
self.driver.find_element_by_xpath(Data.loginbtn).click()
time.sleep(5)
def test_set_negative_newpwd(self):
self.driver.find_element_by_xpath(Data.Dashboard).click()
time.sleep(3)
self.driver.find_element_by_xpath("/html/body/app-root/app-home/mat-sidenav-container/mat-sidenav/div/mat-nav-list/mat-list/mat-list-item/div/button/span/mat-icon").click()
time.sleep(3)
self.driver.find_element_by_xpath("/html/body/app-root/app-home/mat-sidenav-container/mat-sidenav/div/mat-nav-list/mat-list/div/a[2]/div/span").click()
        pwd = self.driver.find_element_by_xpath("//h2").text
        self.assertEqual(pwd, "Change Password", "Change password is not found!..")
self.driver.find_element_by_xpath("//input[@name='newPasswd']").send_keys("tibil123")
time.sleep(2)
self.driver.find_element_by_xpath("//input[@name='cnfpass']").send_keys("tibil12")
time.sleep(2)
self.driver.find_element_by_xpath("//button[@type='submit']").click()
time.sleep(3)
errormsg = self.driver.find_element_by_xpath("//p").text
print(errormsg)
self.assertEqual(errormsg,"Password not matched" ,"Matching password!")
def tearDown(self):
time.sleep(5)
self.driver.close()
if __name__ == "__main__":
unittest.main()
|
[
"chetan.goudar@tibilsolutions.com"
] |
chetan.goudar@tibilsolutions.com
|
f7f0c2ff48ccc26e418233312c0899632de4b4ef
|
f674eaaf46491ab376199238143e18837d3f85c0
|
/SiO2/large/pythonScripts/forceDistribution.py
|
e70c1cbeb448ec2e8a87f386df3b4e239478fea5
|
[] |
no_license
|
filiphl/master-thesis
|
7402dafb3d2f3ec70f61ec97cd7a235405fc40fc
|
54aa0d1d49765c82c4dcaea149f2c2cf00f54bec
|
refs/heads/master
| 2021-06-18T14:09:08.652591 | 2017-06-19T08:39:48 | 2017-06-19T08:39:48 | 66,485,624 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,131 |
py
|
from surface import *
from compareMatrices import *
from radialBinning import smooth
from matplotlib import rc, ticker
from scipy import ndimage
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pickle
import copy
# Explicit imports for names used below (np, plt, ticker, copy); some of these
# may also arrive via the * imports above.
class ForceDistribution:
def __init__(self, N, surfN, nearestNeighbor, binWidth, cx, cy):
self.N = N
self.surfN = surfN
self.nn = nearestNeighbor
self.bw = binWidth
self.cx=cx
self.cy=cy
self.mapping = float(N)/surfN
        # Use the constructor argument rather than relying on a module-level nn.
        self.surf = self.loadSurface('../dataFiles/surface.pkl', N=surfN, s=nearestNeighbor)
self.force = self.loadForces('../dataFiles/forces.pkl')
#self.radialBinning = smooth(N, cx, cy, binWidth, nBins=int(16/binWidth))
self.surf.plotPlanes()
    def loadSurface(self, filePath=False, N=46, s=5):
        if filePath:
            # Drop the '.pkl' suffix, then append the parameters to the name.
            filePath = filePath[:-len('.pkl')] + 'N%ds%d.pkl' % (N, s)
            try:
                with open(filePath, 'rb') as infile:
                    print("Loaded surface file with N=%d and s=%d." % (N, s))
                    return pickle.load(infile)
            except Exception:
                print("Couldn't load surface file.")
        s = SurfaceRegression('../surfaceFiles/', N, False, s)
        if filePath:
            with open(filePath, 'wb') as outfile:
                pickle.dump(s, outfile, pickle.HIGHEST_PROTOCOL)
        return s
    def loadForces(self, filePath=False):
        if filePath:
            try:
                with open(filePath, 'rb') as infile:
                    print("Loaded force file.")
                    return pickle.load(infile)
            except Exception:
                print("Couldn't load force file.")
        F = Forces('../forceFiles/forcesAll.txt')
        F.plotAverage = True
        F.name = 'Averaged normal force'
        if filePath:
            with open(filePath, 'wb') as outfile:
                pickle.dump(F, outfile, pickle.HIGHEST_PROTOCOL)
        return F
    def transform(self, matrix, N, M, R, cx, cy):
        """Resample a Cartesian matrix onto a polar grid centred at (cx, cy).

        Returns an (M, N) array indexed by (theta, r): M angles over [0, 2*pi]
        and N radii over [0, R]."""
        r = np.linspace(0, R, N)
        theta = np.linspace(0, 2*np.pi, M)
        myevalmatrix = np.zeros((N, M, 2))
        for i in range(N):
            for j in range(M):
                # Cartesian sample point for radius r[i] and angle theta[j].
                myevalmatrix[i, j, :] = np.asarray([cx + r[i]*np.cos(theta[j]), cy + r[i]*np.sin(theta[j])])
        # order=1 -> bilinear interpolation between grid points.
        return ndimage.map_coordinates(matrix, np.transpose(myevalmatrix[:, :]), order=1)
#------------------------------------------------------------------------------#
def computeDistributions(self):
#surf.plotPlanes()
#plt.figure()
#force.plotMatrix()
#plt.show()
#radialBinning.show(3)
self.normal = copy.deepcopy(self.force.absoluteForces)
self.shear = copy.deepcopy(self.force.absoluteForces)
Fs = np.zeros((self.N, self.N, 3))
        for i in range(self.N):
            for j in range(self.N):
if not np.isnan( np.cos( self.surf.getAngle( self.force.matrix[i][j], self.surf.grid[ int(i/self.mapping),int(j/self.mapping) ] ) ) ):
a = np.dot( self.force.matrix[i,j], self.surf.grid[ int(i/self.mapping),int(j/self.mapping) ] )
b = np.dot(self.force.matrix[i][j], -self.surf.grid[int(i/self.mapping)][int(j/self.mapping)] )
if a > b:
self.normal[i,j] = a
Fs[i,j] = self.force.matrix[i,j] - self.surf.grid[ int(i/self.mapping), int(j/self.mapping) ] * self.normal[i,j]
else:
self.normal[i,j] = b
Fs[i,j] = self.force.matrix[i,j] + self.surf.grid[ int(i/self.mapping), int(j/self.mapping) ] * self.normal[i,j]
self.shear [i,j] = np.sqrt(sum(Fs[i,j]**2))
#x = i-self.cx
#y = j-self.cy
#self.shear[i,j] *= np.dot( self.surf.Norm(np.asarray([x,y])), Fs[i,j,:2] )
#angle = self.surf.getAngle( self.shear[i,j], self.surf.grid[ int(i/self.mapping), int(j/self.mapping) ] )
# Dot product of shear vector with unit vector from center to current point (i,j).
def plotDistributions(self):
fig, ax = plt.subplots(2,3, figsize=(15,10), sharey='row', sharex='col')
cmax = max(self.force.absoluteForces.max(), self.normal.max(), self.shear.max())
N = 46
M = 46
R = 16
c=0
for f in [self.force.absoluteForces, self.normal, self.shear]:
output = self.transform(f,N,M,R,22.5,22.5)
im=ax[0,c].pcolor(output, vmin=0, vmax=cmax)
radialDist = np.mean(output,0)
ax[1,c].plot(radialDist, linewidth=2, color="#478684")
ax[0,c].set_xlim([0,46])
ax[1,c].set_xlabel(r"$r$", fontsize=18)
ax[1,c].set_xticks(np.linspace(0,N,6))
ax[1,c].set_xticklabels(['%.0f'%i for i in np.linspace(0,R,6)])
#ax[0,c].set_ylim([0,ymax*1.05])
ax[1,c].grid('on')
c+=1
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.89, 0.535, 0.02, 0.3648])
fig.colorbar(im, cax=cbar_ax, format=ticker.FuncFormatter(self.force.fmt))
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax[0,0].set_ylim([0,46])
ax[0,0].set_ylabel(r"$\theta$", fontsize=18)
ax[1,0].set_ylabel(r"$eV/\AA$", fontsize=16)
ax[0,0].set_title(r"$ $Magnitude", fontsize=16)
ax[0,1].set_title(r"$ $Normal", fontsize=16)
ax[0,2].set_title(r"$ $Shear", fontsize=16)
ax[0,0].set_yticks(np.linspace(0,M,5))
ax[0,0].set_yticklabels([r'$0$', r'$\pi/2$', r'$\pi$', r'$3\pi/2$', r'$2\pi$'], fontsize=14)
#------------------------------------------------------------------------------#
if __name__ == '__main__':
for bw in np.linspace(1,1,1):
N = 45
surfN = 40
#bw = 1.2
cx=21.5
cy=22
nn=8
dist = ForceDistribution(N, surfN, nn, bw, cx, cy)
dist.computeDistributions()
dist.plotDistributions()
plt.show()
|
[
"filiphenriklarsen@gmail.com"
] |
filiphenriklarsen@gmail.com
|
7229a9c285b03df22f176624c5e0f5b54b27a88d
|
a2fab78b021469748337bdbe46d60f4b2dccf6b9
|
/day04/03.字符串的遍历.py
|
c5d627376bea9ed9bd537324019d43ced7a0f603
|
[] |
no_license
|
yywecanwin/PythonLearning
|
06175886b42f6ec6be5ee8fa379365779e8e14e6
|
f59d381692f22b3c7cf605aec88500f6c0267ffc
|
refs/heads/master
| 2020-08-01T13:03:17.458829 | 2020-02-11T02:53:33 | 2020-02-11T02:53:33 | 211,006,180 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 628 |
py
|
# -*- coding: utf-8 -*-
# author:yaoyao  time:2019/9/28
"""
String traversal:
    get the elements inside it one by one
"""
s = "hello python"
"""
# Traversal method 1: while loop
# 1. Define a variable i for the element index, initialised to 0, since indices start at 0
i = 0
# 2. Loop over the string with while
while i <= len(s)-1:
    # 3. Inside the loop, fetch the element by its index and print it
    print(s[i])
    # 4. Increment i by 1 so that the next iteration fetches the next element
    i += 1
"""
"""
for variable in a range() call or a container
"""
# Traversal method 2: for loop
for c in s:
    print(c)
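# Added illustrative variant (not in the original lesson): enumerate() yields
# the index and the character together, combining the two methods above.
for i, c in enumerate(s):
    print(i, c)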
|
[
"3216561314@qq.com"
] |
3216561314@qq.com
|
182744c1cafe5920f8cf0055d5f8bdf9f0fc912a
|
a8d130fd32506cc0ceef711dd02c9a34bcb3fb31
|
/Ex7.py
|
7ef21808b4213e41ea9b17cd0d68d2e55d76f4e9
|
[] |
no_license
|
Jorgezepmed/Python
|
554b1933ae4d027b2fefc4f61ef48556274e04e9
|
25bd71ac25bb73575eec8aedf229d0e01ff43720
|
refs/heads/master
| 2021-01-01T15:34:10.371156 | 2018-03-05T06:16:58 | 2018-03-05T06:16:58 | 97,438,806 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 710 |
py
|
# Ex7 MORE PRINTING
print("Mary had a little lamb.")
print("Its fleece was white as %s." % 'snow')  # % formatting does not need a declared variable; a literal string can be given inline
print("And everywhere that Mary went.")
print("." * 10)  # the dots are repeated 10 times on the same line
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
print(end1 + end2 + end3 + end4 + end5 + end6, end=' ')  # end=' ' keeps the next print on the same line (Python 2 used a trailing comma)
print(end7 + end8 + end9 + end10 + end11 + end12)
|
[
"jorge.david.zepmed@gmail.com"
] |
jorge.david.zepmed@gmail.com
|
2168a885042c375e9a709028942bca7cd1d3e5ef
|
f1ee770cd7c8f932d96498016fc298cfab631d5e
|
/myapp/migrations/0008_auto_20180718_0152.py
|
46cc7bf01fa3344d3c911fc8320a7fa132350468
|
[] |
no_license
|
umangforlife/umang1
|
fc06e6b7266b5a7c0f5022b5d6680dca368b9556
|
33a77a637e05a5b1a80b830bac568908d410a90b
|
refs/heads/master
| 2020-03-23T09:36:55.229141 | 2018-07-18T07:44:53 | 2018-07-18T07:44:53 | 141,398,814 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,641 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-17 20:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0007_contact_ngo'),
]
operations = [
migrations.CreateModel(
name='donate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fname', models.CharField(max_length=30)),
('lname', models.CharField(max_length=30)),
('city', models.CharField(max_length=30)),
('state', models.CharField(max_length=30)),
('phone', models.IntegerField(null=True)),
('zipcode', models.IntegerField(null=True)),
('demail', models.EmailField(max_length=254, null=True)),
('message', models.TextField(max_length=100)),
('ngo', models.CharField(choices=[('SP', 'Saras prayas'), ('V', 'Vikas'), ('PA', 'PAWS'), ('PF', 'Posh Foundation'), ('LN', 'Lakshyam NGO Delhi'), ('CW', 'Child & Women Care Society'), ('SA', 'Shivashrya'), ('SK', 'Satkartar')], max_length=2, null=True)),
],
),
migrations.AlterField(
model_name='contact',
name='ngo',
field=models.CharField(choices=[('SP', 'Saras prayas'), ('V', 'Vikas'), ('PA', 'PAWS'), ('PF', 'Posh Foundation'), ('LN', 'Lakshyam NGO Delhi'), ('CW', 'Child & Women Care Society'), ('SA', 'Shivashrya'), ('SK', 'Satkartar')], max_length=2, null=True),
),
]
|
[
"umangforlife@gmail.com"
] |
umangforlife@gmail.com
|
148f59407e6dfbfc0392c86f57e79bebc1f03ec5
|
1f4c4f4799625030ef03f6510943aad6f5d7c080
|
/quickstart/views.py
|
d705ba73a383e7a5578ac57bf2979079804c9b8d
|
[] |
no_license
|
sensactive/api
|
8b19c78010a928c1608d3b8fcc527f2ab01e5209
|
e3a431ab1d353f302e733dff31a18fb02b38a71c
|
refs/heads/master
| 2020-04-03T15:26:47.686488 | 2018-10-30T10:09:19 | 2018-10-30T10:09:19 | 155,362,731 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,162 |
py
|
from rest_framework.response import Response
from .models import TempModel
from rest_framework import viewsets, serializers
from .serializers import TempModelSerializer
from django.http import HttpResponse
import requests
import pytemperature
class TempModelViewSet(viewsets.ModelViewSet):
queryset = TempModel.objects.all()
serializer_class = TempModelSerializer
def get_paginated_response(self, data):
return Response(data)
def search(request):
    if 't' in request.GET:
        api_url = 'http://127.0.0.1:8000/temperatures/'
        res = requests.get(api_url)
        data = res.json()  # get the list of dates
        for c in data:
            if request.GET['t'] in c['date']:
                if 'g' in request.GET:
                    if request.GET['g'] == 'f':
                        return HttpResponse(pytemperature.c2f(c['temperature']))  # Fahrenheit
                    elif request.GET['g'] == 'k':
                        return HttpResponse(pytemperature.c2k(c['temperature']))  # Kelvin
                return HttpResponse(c['temperature'])
    return HttpResponse('empty request')
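# Illustrative request examples (assuming the project's URLconf routes this view
# at /search; that route is an assumption, not shown in this file):
#   /search?t=2018-10-30       -> temperature in Celsius for the matching date
#   /search?t=2018-10-30&g=f   -> converted to Fahrenheit via pytemperature.c2f
#   /search?t=2018-10-30&g=k   -> converted to Kelvin via pytemperature.c2k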
|
[
"sensor14@list.ru"
] |
sensor14@list.ru
|
865a45266601f0b3b988e02fbb6b2dfe682de7d0
|
a2cd4d19a423010d689ca40d22cc0d93b6115464
|
/EZAI/api/forms.py
|
404b3483349a7614dc861a340438471d72b3b655
|
[] |
no_license
|
sebastianpenttinen/EZAI
|
a8c284530e25d76682edae9a32ecae235336792f
|
292b7d1f8d3205598efca9da40e0f3fd55db4043
|
refs/heads/master
| 2020-12-07T04:49:35.386277 | 2020-01-13T19:04:30 | 2020-01-13T19:04:30 | 232,636,824 | 0 | 0 | null | 2020-02-05T11:43:16 | 2020-01-08T19:00:31 |
HTML
|
UTF-8
|
Python
| false | false | 1,939 |
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from markdownx.fields import MarkdownxFormField
from .models import ModelDocumentation, MLModel
class SignUpForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=True, help_text='Required')
last_name = forms.CharField(max_length=30, required=True, help_text='Required')
email = forms.EmailField(max_length=254, required=True,
help_text='Required. Inform a valid email address.')
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', )
class ChangeEmail(forms.Form):
email1 = forms.EmailField(label=u'Type new Email')
email2 = forms.EmailField(label=u'Type new Email again')
class CreateModelForm(forms.Form):
title = forms.CharField(required=True, label='Enter the Title of the model')
description = forms.CharField(required=True, label='Enter a description of the model')
docfile = forms.FileField(
label='Select a file',
help_text='max. 42 megabytes'
)
# Can you edit anyone's form while it's just a HiddenInput?
class DocsEditor(forms.ModelForm):
    id = forms.IntegerField(widget=forms.HiddenInput(), required=True)
mlmodel = forms.ModelChoiceField(queryset=MLModel.objects.all(), widget=forms.HiddenInput(), required=False)
documentation = MarkdownxFormField()
class Meta:
model = ModelDocumentation
fields = ('id', 'mlmodel', 'documentation')
class CreateClientForm(forms.Form):
client_name = forms.CharField(max_length=30, required=True, label='Client Name')
ml_model = forms.ModelChoiceField(queryset=MLModel.objects.exclude(reviewed=False), to_field_name="title")
class ManageUsersForm(forms.Form):
key = forms.CharField(max_length=500, widget=forms.HiddenInput())
|
[
"sebastian.penttinen@abo.fi"
] |
sebastian.penttinen@abo.fi
|
e03b7ef67849e583abb795e43e173297706316ff
|
798960eb97cd1d46a2837f81fb69d123c05f1164
|
/symphony/cli/pyworkforce/graphql/input/check_list_category.py
|
8ab69aef97666923166c55f03daa8d9166c133bc
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
kyaaqba/magma
|
36d5fa00ce4f827e6ca5ebd82d97a3d36e5f5b5b
|
fdb7be22a2076f9a9b158c9670a9af6cad68b85f
|
refs/heads/master
| 2023-01-27T12:04:52.393286 | 2020-08-20T20:23:50 | 2020-08-20T20:23:50 | 289,102,268 | 0 | 0 |
NOASSERTION
| 2020-08-20T20:18:42 | 2020-08-20T20:18:41 | null |
UTF-8
|
Python
| false | false | 590 |
py
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from functools import partial
from gql.gql.datetime_utils import DATETIME_FIELD
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import DataClassJsonMixin
from ..input.check_list_item import CheckListItemInput
@dataclass
class CheckListCategoryInput(DataClassJsonMixin):
title: str
checkList: List[CheckListItemInput]
id: Optional[str] = None
description: Optional[str] = None
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
dc582fe201aa5a3ac223587e8b0766e019d4993f
|
6252e82d76c11e3d66c727bda674cccf60976ba8
|
/books/views.py
|
364152523f71552ddd9acf7a49074a3ab92863f7
|
[] |
no_license
|
abhilash01393/IFT593AppliedProject
|
5b9ab264a21281c20603076b70bdbfe063d09818
|
84ac6906a692ffe8e7cfac2af974425cd9aebf49
|
refs/heads/master
| 2020-08-15T23:05:50.934347 | 2019-10-16T00:35:48 | 2019-10-16T00:35:48 | 215,421,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 592 |
py
|
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, "index.html")
def asl_detailed(request):
return render(request, "asl_detailed.html")
def other_works(request):
return render(request, "other_works.html")
def about(request):
return render(request, "about.html")
def gallery(request):
return render(request, "gallery.html")
def blog(request):
return render(request, "blog.html")
def events(request):
return render(request, "event.html")
def contact(request):
return render(request, "contact.html")
|
[
"abhilash.01393@gmail.com"
] |
abhilash.01393@gmail.com
|
d845692f192b0c1297c5a2fc66b8ecae3db58e94
|
9172dc2d96d05bae202b53c8d6d87b10dc814e22
|
/src/pythonDevice/pythonping/icmp.py
|
a5e3aeb73b7fb9ad7dafce7f7ed1a8a74224a097
|
[
"MIT"
] |
permissive
|
somervda/ourProbes
|
f03612ed8a98e58778fd4f301205ba41a456e59f
|
8678b2dfde15016fef087f9eca6bd33bab8fec12
|
refs/heads/master
| 2023-03-11T01:01:16.151654 | 2022-02-24T16:05:10 | 2022-02-24T16:05:10 | 221,022,828 | 0 | 0 |
MIT
| 2023-03-07T11:28:40 | 2019-11-11T16:28:59 |
TypeScript
|
UTF-8
|
Python
| false | false | 7,390 |
py
|
import os
import socket
import struct
import select
import time
def checksum(data):
"""Creates the ICMP checksum as in RFC 1071
:param data: Data to calculate the checksum ofs
:type data: bytes
:return: Calculated checksum
:rtype: int
Divides the data in 16-bits chunks, then make their 1's complement sum"""
subtotal = 0
for i in range(0, len(data)-1, 2):
subtotal += ((data[i] << 8) + data[i+1]) # Sum 16 bits chunks together
if len(data) % 2: # If length is odd
subtotal += (data[len(data)-1] << 8) # Sum the last byte plus one empty byte of padding
while subtotal >> 16: # Add carry on the right until fits in 16 bits
subtotal = (subtotal & 0xFFFF) + (subtotal >> 16)
check = ~subtotal # Performs the one complement
return ((check << 8) & 0xFF00) | ((check >> 8) & 0x00FF) # Swap bytes
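
# Worked example (illustrative comment, not part of the original module):
#   checksum(b'\x01\x02\x03\x04')
#   16-bit chunks: 0x0102 + 0x0304 = 0x0406 (no carry, nothing to fold)
#   one's complement: ~0x0406 & 0xFFFF = 0xFBF9
#   returned with the bytes swapped for the header: 0xF9FB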
class ICMPType:
"""Represents an ICMP type, as combination of type and code
ICMP Types should inherit from this class so that the code can identify them easily.
This is a static class, not meant to be instantiated"""
def __init__(self):
raise TypeError('ICMPType may not be instantiated')
class Types(ICMPType):
class EchoReply(ICMPType):
type_id = 0
ECHO_REPLY = (type_id, 0,)
class DestinationUnreachable(ICMPType):
type_id = 3
NETWORK_UNREACHABLE = (type_id, 0,)
HOST_UNREACHABLE = (type_id, 1,)
PROTOCOL_UNREACHABLE = (type_id, 2,)
PORT_UNREACHABLE = (type_id, 3,)
FRAGMENTATION_REQUIRED = (type_id, 4,)
SOURCE_ROUTE_FAILED = (type_id, 5,)
NETWORK_UNKNOWN = (type_id, 6,)
HOST_UNKNOWN = (type_id, 7,)
SOURCE_HOST_ISOLATED = (type_id, 8,)
NETWORK_ADMINISTRATIVELY_PROHIBITED = (type_id, 9,)
HOST_ADMINISTRATIVELY_PROHIBITED = (type_id, 10,)
NETWORK_UNREACHABLE_TOS = (type_id, 11,)
HOST_UNREACHABLE_TOS = (type_id, 12,)
COMMUNICATION_ADMINISTRATIVELY_PROHIBITED = (type_id, 13,)
HOST_PRECEDENCE_VIOLATION = (type_id, 14,)
PRECEDENCE_CUTOFF = (type_id, 15,)
class SourceQuench(ICMPType):
type_id = 4
SOURCE_QUENCH = (type_id, 0,)
class Redirect(ICMPType):
type_id = 5
FOR_NETWORK = (type_id, 0,)
FOR_HOST = (type_id, 1,)
FOR_TOS_AND_NETWORK = (type_id, 2,)
FOR_TOS_AND_HOST = (type_id, 3,)
class EchoRequest(ICMPType):
type_id = 8
ECHO_REQUEST = (type_id, 0,)
class RouterAdvertisement(ICMPType):
type_id = 9
ROUTER_ADVERTISEMENT = (type_id, 0,)
class RouterSolicitation(ICMPType):
type_id = 10
ROUTER_SOLICITATION = (type_id, 0)
# Aliases
ROUTER_DISCOVERY = ROUTER_SOLICITATION
ROUTER_SELECTION = ROUTER_SOLICITATION
class TimeExceeded(ICMPType):
type_id = 11
TTL_EXPIRED_IN_TRANSIT = (type_id, 0)
FRAGMENT_REASSEMBLY_TIME_EXCEEDED = (type_id, 1)
class BadIPHeader(ICMPType):
type_id = 12
POINTER_INDICATES_ERROR = (type_id, 0)
MISSING_REQUIRED_OPTION = (type_id, 1)
BAD_LENGTH = (type_id, 2)
class Timestamp(ICMPType):
type_id = 13
TIMESTAMP = (type_id, 0)
class TimestampReply(ICMPType):
type_id = 14
TIMESTAMP_REPLY = (type_id, 0)
class InformationRequest(ICMPType):
type_id = 15
INFORMATION_REQUEST = (type_id, 0)
class InformationReply(ICMPType):
type_id = 16
INFORMATION_REPLY = (type_id, 0)
class AddressMaskRequest(ICMPType):
type_id = 17
ADDRESS_MASK_REQUEST = (type_id, 0)
class AddressMaskReply(ICMPType):
type_id = 18
ADDRESS_MASK_REPLY = (type_id, 0)
class Traceroute(ICMPType):
type_id = 30
INFORMATION_REQUEST = (type_id, 30)
class ICMP:
LEN_TO_PAYLOAD = 41 # Ethernet, IP and ICMP header lengths combined
def __init__(self, message_type=Types.EchoReply, payload=None, identifier=None, sequence_number=1):
"""Creates an ICMP packet
:param message_type: Type of ICMP message to send
:type message_type: Union[ICMPType, (int, int), int]
:param payload: utf8 string or bytes payload
:type payload: Union[str, bytes]
:param identifier: ID of this ICMP packet
:type identifier: int"""
self.message_code = 0
if issubclass(message_type, ICMPType):
self.message_type = message_type.type_id
elif isinstance(message_type, tuple):
self.message_type = message_type[0]
self.message_code = message_type[1]
elif isinstance(message_type, int):
self.message_type = message_type
if payload is None:
payload = bytes('1', 'utf8')
elif isinstance(payload, str):
payload = bytes(payload, 'utf8')
self.payload = payload
if identifier is None:
identifier = os.getpid()
self.id = identifier & 0xFFFF # Prevent identifiers bigger than 16 bits
self.sequence_number = sequence_number
self.received_checksum = None
@property
def packet(self):
"""The raw packet with header, ready to be sent from a socket"""
return self._header(check=self.expected_checksum) + self.payload
def _header(self, check=0):
"""The raw ICMP header
:param check: Checksum value
:type check: int
:return: The packed header
:rtype: bytes"""
# TODO implement sequence number
return struct.pack("bbHHh",
self.message_type,
self.message_code,
check,
self.id,
self.sequence_number)
@property
def is_valid(self):
"""True if the received checksum is valid, otherwise False"""
if self.received_checksum is None:
return True
return self.expected_checksum == self.received_checksum
@property
def expected_checksum(self):
"""The checksum expected for this packet, calculated with checksum field set to 0"""
return checksum(self._header() + self.payload)
@property
def header_length(self):
"""Length of the ICMP header"""
return len(self._header())
@staticmethod
def generate_from_raw(raw):
"""Creates a new ICMP representation from the raw bytes
:param raw: The raw packet including payload
:type raw: bytes
:return: An ICMP instance representing the packet
:rtype: ICMP"""
packet = ICMP()
packet.unpack(raw)
return packet
def unpack(self, raw):
"""Unpacks a raw packet and stores it in this object
:param raw: The raw packet, including payload
:type raw: bytes"""
self.message_type, \
self.message_code, \
self.received_checksum, \
self.id, \
sequence = struct.unpack("bbHHh", raw[20:28])
self.payload = raw[28:]
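
if __name__ == '__main__':
    # Illustrative round-trip sketch (not part of the original module): build an
    # echo request, prepend a dummy 20-byte IP header (unpack() skips the first
    # 20 bytes), then parse the raw bytes back and verify the checksum.
    request = ICMP(Types.EchoRequest, payload='ping', identifier=1)
    raw = b'\x00' * 20 + request.packet
    echoed = ICMP.generate_from_raw(raw)
    print(echoed.is_valid)  # True: received checksum matches the expected one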
|
[
"39265971+somervda@users.noreply.github.com"
] |
39265971+somervda@users.noreply.github.com
|
f954b645511f03fecf40aceb25a85aad5330a746
|
5e0852a7f5a734f061c9943eb177a808f70ff377
|
/venv/Lib/site-packages/django/test/testcases.py
|
206e35f06e99e14984f1725bc55aad679f6497bd
|
[] |
no_license
|
krishna120801/YourThoughts
|
44efa58aa6eff429b20a0d87da900017b40658e6
|
4231da4062c87a6ebf2c4461746c22f99ad2a64d
|
refs/heads/master
| 2022-12-27T01:47:42.920631 | 2020-10-16T18:28:18 | 2020-10-16T18:28:18 | 304,707,236 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 60,770 |
py
|
import asyncio
import difflib
import json
import posixpath
import sys
import threading
import unittest
from collections import Counter
from contextlib import contextmanager
from copy import copy
from difflib import get_close_matches
from functools import wraps
from unittest.suite import _DebugResult
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse,
)
from urllib.request import url2pathname
from asgiref.sync import async_to_sync
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import AsyncClient, Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils.functional import classproperty
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Put value into a list if it's not already one. Return an empty list if
value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
'%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1)
)
)
)
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if self.rendered_templates:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names)
)
else:
                message += ' No template was rendered.'
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _DatabaseFailure:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise AssertionError(self.message)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
async_client_class = AsyncClient
_overridden_settings = None
_modified_settings = None
databases = set()
_disallowed_database_msg = (
'Database %(operation)s to %(alias)r are not allowed in SimpleTestCase '
'subclasses. Either subclass TestCase or TransactionTestCase to ensure '
'proper test isolation or add %(alias)r to %(test)s.databases to silence '
'this failure.'
)
_disallowed_connection_methods = [
('connect', 'connections'),
('temporary_connection', 'connections'),
('cursor', 'queries'),
('chunked_cursor', 'queries'),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
cls._add_databases_failures()
@classmethod
def _validate_databases(cls):
if cls.databases == '__all__':
return frozenset(connections)
for alias in cls.databases:
if alias not in connections:
message = '%s.%s.databases refers to %r which is not defined in settings.DATABASES.' % (
cls.__module__,
cls.__qualname__,
alias,
)
close_matches = get_close_matches(alias, list(connections))
if close_matches:
message += ' Did you mean %r?' % close_matches[0]
raise ImproperlyConfigured(message)
return frozenset(cls.databases)
@classmethod
def _add_databases_failures(cls):
cls.databases = cls._validate_databases()
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, operation in cls._disallowed_connection_methods:
message = cls._disallowed_database_msg % {
'test': '%s.%s' % (cls.__module__, cls.__qualname__),
'alias': alias,
'operation': operation,
}
method = getattr(connection, name)
setattr(connection, name, _DatabaseFailure(method, message))
@classmethod
def _remove_databases_failures(cls):
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, _ in cls._disallowed_connection_methods:
method = getattr(connection, name)
setattr(connection, name, method.wrapped)
@classmethod
def tearDownClass(cls):
cls._remove_databases_failures()
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super().tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
self._setup_and_call(result)
def debug(self):
"""Perform the same as __call__(), without catching the exception."""
debug_result = _DebugResult()
self._setup_and_call(debug_result, debug=True)
def _setup_and_call(self, result, debug=False):
"""
Perform the following in order: pre-setup, run test, post-teardown,
skipping pre/post hooks if test is set to be skipped.
If debug=True, reraise any errors in setup and use super().debug()
instead of __call__() to run the test.
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
# Convert async test methods.
if asyncio.iscoroutinefunction(testMethod):
setattr(self, self._testMethodName, async_to_sync(testMethod))
if not skipped:
try:
self._pre_setup()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
if debug:
super().debug()
else:
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
self.async_client = self.async_client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
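
    # Illustrative usage (comment added for clarity; not part of the library source):
    #   with self.settings(ALLOWED_HOSTS=['testserver']):
    #       ...  # runs with the overridden setting, which is reverted on exit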
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting
        and reverts back to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, msg_prefix='',
fetch_redirect_response=True):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
# Get the redirection page, using the same client that was used
# to obtain the original response.
extra = response.client.extra or {}
redirect_response = response.client.get(
path,
QueryDict(query),
secure=(scheme == 'https'),
**extra,
)
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
self.assertURLEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
def assertURLEqual(self, url1, url2, msg_prefix=''):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment))
self.assertEqual(
normalize(url1), normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2)
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
successfully, (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
successfully, (i.e., the HTTP status code was as expected) and that
        ``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Assert that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors or 'none')
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Assert that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
not non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
not non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
            # Use this template with the context manager.
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
        Assert that the template with the provided name was used in rendering
        the response. Also usable as a context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a templates used to render"
" the response. Actual templates(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
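    # Usage sketch (hypothetical test): both call styles are supported.
    #
    #     response = self.client.get('/articles/')
    #     self.assertTemplateUsed(response, 'articles/index.html', count=1)
    #     with self.assertTemplateUsed('articles/index.html'):
    #         self.client.get('/articles/')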
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
        Assert that the template with the provided name was NOT used in
        rendering the response. Also usable as a context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs):
callable_obj = None
if args:
callable_obj, *args = args
cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises, 'exception', expected_exception, expected_message,
*args, **kwargs
)
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns, 'warning', expected_warning, expected_message,
*args, **kwargs
)
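    # Usage sketch (hypothetical test): pass a callable plus its arguments, or
    # use the returned context manager.
    #
    #     self.assertRaisesMessage(ValueError, 'invalid literal', int, 'abc')
    #     with self.assertRaisesMessage(ValueError, 'invalid literal'):
    #         int('abc')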
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, 'required': False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages['required']]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
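    # Usage sketch (hypothetical test) for a built-in field:
    #
    #     from django.forms import EmailField
    #     self.assertFieldOutput(
    #         EmailField,
    #         valid={'a@a.com': 'a@a.com'},
    #         invalid={'aaa': ['Enter a valid email address.']},
    #     )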
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
str(dom1).splitlines(), str(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
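    # Usage sketch (hypothetical test): the comparison is semantic, so
    # attribute order and most whitespace differences are ignored.
    #
    #     self.assertHTMLEqual('<p class="a" id="b">x</p>', '<p id="b" class="a">x</p>')
    #     self.assertInHTML('<b>hi</b>', '<div><b>hi</b><b>hi</b></div>', count=2)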
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
Usual JSON non-significant whitespace rules apply as the heavyweight
is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
Usual JSON non-significant whitespace rules apply as the heavyweight
is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
databases = {DEFAULT_DB_ALIAS}
_disallowed_database_msg = (
'Database %(operation)s to %(alias)r are not allowed in this test. '
'Add %(alias)r to %(test)s.databases to ensure proper test isolation '
'and silence this failure.'
)
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# Only consider allowed database aliases, including mirrors or not.
return [
alias for alias in connections
if alias in cls.databases and (
include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
)
]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# Provide replica initial data from migrated apps, if needed.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test things:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
# For example qs.iterator() could be passed as qs, but it does not
# have 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
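    # Usage sketch (hypothetical models): either wrap a callable or use the
    # returned context manager.
    #
    #     self.assertNumQueries(1, Article.objects.count)
    #     with self.assertNumQueries(2):
    #         list(Article.objects.all())
    #         list(Author.objects.all())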
def connections_support_transactions(aliases=None):
"""
Return whether or not all (or specified) connections support
transactions.
"""
conns = connections.all() if aliases is None else (connections[alias] for alias in aliases)
return all(conn.features.supports_transactions for conn in conns)
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not cls._databases_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
cls._remove_databases_failures()
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
cls._remove_databases_failures()
raise
@classmethod
def tearDownClass(cls):
if cls._databases_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not self._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
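# Usage sketch (hypothetical model): data created in setUpTestData() is set up
# once per class and undone by the class-wide atomic rollback, which is much
# faster than recreating it in setUp() for every test method.
#
#     class ArticleTests(TestCase):
#         @classmethod
#         def setUpTestData(cls):
#             cls.article = Article.objects.create(title='hello')
#
#         def test_title(self):
#             self.assertEqual(self.article.title, 'hello')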
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason, name):
def decorator(test_func):
nonlocal condition
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if (args and isinstance(args[0], unittest.TestCase) and
connection.alias not in getattr(args[0], 'databases', {})):
raise ValueError(
"%s cannot be used on %s as %s doesn't allow queries "
"against the %r database." % (
name,
args[0],
args[0].__class__.__qualname__,
connection.alias,
)
)
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
databases = getattr(test_item, 'databases', None)
if not databases or connection.alias not in databases:
# Defer raising to allow importing test class's module.
def condition():
raise ValueError(
"%s cannot be used on %s as it doesn't allow queries "
"against the '%s' database." % (
name, test_item, connection.alias,
)
)
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get('__unittest_skip__')
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features),
'skipIfDBFeature',
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features),
'skipUnlessDBFeature',
)
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features),
'skipUnlessAnyDBFeature',
)
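# Usage sketch (hypothetical tests): the decorators defer the feature check
# until the test runs against a configured database.
#
#     @skipUnlessDBFeature('supports_transactions')
#     def test_transactional_behavior(self):
#         ...
#
#     @skipIfDBFeature('interprets_empty_strings_as_nulls')
#     def test_empty_string_is_not_null(self):
#         ...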
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
requests received, so as to not clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live http server while the tests are running."""
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server()
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self):
return ThreadedWSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Do basically the same as TransactionTestCase but also launch a live HTTP
server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory sqlite)
and each thread needs to commit all their transactions so that the other
thread can see the changes.
"""
host = 'localhost'
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def setUpClass(cls):
super().setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
# Explicitly enable thread-shareability for this connection
conn.inc_thread_sharing()
connections_override[conn.alias] = conn
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={'append': cls.allowed_host},
)
cls._live_server_modified_settings.enable()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _tearDownClassInternal(cls):
# There may not be a 'server_thread' attribute if setUpClass() for some
# reasons has raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
# Restore sqlite in-memory database connections' non-shareability.
for conn in cls.server_thread.connections_override.values():
conn.dec_thread_sharing()
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
cls._live_server_modified_settings.disable()
super().tearDownClass()
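    # Usage sketch (hypothetical Selenium test): while the server thread runs,
    # its address is exposed through the live_server_url class property.
    #
    #     class HomepageTests(LiveServerTestCase):
    #         def test_homepage(self):
    #             self.selenium.get(self.live_server_url + '/')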
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._lockfile.close()
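    # Usage sketch: test cases that share a resource point their lockfile at
    # the same existing file, commonly the defining module itself.
    #
    #     class BaseSharedResourceTests(SerializeMixin, TestCase):
    #         lockfile = __file__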
|
[
"ksahu5505@gmail.com"
] |
ksahu5505@gmail.com
|
f8334ff2a60276a5931b9054fb616a9d7fde7506
|
31190ae0dc5d8b2d781c78ffe39904b3d522b747
|
/General/Reading_TXT_File/reading_txt_file.py
|
32bfdf510aa4529352e0fbd2f8339cd56e893519
|
[] |
no_license
|
ldcarney/Student_Work
|
d7ac795e2a78010c11d0ee848a7fa27c4c36c27b
|
9fae2a4df468e9470320f9772a38c8e82b396cb3
|
refs/heads/main
| 2023-02-26T09:24:32.131684 | 2021-01-28T01:43:45 | 2021-01-28T01:43:45 | 333,561,061 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,264 |
py
|
# CS1300 Fall 2017
# Author: Liam Carney
# Reading through a text file
def read_users(file_name):
    user = {}
    try:
        with open(file_name) as f:
            for line in f:
                ratings = []
                line = line.strip().split()
                name = line[0]
                for i in line[1:]:
                    ratings.append(int(i))
                user[name] = ratings
        return user
    except IOError:
        return None
def read_books(file_name):
    read = []
    try:
        with open(file_name) as f:
            for line in f:
                # Each line is "author,title"; store as [title, author].
                line = line.strip().split(',')
                read.append([line[1], line[0]])
        return read
    except IOError:
        return None
def calculate_average_rating(user):
    averages = []
    users = list(user.keys())
    ind = users[0]
    for i in range(len(user[ind])):
        count = 0
        sum1 = 0
        for u in users:
            if user[u][i] != 0:
                sum1 = sum1 + user[u][i]
                count = count + 1
        # Guard against books nobody has rated to avoid dividing by zero.
        avg = sum1 / float(count) if count else 0.0
        averages.append(avg)
    return averages
def lookup_average_rating(book_index, book_list, ave_rating_list):
return ("({:.2f}) {} by {}").format(ave_rating_list[book_index], book_list[book_index][0],book_list[book_index][1])
#PART_2 follow here
class Recommender:
#Constructor here
def __init__(self, file_name, user_file):
self.book_list = []
self.user_dictionary = {}
self.average_rating_list = []
self.read_books(file_name)
self.read_users(user_file)
self.calculate_average_rating()
    def read_books(self, file_name):
        try:
            with open(file_name) as f:
                for line in f:
                    line = line.strip().split(',')
                    self.book_list.append([line[1], line[0]])
        except IOError:
            return None
    def read_users(self, file_name):
        try:
            with open(file_name) as f:
                for line in f:
                    ratings = []
                    line = line.strip().split()
                    name = line[0]
                    for i in line[1:]:
                        ratings.append(int(i))
                    self.user_dictionary[name] = ratings
        except IOError:
            return None
    def calculate_average_rating(self):
        self.average_rating_list = []
        users = list(self.user_dictionary.keys())
        ind = users[0]
        for i in range(len(self.user_dictionary[ind])):
            count = 0
            sum1 = 0
            for u in users:
                if self.user_dictionary[u][i] != 0:
                    sum1 = sum1 + self.user_dictionary[u][i]
                    count = count + 1
            avg = sum1 / float(count) if count else 0.0
            self.average_rating_list.append(avg)
        return self.average_rating_list
def lookup_average_rating(self, book_index):
return ("({:.2f}) {} by {}").format(self.average_rating_list[book_index], self.book_list[book_index][0], self.book_list[book_index][1])
def calc_similarity(self, user1, user2):
r1 = self.user_dictionary[user1]
r2 = self.user_dictionary[user2]
dp = 0
for i in range (len(r1)):
dp = dp + r1[i] * r2[i]
return dp
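    # For example (hypothetical ratings), the similarity is a plain dot
    # product, so users who rate the same books alike score higher:
    #     [5, 0, 3] . [4, 1, 3] = 5*4 + 0*1 + 3*3 = 29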
def get_most_similar_user(self, current_user_id):
highest = 0
best_matched = ""
users = self.user_dictionary.keys()
for user in users:
if user != current_user_id:
score = self.calc_similarity(user, current_user_id)
if score > highest:
highest = score
best_matched = user
return best_matched
    def recommend_books(self, current_user_id):
        # Minimal completion: suggest titles the most similar user rated
        # positively that the current user has not rated (0 means unread).
        similar = self.user_dictionary[self.get_most_similar_user(current_user_id)]
        current = self.user_dictionary[current_user_id]
        recommendations_list = [
            self.lookup_average_rating(i)
            for i in range(len(current))
            if current[i] == 0 and similar[i] > 0
        ]
        return recommendations_list
def main():
book_list = read_books("book.txt")
user_dict = read_users("ratings.txt")
ave_rating_list = calculate_average_rating(user_dict)
print(lookup_average_rating(0, book_list, ave_rating_list))
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
ldcarney.noreply@github.com
|
02de8197a9ad10a26b78049cde20ec558b846bf6
|
3e3df153b0f5fc909195f544ee3390d4170cf4b0
|
/sets.py
|
2777cdb846dccc3b0b0524307e5be3f7435b4fec
|
[] |
no_license
|
agung037/python101
|
d61a30e5c42ad17a102fe6814a8edc371bdd087f
|
aa99f366f4ef377c8833ceb65ad8d51f5c06c1f4
|
refs/heads/master
| 2023-04-19T19:32:47.321623 | 2021-05-24T16:48:11 | 2021-05-24T16:48:11 | 370,046,298 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
# Create an empty set
s = set()
# add elements to the set
s.add(1)
s.add(2)
s.add(3)
s.add(4)
print(s)
print(f"The set has {len(s)} elements")
|
[
"agungk878@gmail.com"
] |
agungk878@gmail.com
|
998364c6abc641c877b02a569f6db4b16d99aa60
|
5b51e4a72055b09b9b1a9963c7f3bb88a6457a9f
|
/tast02.py
|
039cf7f856ca78b494b24dfb5f3fe5099b0557a9
|
[] |
no_license
|
nii5/lesson03
|
adc64fd78ed828ebb4eb6944178d6d90befa1988
|
febceb26c81bf3911d351d30b6a8b1f9379abe78
|
refs/heads/master
| 2022-10-21T21:45:23.075190 | 2020-06-19T12:50:30 | 2020-06-19T12:50:30 | 272,795,054 | 0 | 0 | null | 2020-06-19T12:50:53 | 2020-06-16T19:29:59 |
Python
|
UTF-8
|
Python
| false | false | 230 |
py
|
def my_func(name, surname, year, city, email, phone):
    print(name, surname, year, city, email, phone)
my_func(name='Иван', surname='Иванов', year='1990', city='Иваново', email='ivanov@mail.ru', phone='+79624564556')
|
[
"noreply@github.com"
] |
nii5.noreply@github.com
|
6a51492ded638f3df2d2790b30ce3d10c9e269b9
|
5eddc2a278cb8f54da00db186c784e03a7b3011f
|
/csaapi/apps/farm_site/services.py
|
5fa5b1565b3fc591b00038169efacb6de7334198
|
[] |
no_license
|
quinceleaf/csa-member-management
|
350a48262cead1f03199c5c021a958fb410a791b
|
8df57aa190935e79916b64d2a3de9e4e6c2d357d
|
refs/heads/main
| 2023-06-18T11:10:47.633613 | 2021-07-20T03:47:55 | 2021-07-20T03:47:55 | 387,568,813 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 88 |
py
|
def create_subscription(**kwargs):
    pass
# fan out payments
# fan out deliveries
|
[
"brian@quinceleaf.dev"
] |
brian@quinceleaf.dev
|
0f1624657bfcfaafd9fa23f44a27e00775d0dc28
|
4bfa09b427ed1ec3c3f645f66e32f1e85ba59699
|
/scripts/mell/utils/io_utils.py
|
8578e04b89514318275f7ca58e7569d5d0d9340d
|
[
"Apache-2.0"
] |
permissive
|
Chengkai-Huang/EasyTransfer
|
c3328f33d408783cc08eebf5a4ed43becf3d5dec
|
6909238c45b5708968f955b7d971a79b25434597
|
refs/heads/master
| 2023-07-19T02:20:36.864088 | 2021-09-15T09:45:48 | 2021-09-15T09:45:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 26,004 |
py
|
# coding=utf-8
# Copyright (c) 2020 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import time
import shutil
import hashlib
from io import StringIO, BytesIO
from functools import lru_cache
from contextlib import contextmanager
from typing import List, Union
from datetime import datetime, timedelta
from tqdm import tqdm
from tqdm.utils import CallbackIOWrapper
from .logger import logger
class IO:
@staticmethod
def register(options):
pass
def open(self, path: str, mode: str = 'r', encoding: str = 'utf-8'):
raise NotImplementedError
def exists(self, path: str) -> bool:
raise NotImplementedError
def move(self, src: str, dst: str):
raise NotImplementedError
def copy(self, src: str, dst: str):
raise NotImplementedError
def copytree(self, src: str, dst: str):
raise NotImplementedError
def makedirs(self, path: str, exist_ok=True):
raise NotImplementedError
def remove(self, path: str):
raise NotImplementedError
def rmtree(self, path: str):
raise NotImplementedError
def listdir(self, path: str, recursive=False, full_path=False, contains=None):
raise NotImplementedError
def isdir(self, path: str) -> bool:
raise NotImplementedError
def isfile(self, path: str) -> bool:
raise NotImplementedError
def abspath(self, path: str) -> str:
raise NotImplementedError
def last_modified(self, path: str) -> datetime:
raise NotImplementedError
def last_modified_str(self, path: str) -> str:
raise NotImplementedError
def size(self, path: str) -> int:
raise NotImplementedError
def md5(self, path: str) -> str:
hash_md5 = hashlib.md5()
with self.open(path, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
re_remote = re.compile(r'(oss|https?)://')
def islocal(self, path: str) -> bool:
return not self.re_remote.match(path.lstrip())
def is_writable(self, path):
new_dir = ''
if self.islocal(path) and not self.exists(path):
new_dir = path
while True:
parent = os.path.dirname(new_dir)
if self.exists(parent):
break
new_dir = parent
self.makedirs(path)
flag = self._is_writable(path)
if new_dir and self.exists(new_dir):
self.remove(new_dir)
return flag
@lru_cache(maxsize=8)
def _is_writable(self, path):
import oss2
try:
tmp_file = os.path.join(path, f'.tmp.{time.time()}')
with self.open(tmp_file, 'w') as f:
f.write('test line.')
self.remove(tmp_file)
except (OSError, oss2.exceptions.RequestError, oss2.exceptions.ServerError):
return False
return True
class DefaultIO(IO):
__name__ = 'DefaultIO'
def _check_path(self, path):
if not self.islocal(path):
raise RuntimeError(
'OSS Credentials must be provided to use oss_io_config. ')
def open(self, path, mode='r', encoding="utf-8"):
self._check_path(path)
path = self.abspath(path)
if mode.endswith('b'):
return open(path, mode=mode)
else:
return open(path, mode=mode, encoding=encoding)
def exists(self, path):
self._check_path(path)
path = self.abspath(path)
return os.path.exists(path)
def move(self, src, dst):
self._check_path(src)
self._check_path(dst)
src = self.abspath(src)
dst = self.abspath(dst)
if src == dst:
return
shutil.move(src, dst)
def copy(self, src, dst):
self._check_path(src)
self._check_path(dst)
src = self.abspath(src)
dst = self.abspath(dst)
try:
shutil.copyfile(src, dst)
except shutil.SameFileError:
pass
def copytree(self, src, dst):
self._check_path(src)
self._check_path(dst)
src = self.abspath(src).rstrip('/')
dst = self.abspath(dst).rstrip('/')
if src == dst:
return
self.makedirs(dst)
created_dir = {dst}
for file in self.listdir(src, recursive=True):
src_file = os.path.join(src, file)
dst_file = os.path.join(dst, file)
dst_dir = os.path.dirname(dst_file)
if dst_dir not in created_dir:
self.makedirs(dst_dir)
created_dir.add(dst_dir)
self.copy(src_file, dst_file)
def makedirs(self, path, exist_ok=True):
self._check_path(path)
path = self.abspath(path)
os.makedirs(path, exist_ok=exist_ok)
def remove(self, path):
self._check_path(path)
path = self.abspath(path)
if os.path.isdir(path):
self.rmtree(path)
else:
os.remove(path)
def rmtree(self, path):
shutil.rmtree(path)
def listdir(self, path, recursive=False, full_path=False, contains: Union[str, List[str]] = None):
self._check_path(path)
path = self.abspath(path)
if isinstance(contains, str):
contains = [contains]
elif not contains:
contains = ['']
if recursive:
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
if not full_path:
prefix_len = len(path.rstrip('/')) + 1
files = [file[prefix_len:] for file in files]
else:
files = os.listdir(path)
if full_path:
files = [os.path.join(path, file) for file in files]
files = [file for file in files if any(keyword in file for keyword in contains)]
return files
def isdir(self, path):
self._check_path(path)
return os.path.isdir(path)
def isfile(self, path):
self._check_path(path)
return os.path.isfile(path)
def abspath(self, path):
self._check_path(path)
return os.path.abspath(path)
def last_modified(self, path):
return datetime.fromtimestamp(float(self.last_modified_str(path)))
def last_modified_str(self, path):
self._check_path(path)
return str(os.path.getmtime(path))
def size(self, path: str) -> int:
return os.stat(path).st_size
class OSSIO(DefaultIO):
"Mixed IO module to support both system-level and OSS IO methods"
__name__ = 'OSSIO'
def __init__(self,
access_key_id: str,
access_key_secret: str,
hosts: Union[str, List[str]],
buckets: Union[str, List[str]]):
from oss2 import Auth, Bucket, ObjectIterator
super().__init__()
self.ObjectIterator = ObjectIterator
self.auth = Auth(access_key_id, access_key_secret)
if isinstance(buckets, str):
buckets = [buckets]
if isinstance(hosts, str):
            hosts = [hosts] * len(buckets)
else:
assert len(hosts) == len(buckets), 'number of hosts and number of buckets should be the same'
self.buckets = {
bucket_name: Bucket(self.auth, host, bucket_name)
for host, bucket_name in zip(hosts, buckets)
}
self.oss_pattern = re.compile(r'oss://([^/]+)/(.+)')
def _split_name(self, path):
m = self.oss_pattern.match(path)
if not m:
raise IOError(f'invalid oss path: "{path}", should be "oss://<bucket_name>/path"')
bucket_name, path = m.groups()
path = path.replace('//', '/')
return bucket_name, path
def _split(self, path):
bucket_name, path = self._split_name(path)
try:
bucket = self.buckets[bucket_name]
except KeyError:
raise IOError(f'Bucket {bucket_name} not registered in oss_io_config')
return bucket, path
def open(self, full_path, mode='r', encoding='utf-8'):
if not full_path.startswith('oss://'):
return super().open(full_path, mode)
bucket, path = self._split(full_path)
with mute_stderr():
path_exists = bucket.object_exists(path)
if 'w' in mode:
if path_exists:
bucket.delete_object(path)
if 'b' in mode:
return BinaryOSSFile(bucket, path)
return OSSFile(bucket, path)
elif mode == 'a':
position = bucket.head_object(path).content_length if path_exists else 0
return OSSFile(bucket, path, position=position)
else:
if not path_exists:
raise FileNotFoundError(full_path)
obj = bucket.get_object(path)
# # auto cache large files to avoid memory issues
# if obj.content_length > 200 * 1024 ** 2: # 200M
# path = cache_file(full_path)
# return super().open(path, mode)
if obj.content_length > 200 * 1024 ** 2: # 200M
with tqdm(total=obj.content_length, unit='B', unit_scale=True, unit_divisor=1024, leave=False,
desc='reading ' + os.path.basename(full_path)) as t:
obj = CallbackIOWrapper(t.update, obj, "read")
data = obj.read()
else:
                data = obj.read()
if mode == 'rb':
return NullContextWrapper(BytesIO(data))
else:
assert mode == 'r'
return NullContextWrapper(StringIO(data.decode()))
def exists(self, path):
if not path.startswith('oss://'):
return super().exists(path)
bucket, _path = self._split(path)
if not path.endswith('/'):
# if file exists
exists = self._obj_exists(bucket, _path)
else:
try:
self.listdir(path)
exists = True
except FileNotFoundError:
exists = False
return exists
def _obj_exists(self, bucket, path):
with mute_stderr():
return bucket.object_exists(path)
def move(self, src, dst):
if not src.startswith('oss://') and not dst.startswith('oss://'):
return super().move(src, dst)
if src == dst:
return
self.copy(src, dst)
self.remove(src)
def copy(self, src, dst):
raw_src, raw_dst = str(src), str(dst)
try:
cloud_src = src.startswith('oss://')
cloud_dst = dst.startswith('oss://')
if not cloud_src and not cloud_dst:
return super().copy(src, dst)
if src == dst:
return
# download
if cloud_src and not cloud_dst:
target_dir, _ = os.path.split(dst)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
bucket, src = self._split(src)
obj = bucket.get_object(src)
if obj.content_length > 100 * 1024 ** 2: # 100M
with oss_progress('downloading') as callback:
bucket.get_object_to_file(src, dst, progress_callback=callback)
else:
bucket.get_object_to_file(src, dst)
return
bucket, dst = self._split(dst)
# upload
if cloud_dst and not cloud_src:
src_size = os.stat(src).st_size
if src_size > 5 * 1024 ** 3: # 5G
raise RuntimeError(f'A file > 5G cannot be uploaded to OSS. Please split your file first.\n{src}')
if src_size > 100 * 1024 ** 2: # 100M
with oss_progress('uploading') as callback:
bucket.put_object_from_file(dst, src, progress_callback=callback)
else:
bucket.put_object_from_file(dst, src)
return
# copy between oss paths
src_bucket, src = self._split(src)
total_size = src_bucket.head_object(src).content_length
if src_bucket.get_bucket_location().location != bucket.get_bucket_location().location:
import tempfile
local_tmp = os.path.join(tempfile.gettempdir(), src)
self.copy(f'oss://{src_bucket.bucket_name}/{src}', local_tmp)
self.copy(local_tmp, f'oss://{bucket.bucket_name}/{dst}')
self.remove(local_tmp)
return
if total_size < 1024 ** 3 or src_bucket != bucket: # 1GB
bucket.copy_object(src_bucket.bucket_name, src, dst)
else:
# multipart copy
from oss2.models import PartInfo
from oss2 import determine_part_size
part_size = determine_part_size(total_size, preferred_size=100 * 1024)
upload_id = bucket.init_multipart_upload(dst).upload_id
parts = []
part_number = 1
offset = 0
while offset < total_size:
num_to_upload = min(part_size, total_size - offset)
byte_range = (offset, offset + num_to_upload - 1)
result = bucket.upload_part_copy(bucket.bucket_name, src, byte_range, dst, upload_id,
part_number)
parts.append(PartInfo(part_number, result.etag))
offset += num_to_upload
part_number += 1
bucket.complete_multipart_upload(dst, upload_id, parts)
        except Exception as e:
            print("{}".format(e))
            print("Copy failed because OSS auth is not fully opened. Falling back to download-then-upload...")
try:
self.download(raw_src, ".easy_distill_tmp_file")
self.upload(".easy_distill_tmp_file", raw_dst)
print("Copying done")
except Exception as e:
print("{}".format(e))
def copytree(self, src, dst):
cloud_src = src.startswith('oss://')
cloud_dst = dst.startswith('oss://')
if not cloud_src and not cloud_dst:
return super().copytree(src, dst)
if cloud_dst:
src_files = self.listdir(src, recursive=True)
max_len = min(max(map(len, src_files)), 50)
with tqdm(src_files, desc='uploading', leave=False) as progress:
for file in progress:
progress.set_postfix({'file': f'{file:-<{max_len}}'[:max_len]})
self.copy(os.path.join(src, file), os.path.join(dst, file))
else:
assert cloud_src and not cloud_dst
self.makedirs(dst)
created_dir = {dst}
src_files = self.listdir(src, recursive=True)
max_len = min(max(map(len, src_files)), 50)
with tqdm(src_files, desc='downloading', leave=False) as progress:
for file in progress:
src_file = os.path.join(src, file)
dst_file = os.path.join(dst, file)
dst_dir = os.path.dirname(dst_file)
if dst_dir not in created_dir:
self.makedirs(dst_dir)
created_dir.add(dst_dir)
progress.set_postfix({'file': f'{file:-<{max_len}}'[:max_len]})
self.copy(src_file, dst_file)
def listdir(self, path, recursive=False, full_path=False, contains: Union[str, List[str]] = None):
if not path.startswith('oss://'):
return super().listdir(path, recursive, full_path, contains)
if isinstance(contains, str):
contains = [contains]
elif not contains:
contains = ['']
bucket, path = self._split(path)
path = path.rstrip('/') + '/'
files = [obj.key for obj in self.ObjectIterator(bucket, prefix=path, delimiter='' if recursive else '/')]
try:
files.remove(path)
except ValueError:
pass
if not files:
if not self.isdir(path):
raise FileNotFoundError(f'No such directory: oss://{bucket.bucket_name}/{path}')
if full_path:
files = [f'oss://{bucket.bucket_name}/{file}' for file in files]
else:
files = [file[len(path):] for file in files]
files = [file for file in files if any(keyword in file for keyword in contains)]
return files
def _remove_obj(self, path):
bucket, path = self._split(path)
with mute_stderr():
bucket.delete_object(path)
def remove(self, path):
if not path.startswith('oss://'):
return super().remove(path)
if self.isfile(path):
self._remove_obj(path)
else:
return self.rmtree(path)
def rmtree(self, path):
if not path.startswith('oss://'):
return super().rmtree(path)
# have to delete its content first before delete the directory itself
for file in self.listdir(path, recursive=True, full_path=True):
print(f'delete {file}')
self._remove_obj(file)
if self.exists(path):
# remove the directory itself
if not path.endswith('/'):
path += '/'
self._remove_obj(path)
def makedirs(self, path, exist_ok=True):
# there is no need to create directory in oss
if not path.startswith('oss://'):
return super().makedirs(path)
def isdir(self, path):
if not path.startswith('oss://'):
return super().isdir(path)
return self.exists(path.rstrip('/') + '/')
def isfile(self, path):
if not path.startswith('oss://'):
            return super().isfile(path)
return self.exists(path) and not self.isdir(path)
def abspath(self, path):
if not path.startswith('oss://'):
return super().abspath(path)
return path
def authorize(self, path):
if not path.startswith('oss://'):
raise ValueError('Only oss path can use "authorize"')
import oss2
bucket, path = self._split(path)
bucket.put_object_acl(path, oss2.OBJECT_ACL_PUBLIC_READ)
def last_modified(self, path):
if not path.startswith('oss://'):
return super().last_modified(path)
return datetime.strptime(self.last_modified_str(path), r'%a, %d %b %Y %H:%M:%S %Z') + timedelta(hours=8)
def last_modified_str(self, path):
if not path.startswith('oss://'):
return super().last_modified_str(path)
bucket, path = self._split(path)
return bucket.get_object_meta(path).headers['Last-Modified']
def size(self, path: str) -> int:
if not path.startswith('oss://'):
return super().size(path)
bucket, path = self._split(path)
return int(bucket.get_object_meta(path).headers['Content-Length'])
def download(self, oss_path, local_path):
bucket, path = self._split(oss_path)
bucket.get_object_to_file(path, local_path)
def upload(self, local_path, oss_path):
bucket, path = self._split(oss_path)
bucket.put_object_from_file(path, local_path)
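# Usage sketch (hypothetical credentials and bucket): an OSSIO behaves like
# DefaultIO, with oss:// paths routed to the registered bucket.
#
#     io = OSSIO(access_key_id='AK', access_key_secret='SK',
#                hosts='http://oss-cn-hangzhou.aliyuncs.com', buckets='my-bucket')
#     with io.open('oss://my-bucket/data/train.txt') as f:
#         lines = f.read().splitlines()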
@contextmanager
def oss_progress(desc):
progress = None
def callback(i, n):
nonlocal progress
if progress is None:
progress = tqdm(total=n, unit='B', unit_scale=True, unit_divisor=1024,
leave=False, desc=desc, mininterval=1.0, maxinterval=5.0)
progress.update(i - progress.n)
yield callback
if progress is not None:
progress.close()
def parse_oss_buckets(buckets):
if "http" in buckets:
import requests
import traceback
try:
r = requests.get(buckets, allow_redirects=True)
bucket_name = None
endpoint = None
access_key_id = None
access_key_secret = None
for line in r.content.decode("utf-8").split("\n"):
if not line.strip():
continue
key, val = line.strip().split("=")
if key == "bucket":
bucket_name = val
elif key == "host":
endpoint = val
elif key == "access_key_id":
access_key_id = val
elif key == "access_key_secret":
access_key_secret = val
else:
raise RuntimeError
assert bucket_name is not None and endpoint is not None and \
access_key_id is not None and access_key_secret is not None
except Exception:
traceback.print_exc()
raise RuntimeError("Fetch AK file %s failed" % buckets)
else:
bucket_name, role_arn_and_host = buckets.split("?")
access_key_id, access_key_secret, endpoint = role_arn_and_host.split("&")
bucket_name = bucket_name.replace("oss://", "").strip("'").strip('"').split("/")[0]
access_key_id = access_key_id.split("=")[-1]
access_key_secret = access_key_secret.split("=")[-1]
endpoint = "http://" + endpoint.split("=")[-1]
return access_key_id, access_key_secret, [endpoint], [bucket_name]
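# For example (hypothetical credentials), the inline query-string format is
# split on '?', '&' and '=', so
#
#     parse_oss_buckets('oss://my-bucket?id=AK&secret=SK&host=oss-cn-hangzhou.aliyuncs.com')
#
# returns ('AK', 'SK', ['http://oss-cn-hangzhou.aliyuncs.com'], ['my-bucket']).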
class OSSFile:
def __init__(self, bucket, path, position=0):
self.position = position
self.bucket = bucket
self.path = path
self.buffer = StringIO()
def write(self, content):
# without a "with" statement, the content is written immediately without buffer
# when writing a large batch of contents at a time, this will be quite slow
import oss2
buffer = self.buffer.getvalue()
if buffer:
content = buffer + content
self.buffer.close()
self.buffer = StringIO()
try:
result = self.bucket.append_object(self.path, self.position, content)
self.position = result.next_position
except oss2.exceptions.PositionNotEqualToLength:
raise RuntimeError(
f'Race condition detected. It usually means multiple programs were writing to the same file'
f'oss://{self.bucket.bucket_name}/{self.path} (Error 409: PositionNotEqualToLength)')
except (oss2.exceptions.RequestError, oss2.exceptions.ServerError) as e:
self.buffer.write(content)
            logger.info(str(e) + f' when writing to oss://{self.bucket.bucket_name}/{self.path}. Content buffered.')
def flush(self, retry=0):
import oss2
try:
self.bucket.append_object(self.path, self.position, self.buffer.getvalue())
except oss2.exceptions.RequestError as e:
if 'timeout' not in str(e) or retry > 2:
raise
# retry if timeout
logger.info('| OSSIO timeout. Retry uploading...')
import time
time.sleep(5)
self.flush(retry + 1)
except oss2.exceptions.ObjectNotAppendable as e:
from . import io
logger.info(str(e) + '\nTrying to recover..\n')
full_path = f'oss://{self.bucket.bucket_name}/{self.path}'
with io.open(full_path) as f:
prev_content = f.read()
io.remove(full_path)
self.position = 0
content = self.buffer.getvalue()
self.buffer.close()
self.buffer = StringIO()
self.write(prev_content)
self.write(content)
def close(self):
self.flush()
def seek(self, position):
self.position = position
def __enter__(self):
return self.buffer
def __exit__(self, *args):
self.flush()
class BinaryOSSFile:
def __init__(self, bucket, path):
self.bucket = bucket
self.path = path
self.buffer = BytesIO()
def __enter__(self):
return self.buffer
def __exit__(self, *args):
value = self.buffer.getvalue()
if len(value) > 100 * 1024 ** 2: # 100M
with oss_progress('uploading') as callback:
self.bucket.put_object(self.path, value, progress_callback=callback)
else:
self.bucket.put_object(self.path, value)
class NullContextWrapper:
def __init__(self, obj):
self._obj = obj
def __getattr__(self, name):
return getattr(self._obj, name)
def __iter__(self):
return self._obj.__iter__()
def __next__(self):
return self._obj.__next__()
def __enter__(self):
return self
def __exit__(self, *args):
pass
@contextmanager
def ignore_io_error(msg=''):
import oss2
try:
yield
except (oss2.exceptions.RequestError, oss2.exceptions.ServerError) as e:
logger.info(str(e) + ' ' + msg)
@contextmanager
def mute_stderr():
cache = sys.stderr
sys.stderr = StringIO()
try:
yield None
finally:
sys.stderr = cache
|
[
"zhangtaolin@B-133KQ05P-0208.local"
] |
zhangtaolin@B-133KQ05P-0208.local
|
9ac78261b3e0bfe904692b30ec71925efb1b2fd5
|
e203ddace08580170e3b4de9c79588209e857c1c
|
/books.py
|
23233198dc918f7183dbddd721d36fc2b0141ebf
|
[] |
no_license
|
stradtkt/OOPTreehouse-Python
|
e17f3fd48840049b8b741aa0e30e54d1409804b2
|
84e0ef2142118bf44c416a3b1dde3519ff57fd15
|
refs/heads/main
| 2023-02-26T15:03:27.053205 | 2021-02-04T13:04:26 | 2021-02-04T13:04:26 | 334,620,181 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
class Book:
def __init__(self, title, author):
self.title = title
self.author = author
def __str__(self):
return '{}: {}'.format(self.title, self.author)
class Bookcase:
def __init__(self, books=None):
self.books = books
@classmethod
def create_bookcase(cls, book_list):
books = []
for title, author in book_list:
books.append(Book(title, author))
return cls(books)
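# Example usage (hypothetical data): the classmethod builds Book instances
# from (title, author) pairs and returns a populated Bookcase.
#
#     case = Bookcase.create_bookcase([('Dune', 'Frank Herbert'), ('Emma', 'Jane Austen')])
#     print(case.books[0])  # Dune: Frank Herbert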
|
[
"stradtkt22@gmail.com"
] |
stradtkt22@gmail.com
|
3f831349dbe5ba3bfa9fc95b134a94e9b0619124
|
a8a7f22f610e333f095a3424e4fd9208f9a3c407
|
/python class clg/avg.py
|
bd8e03bdfbcaf4c6846c5eadcd6384c7e59d5353
|
[] |
no_license
|
saikrupa82/mypyth
|
b4d8a68302bf2e664cad2d416fee8dcb3a6c5896
|
c3ab9d9befe467641588e720f9ca658a85cc9f7a
|
refs/heads/master
| 2022-12-27T13:29:06.374075 | 2020-10-01T14:48:40 | 2020-10-01T14:48:40 | 300,315,085 | 0 | 0 | null | 2020-10-01T14:47:30 | 2020-10-01T14:47:29 | null |
UTF-8
|
Python
| false | false | 327 |
py
|
s=int(input("Enter any number"))
ls=[]
k,max,min=0,0,0
f=str(s)
if f.isdigit:
for i in range(1,s+1):
k+=i
ls.append(i)
print(ls)
print("Average of number is",float(k/s))
max=ls[0]
for i in ls:
if max<i:
max=i
print("Maximum number is ",max)
min=ls[0]
for i in ls:
if min>i:
min=i
print("Maximum number is ",min)
|
[
"noreply@github.com"
] |
saikrupa82.noreply@github.com
|
4a72e08adf4020d79af80bfb0d8b17b893194e84
|
264c8e2bb321113144dc29d9d8fbc16a12a6b9e2
|
/Ex_Files_Python_Data_Structures/Exercise Files/03_03_begin/reverse_string.py
|
701831cea42a8f48788b9ddbd48eee7d7ba7a89c
|
[] |
no_license
|
prakashatul1/interview_prep
|
5d945cfd26d647c1dafdb6aeb1d1a84cd9732d16
|
91c93f32c76336f5aafc1884688746f59695ba0d
|
refs/heads/master
| 2023-06-15T21:08:51.600718 | 2021-07-16T09:18:09 | 2021-07-16T09:18:09 | 378,338,075 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 372 |
py
|
"""
Python Data Structures - A Game-Based Approach
Stack challenge
Robin Andrews - https://compucademy.net/
"""
import stack
string = "gninraeL nIdekniL htiw tol a nraeL"
reversed_string = ""
s = stack.Stack()
# Your solution here.
for char in string:
s.push(char)
while not s.is_empty():
reversed_string += s.pop()
print(reversed_string)
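# Sanity check: the stack-based reversal should agree with slice reversal.
assert reversed_string == string[::-1]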
|
[
"atul.prakash@nobroker.in"
] |
atul.prakash@nobroker.in
|
24ee0c3b5ba31c62359bb82634292671f9df0b24
|
e6b4b9dcca11d6a8abd110cd681b2712f9843030
|
/src/env/dm_control/dm_control/composer/observation/observable/base_test.py
|
0f519c2ba2e7da274db7fe54fd6ede820fd6dc34
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
nicklashansen/svea-vit
|
a1b1d74fba88aaa94c876d354e7d6ed60cd3f064
|
33d3ea2682409ee82bf9c5129ceaf06ab01cd48e
|
refs/heads/main
| 2023-07-21T18:35:08.439052 | 2023-07-11T20:09:50 | 2023-07-11T20:09:50 | 379,015,671 | 16 | 3 |
MIT
| 2023-07-11T20:09:52 | 2021-06-21T17:43:32 |
Python
|
UTF-8
|
Python
| false | false | 5,914 |
py
|
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for observable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Internal dependencies.
from absl.testing import absltest
from dm_control import mujoco
from dm_control.composer.observation import fake_physics
from dm_control.composer.observation.observable import base
import numpy as np
import six
_MJCF = """
<mujoco>
<worldbody>
<light pos="0 0 1"/>
<body name="body" pos="0 0 0">
<joint name="my_hinge" type="hinge" pos="-.1 -.2 -.3" axis="1 -1 0"/>
<geom name="my_box" type="box" size=".1 .2 .3" rgba="0 0 1 1"/>
<geom name="small_sphere" type="sphere" size=".12" pos=".1 .2 .3"/>
</body>
<camera name="world" mode="targetbody" target="body" pos="1 1 1" />
</worldbody>
</mujoco>
"""
class _FakeBaseObservable(base.Observable):
def _callable(self, physics):
pass
class ObservableTest(absltest.TestCase):
def testBaseProperties(self):
fake_observable = _FakeBaseObservable(update_interval=42,
buffer_size=5,
delay=10,
aggregator=None,
corruptor=None)
self.assertEqual(fake_observable.update_interval, 42)
self.assertEqual(fake_observable.buffer_size, 5)
self.assertEqual(fake_observable.delay, 10)
fake_observable.update_interval = 48
self.assertEqual(fake_observable.update_interval, 48)
fake_observable.buffer_size = 7
self.assertEqual(fake_observable.buffer_size, 7)
fake_observable.delay = 13
self.assertEqual(fake_observable.delay, 13)
enabled = not fake_observable.enabled
fake_observable.enabled = not fake_observable.enabled
self.assertEqual(fake_observable.enabled, enabled)
def testGeneric(self):
physics = fake_physics.FakePhysics()
repeated_observable = base.Generic(
fake_physics.FakePhysics.repeated, update_interval=42)
repeated_observation = repeated_observable.observation_callable(physics)()
self.assertEqual(repeated_observable.update_interval, 42)
np.testing.assert_array_equal(repeated_observation, [0, 0])
def testMujocoFeature(self):
physics = mujoco.Physics.from_xml_string(_MJCF)
hinge_observable = base.MujocoFeature(
kind='qpos', feature_name='my_hinge')
hinge_observation = hinge_observable.observation_callable(physics)()
np.testing.assert_array_equal(
hinge_observation, physics.named.data.qpos['my_hinge'])
box_observable = base.MujocoFeature(
kind='geom_xpos', feature_name='small_sphere', update_interval=5)
box_observation = box_observable.observation_callable(physics)()
self.assertEqual(box_observable.update_interval, 5)
np.testing.assert_array_equal(
box_observation, physics.named.data.geom_xpos['small_sphere'])
observable_from_callable = base.MujocoFeature(
kind='geom_xpos', feature_name=lambda: ['my_box', 'small_sphere'])
observation_from_callable = (
observable_from_callable.observation_callable(physics)())
np.testing.assert_array_equal(
observation_from_callable,
physics.named.data.geom_xpos[['my_box', 'small_sphere']])
def testMujocoCamera(self):
physics = mujoco.Physics.from_xml_string(_MJCF)
camera_observable = base.MujocoCamera(
camera_name='world', height=480, width=640, update_interval=7)
self.assertEqual(camera_observable.update_interval, 7)
camera_observation = camera_observable.observation_callable(physics)()
np.testing.assert_array_equal(
camera_observation, physics.render(480, 640, 'world'))
self.assertEqual(camera_observation.shape,
camera_observable.array_spec.shape)
self.assertEqual(camera_observation.dtype,
camera_observable.array_spec.dtype)
camera_observable.height = 300
camera_observable.width = 400
camera_observation = camera_observable.observation_callable(physics)()
self.assertEqual(camera_observable.height, 300)
self.assertEqual(camera_observable.width, 400)
np.testing.assert_array_equal(
camera_observation, physics.render(300, 400, 'world'))
self.assertEqual(camera_observation.shape,
camera_observable.array_spec.shape)
self.assertEqual(camera_observation.dtype,
camera_observable.array_spec.dtype)
def testCorruptor(self):
physics = fake_physics.FakePhysics()
def add_twelve(old_value, random_state):
del random_state # Unused.
return [x + 12 for x in old_value]
repeated_observable = base.Generic(
fake_physics.FakePhysics.repeated, corruptor=add_twelve)
corrupted = repeated_observable.observation_callable(
physics=physics, random_state=None)()
np.testing.assert_array_equal(corrupted, [12, 12])
def testInvalidAggregatorName(self):
name = 'invalid_name'
with six.assertRaisesRegex(self, KeyError, 'Unrecognized aggregator name'):
_ = _FakeBaseObservable(update_interval=3, buffer_size=2, delay=1,
aggregator=name, corruptor=None)
if __name__ == '__main__':
absltest.main()
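# Illustrative sketch (not part of the original test file): as exercised in
# testGeneric above, base.Generic turns any `f(physics) -> value` callable
# into an observable, e.g.:
#
# hinge_obs = base.Generic(lambda physics: physics.named.data.qpos['my_hinge'])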
|
[
"hello@nicklashansen.com"
] |
hello@nicklashansen.com
|
712ada88cdb4974d7863e34891f5a95fe1380f20
|
8c3aa1195ac5ce8619bef2443bcc253d8866d436
|
/Classes & functions.py
|
283e3762565a90b17333e92c9791de6c91c2ed52
|
[] |
no_license
|
saad-cherkaoui-ikbal/Streamlit-Dashboard
|
bc0a79b431e5bfb2959e21cbdd8bd86b835b9060
|
c5ac86fab947e7c63088fcde66e290c0e925731a
|
refs/heads/main
| 2023-09-05T01:26:00.604662 | 2021-10-12T09:32:00 | 2021-10-12T09:32:00 | 413,021,917 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,089 |
py
|
import pandas as pd

# `execute_query(connection, query)` is assumed to be defined elsewhere in
# this project; it should return the rows produced by the SQL query.
def get_table(name, connection, columns):
    query = 'SELECT * FROM {};'.format(name)
    return pd.DataFrame(execute_query(connection, query), columns=columns)


class LibraryTable:
    def __init__(self, name, df):
        self.name = name
        self.df = df

    def span(self, period, column):
        # Keep only rows whose `column` value lies strictly inside the period.
        self.df = self.df[(self.df[column] > period[0]) & (self.df[column] < period[1])]
        return self.df

    def name_check(self, columns, name):
        # Filter rows where `name` appears as a whitespace-separated token
        # (case-insensitive) in any of the given columns.
        if len(columns) == 1:
            stuff = self.df[columns[0]].apply(lambda z: z.split(' '))
            if any(stuff.apply(lambda z: name.lower() in [w.lower() for w in z])):
                self.df = self.df[stuff.apply(lambda z: name.lower() in [w.lower() for w in z] + [''])]
            return self.df
        stuff = self.df[columns].apply(lambda z: z.apply(lambda y: y.split(' ')))
        stuff_bool = stuff.apply(lambda z: z.apply(lambda y: name.lower() in [w.lower() for w in y] + ['']))
        # Reduce the boolean DataFrame to a single truth value before branching.
        if stuff_bool.any().any():
            self.df = self.df[stuff_bool.T.apply(lambda z: any(z))]
        return self.df
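# Illustrative usage sketch (not part of the original file; the table name,
# column names and search term are made-up examples, and `connection` is an
# open DB connection from elsewhere in the project):
#
# df = get_table('loans', connection, columns=['borrower', 'date'])
# table = LibraryTable('loans', df)
# table.span(('2021-01-01', '2021-12-31'), 'date')
# table.name_check(['borrower'], 'Alice')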
|
[
"noreply@github.com"
] |
saad-cherkaoui-ikbal.noreply@github.com
|
a48262df7b31b657505b623ed8035c6792e85210
|
c0ffc02a5c72bea9a86d15e4a1ff01a7b67b6858
|
/2nd11.py
|
6c0435486d72ef0e986b5f69acf172236e6c785b
|
[] |
no_license
|
db2398/2nd
|
6f39d05a0b3f9f2aba050f35f9a9e83ba7e1511f
|
404942c046ab894df1b52016ac7d4d49651f8295
|
refs/heads/master
| 2020-06-11T16:53:30.291296 | 2019-07-01T09:13:00 | 2019-07-01T09:13:00 | 194,029,475 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 55 |
py
|
# Read two integers from one line and print c raised to the power d.
c, d = map(int, input().split())
result = c ** d
print(result)
|
[
"noreply@github.com"
] |
db2398.noreply@github.com
|
d2dc956bbc48eb170fbbda451cf3630d7b8168b1
|
5545d3c3e910ccb5b45b2277a71ad3c3ea3caedc
|
/jamenson/runtime/Attic/runtime.py
|
85f8fe28ad0310322de14198533d79ebdb9fe6a4
|
[
"Apache-2.0"
] |
permissive
|
matthagy/Jamenson
|
61de19c71da6e133bf7d8efbb933a1036cf1e6f5
|
18a0fdd60b3d56ed4a6d4e792132535324490634
|
refs/heads/master
| 2016-09-11T04:31:28.895242 | 2013-04-04T00:14:44 | 2013-04-04T00:14:44 | 1,781,863 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,969 |
py
|
'''objects used by runtime
'''
from itertools import count
import string
class symbol(object):
    # instance cache for `is`-based comparisons and `id`-based hashing
_cache = {}
__slots__ = ['printForm']
@classmethod
def raw(cls, printForm):
self = object.__new__(cls)
self.printForm = printForm
return self
def __new__(cls, printForm):
try:
return cls._cache[printForm]
except KeyError:
self = cls._cache[printForm] = cls.raw(printForm)
return self
def __repr__(self):
return 'symbol(%s)' % (self.printForm)
def __str__(self):
return bprint(self)
def __reduce__(self):
if gensymbolp(self):
return (gensym, (self.printForm[2:],))
else:
return (symbol, (self.printForm,))
def reset_gensym_counter(start=0):
global gensym_counter
gensym_counter = iter(count(start)).next
reset_gensym_counter()
def gensym(base='gensym'):
return symbol.raw('#:%s%d' % (base,gensym_counter()))
def gensymbolp(op):
return op.printForm not in symbol._cache
class cons(object):
__slots__ = 'car cdr'.split()
def __init__(self, car, cdr):
self.car = car
self.cdr = cdr
def __iter__(self):
op = self
while op is not nil:
if not isinstance(op, cons):
raise TypeError("iterating over non-cons cdr")
yield op.car
op = op.cdr
def __nonzero__(self):
return self is not nil
def __repr__(self):
return str(self)
#if self is nil:
# return 'nil'
#return 'cons(%r, %r)' % (self.car, self.cdr)
def __str__(self):
return bprint(self)
def __reduce__(self):
if self is nil:
return (load_nil, ())
else:
return (cons, (self.car, self.cdr))
def __eq__(self, other):
if not isinstance(other, cons):
return NotImplemented
return self is other or (self.car == other.car and
self.cdr == other.cdr)
nil = cons(None, None)
nil.car = nil
nil.cdr = nil
def load_nil():
return nil
def clist(*seq):
head = acc = nil
for op in seq:
cell = cons(op, nil)
if acc is nil:
head = cell
else:
acc.cdr = cell
acc = cell
return head
def bprint(op):
acc = []
bprint_collect_parts(acc.append, set(), op)
return ''.join(acc)
noQuoteChars = set(string.ascii_letters +
string.digits +
string.punctuation + ' ') - set('"')
escapeChars = {
'\n': '\\n',
'\t': '\\t',
'"': '\\"'}
qsymbol = symbol('%quote')
def bprint_collect_parts(emit, memo, op):
if isinstance(op, symbol):
emit(op.printForm)
elif op is nil:
emit('nil')
elif isinstance(op, cons):
if op.car is qsymbol:
assert op.cdr.cdr is nil, 'bad quote %r' % (op.cdr,)
emit("'")
bprint_collect_parts(emit, memo, op.cdr.car)
return
if id(op) in memo:
emit('#<circular cons>')
return
memo.add(id(op))
emit('(')
first = True
while op is not nil:
if first:
first = False
else:
emit(' ')
bprint_collect_parts(emit, memo, op.car)
if isinstance(op.cdr, cons):
op = op.cdr
else:
emit(' . ')
bprint_collect_parts(emit, memo, op.cdr)
break
emit(')')
elif isinstance(op, (int,long,float)):
emit(str(op))
elif op is None or op is False or op is True:
emit(str(op).lower())
elif isinstance(op, str):
emit('"')
for c in op:
if c in noQuoteChars:
emit(c)
elif c in escapeChars:
emit(escapeChars[c])
else:
emit('\\x%x' % ord(c))
emit('"')
else:
emit('#<')
emit(repr(op))
emit('>')
class MacroFunction(object):
__slots__ = ['func', 'robust']
def __init__(self, func, robust=False):
self.func = func
self.robust = robust
def __call__(self, *args, **kwds):
raise RuntimeError("cannot directly call macro %s" % self.func.__name__)
def macroExpand(self, translator, *args, **kwds):
return self.func(translator, *args, **kwds)
def __getstate__(self):
return self.func, self.robust
def __setstate__(self, state):
self.func, self.robust = state
import types
class obj(object):
def __init__(self, **kwds):
vars(self).update(kwds)
def __repr__(self):
return '(%s %s)' % (self.__class__.__name__,
' '.join(":%s %r" % t
for t in vars(self).iteritems()))
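# Illustrative usage (not part of the original module; Python 2 semantics, in
# keeping with the rest of this file):
#
# >>> form = clist(symbol('+'), 1, clist(symbol('*'), 2, 3))
# >>> bprint(form)
# '(+ 1 (* 2 3))'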
|
[
"hagy@gatech.edu"
] |
hagy@gatech.edu
|
6a5fda1edd88fd3fb25d951554199d007eb23988
|
bfcef2c0e5ade994dd78b1c737bde3af7ea9315b
|
/tests/integration/usage_statistics/test_usage_statistics_messages.py
|
3f04c5a1be6c7b3b8f17658d92021cc1bf2d00a5
|
[
"Apache-2.0"
] |
permissive
|
NathanFarmer/great_expectations
|
abe7103c6aa28f79c1d636cad78d892525702e67
|
0c6b7c5b7467a4cc282339748aec093a6f825757
|
refs/heads/develop
| 2023-06-19T23:18:50.786886 | 2021-07-18T23:33:05 | 2021-07-18T23:33:05 | 351,501,770 | 0 | 0 |
Apache-2.0
| 2021-04-15T19:19:54 | 2021-03-25T16:21:57 | null |
UTF-8
|
Python
| false | false | 62,557 |
py
|
"""Test usage statistics transmission client-side."""
import copy
from typing import Any, Dict, List
import pytest
import requests
from great_expectations.data_context import BaseDataContext
USAGE_STATISTICS_QA_URL = (
"https://qa.stats.greatexpectations.io/great_expectations/v1/usage_statistics"
)
def generate_messages_with_defaults(
defaults: Dict[str, Any], message_stubs: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
"""
Create a list of messages by overriding defaults with message_stubs
Args:
defaults: Dict of default message items
message_stubs: Unique parts of message
Returns:
        List of messages, same length as message_stubs, combining defaults overridden by message stubs
"""
output_list = []
for message_stub in message_stubs:
defaults_copy = copy.deepcopy(defaults)
defaults_copy.update(message_stub)
output_list.append(defaults_copy)
return output_list
def test_generate_messages_with_defaults():
defaults = {
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
}
message_stubs = [
{
"event": "cli.checkpoint.new",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.checkpoint.new",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.new",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.new.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.new.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
]
output = generate_messages_with_defaults(
defaults=defaults, message_stubs=message_stubs
)
expected = [
{
"event": "cli.checkpoint.new",
"event_payload": {},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.checkpoint.new",
"event_payload": {"api_version": "v2"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.new",
"event_payload": {"api_version": "v3"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.new.begin",
"event_payload": {"api_version": "v3"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.new.end",
"event_payload": {"api_version": "v3"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.18.manual_testing",
},
]
assert output == expected
"""
valid_usage_statistics_messages should include a list of messages that we want to ensure are valid.
Whenever a new kind of message is added, an example of that message should be included here.
Each message will be sent to the server to ensure it is accepted.
"""
valid_usage_statistics_messages = {
"data_context.__init__": [
{
"event_payload": {
"platform.system": "Darwin",
"platform.release": "19.3.0",
"version_info": "sys.version_info(major=3, minor=7, micro=4, releaselevel='final', serial=0)",
"anonymized_datasources": [
{
"anonymized_name": "f57d8a6edae4f321b833384801847498",
"parent_class": "SqlAlchemyDatasource",
"sqlalchemy_dialect": "postgresql",
}
],
"anonymized_stores": [
{
"anonymized_name": "078eceafc1051edf98ae2f911484c7f7",
"parent_class": "ExpectationsStore",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
},
{
"anonymized_name": "313cbd9858dd92f3fc2ef1c10ab9c7c8",
"parent_class": "ValidationsStore",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
},
{
"anonymized_name": "2d487386aa7b39e00ed672739421473f",
"parent_class": "EvaluationParameterStore",
"anonymized_store_backend": {
"parent_class": "InMemoryStoreBackend"
},
},
],
"anonymized_validation_operators": [
{
"anonymized_name": "99d14cc00b69317551690fb8a61aca94",
"parent_class": "ActionListValidationOperator",
"anonymized_action_list": [
{
"anonymized_name": "5a170e5b77c092cc6c9f5cf2b639459a",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "0fffe1906a8f2a5625a5659a848c25a3",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "101c746ab7597e22b94d6e5f10b75916",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"anonymized_data_docs_sites": [
{
"parent_class": "SiteBuilder",
"anonymized_name": "eaf0cf17ad63abf1477f7c37ad192700",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
"anonymized_site_index_builder": {
"parent_class": "DefaultSiteIndexBuilder",
"show_cta_footer": True,
},
}
],
"anonymized_expectation_suites": [
{
"anonymized_name": "238e99998c7674e4ff26a9c529d43da4",
"expectation_count": 8,
"anonymized_expectation_type_counts": {
"expect_column_value_lengths_to_be_between": 1,
"expect_table_row_count_to_be_between": 1,
"expect_column_values_to_not_be_null": 2,
"expect_column_distinct_values_to_be_in_set": 1,
"expect_column_kl_divergence_to_be_less_than": 1,
"expect_table_column_count_to_equal": 1,
"expect_table_columns_to_match_ordered_list": 1,
},
}
],
},
"event": "data_context.__init__",
"success": True,
"version": "1.0.0",
"event_time": "2020-03-28T01:14:21.155Z",
"data_context_id": "96c547fe-e809-4f2e-b122-0dc91bb7b3ad",
"data_context_instance_id": "445a8ad1-2bd0-45ce-bb6b-d066afe996dd",
"ge_version": "0.11.9.manual_test",
},
# "new-style" expectation type system
{
"event_payload": {
"platform.system": "Darwin",
"platform.release": "19.3.0",
"version_info": "sys.version_info(major=3, minor=7, micro=4, releaselevel='final', serial=0)",
"anonymized_datasources": [
{
"anonymized_name": "f57d8a6edae4f321b833384801847498",
"parent_class": "SqlAlchemyDatasource",
"sqlalchemy_dialect": "postgresql",
}
],
"anonymized_stores": [
{
"anonymized_name": "078eceafc1051edf98ae2f911484c7f7",
"parent_class": "ExpectationsStore",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
},
{
"anonymized_name": "313cbd9858dd92f3fc2ef1c10ab9c7c8",
"parent_class": "ValidationsStore",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
},
{
"anonymized_name": "2d487386aa7b39e00ed672739421473f",
"parent_class": "EvaluationParameterStore",
"anonymized_store_backend": {
"parent_class": "InMemoryStoreBackend"
},
},
],
"anonymized_validation_operators": [
{
"anonymized_name": "99d14cc00b69317551690fb8a61aca94",
"parent_class": "ActionListValidationOperator",
"anonymized_action_list": [
{
"anonymized_name": "5a170e5b77c092cc6c9f5cf2b639459a",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "0fffe1906a8f2a5625a5659a848c25a3",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "101c746ab7597e22b94d6e5f10b75916",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"anonymized_data_docs_sites": [
{
"parent_class": "SiteBuilder",
"anonymized_name": "eaf0cf17ad63abf1477f7c37ad192700",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
"anonymized_site_index_builder": {
"parent_class": "DefaultSiteIndexBuilder",
"show_cta_footer": True,
},
}
],
"anonymized_expectation_suites": [
{
"anonymized_name": "238e99998c7674e4ff26a9c529d43da4",
"expectation_count": 8,
"anonymized_expectation_counts": [
{
"expectation_type": "expect_column_value_lengths_to_be_between",
"count": 1,
},
{
"expectation_type": "expect_table_row_count_to_be_between",
"count": 1,
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"count": 2,
},
{
"expectation_type": "expect_column_distinct_values_to_be_in_set",
"count": 1,
},
{
"expectation_type": "expect_column_kl_divergence_to_be_less_than",
"count": 1,
},
{
"expectation_type": "expect_table_column_count_to_equal",
"count": 1,
},
{
"expectation_type": "expect_table_columns_to_match_ordered_list",
"count": 1,
},
],
}
],
},
"event": "data_context.__init__",
"success": True,
"version": "1.0.0",
"event_time": "2020-03-28T01:14:21.155Z",
"data_context_id": "96c547fe-e809-4f2e-b122-0dc91bb7b3ad",
"data_context_instance_id": "445a8ad1-2bd0-45ce-bb6b-d066afe996dd",
"ge_version": "0.13.0.manual_test",
},
],
"data_asset.validate": [
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [
"path",
"datasource",
"data_asset_name",
],
"anonymized_expectation_suite_name": "dbb859464809a03647feb14a514f12b8",
"anonymized_datasource_name": "a41caeac7edb993cfbe55746e6a328b5",
},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:36:26.422Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
}
],
"data_context.add_datasource": [
{
"event_payload": {
"anonymized_name": "c9633f65c36d1ba9fbaa9009c1404cfa",
"parent_class": "PandasDatasource",
},
"event": "data_context.add_datasource",
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:08:16.030Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
}
],
"data_context.build_data_docs": [
{
"event_payload": {},
"event": "data_context.build_data_docs",
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:08:24.349Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
}
],
"data_context.open_data_docs": [
{
"event_payload": {},
"event": "data_context.open_data_docs",
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:08:28.070Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
}
],
"data_context.save_expectation_suite": [
{
"event_payload": {
"anonymized_expectation_suite_name": "4b6bf73298fcc2db6da929a8f18173f7"
},
"event": "data_context.save_expectation_suite",
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:08:23.570Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
}
],
"datasource.sqlalchemy.connect": [
{
"event": "datasource.sqlalchemy.connect",
"event_payload": {
"anonymized_name": "6989a7654d0e27470dc01292b6ed0dea",
"sqlalchemy_dialect": "postgresql",
},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T00:38:32.664Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.5.manual_testing",
}
],
# BaseDataContext.test_yaml_config() MESSAGES
"data_context.test_yaml_config": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2021-06-18T14:36:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": "fake_anonymized_name_for_testing",
"parent_class": class_name,
"diagnostic_info": [],
},
"ge_version": "0.13.20.manual_testing",
}
for class_name in BaseDataContext.ALL_TEST_YAML_CONFIG_SUPPORTED_TYPES
]
+ [
{
"event": "data_context.test_yaml_config",
"success": False,
"event_payload": {
"anonymized_name": "fake_anonymized_name_for_testing",
"parent_class": class_name,
"diagnostic_info": [],
},
"ge_version": "0.13.20.manual_testing",
}
for class_name in BaseDataContext.ALL_TEST_YAML_CONFIG_SUPPORTED_TYPES
]
# Diagnostic Message Types
+ [
{
"event": "data_context.test_yaml_config",
"success": False,
"event_payload": {
"diagnostic_info": ["__substitution_error__"],
},
"ge_version": "0.13.20.manual_testing",
},
{
"event": "data_context.test_yaml_config",
"success": False,
"event_payload": {
"diagnostic_info": ["__yaml_parse_error__"],
},
"ge_version": "0.13.20.manual_testing",
},
{
"event": "data_context.test_yaml_config",
"success": True,
"event_payload": {
"diagnostic_info": ["__custom_subclass_not_core_ge__"],
},
"ge_version": "0.13.20.manual_testing",
},
{
"event": "data_context.test_yaml_config",
"success": True,
"event_payload": {
"diagnostic_info": ["__class_name_not_provided__"],
},
"ge_version": "0.13.20.manual_testing",
},
{
"event": "data_context.test_yaml_config",
"success": False,
"event_payload": {
"diagnostic_info": ["__class_name_not_provided__"],
},
"ge_version": "0.13.20.manual_testing",
},
]
# Store Message Types
+ [
{
"event": "data_context.test_yaml_config",
"success": True,
"event_payload": {
"anonymized_name": "fake_anonymized_name_for_testing",
"parent_class": "ExpectationsStore",
"anonymized_store_backend": {
"parent_class": "InMemoryStoreBackend"
},
},
"ge_version": "0.13.20.manual_testing",
}
]
# Datasource Message Types
+ [
{
"event": "data_context.test_yaml_config",
"success": True,
"event_payload": {
"anonymized_name": "fake_anonymized_name_for_testing",
"parent_class": "Datasource",
"anonymized_execution_engine": {
"anonymized_name": "fake_anonymized_name_for_testing",
"parent_class": "PandasExecutionEngine",
},
"anonymized_data_connectors": [
{
"anonymized_name": "fake_anonymized_name_for_testing",
"parent_class": "InferredAssetFilesystemDataConnector",
}
],
},
"ge_version": "0.13.20.manual_testing",
}
]
# DataConnector Message Types
+ [
{
"event": "data_context.test_yaml_config",
"success": True,
"event_payload": {
"anonymized_name": "fake_anonymized_name_for_testing",
"parent_class": "ConfiguredAssetFilesystemDataConnector",
},
"ge_version": "0.13.20.manual_testing",
}
]
# Checkpoint Message Types
+ [
{
"event": "data_context.test_yaml_config",
"success": True,
"event_payload": {
"anonymized_name": "fake_anonymized_name_for_testing",
"parent_class": "Checkpoint",
},
"ge_version": "0.13.20.manual_testing",
}
],
),
# CLI INIT COMMANDS
"cli.init.create": [
{
"event": "cli.init.create",
"event_payload": {},
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:06:47.697Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
},
{
"event": "cli.init.create",
"event_payload": {"api_version": "v2"},
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:06:47.697Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
},
{
"event": "cli.init.create",
"event_payload": {"api_version": "v3"},
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:06:47.697Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
},
],
# CLI PROJECT COMMANDS
"cli.project.check_config": [
{
"event": "cli.project.check_config",
"event_payload": {},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:42:34.068Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.project.check_config",
"event_payload": {"api_version": "v2"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:42:34.068Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.project.check_config",
"event_payload": {"api_version": "v3"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:42:34.068Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
],
"cli.project.upgrade": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T00:20:37.828Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.project.upgrade.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.project.upgrade.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.project.upgrade.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
# CLI STORE COMMANDS
"cli.store.list": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:56:53.908Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.store.list",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.store.list",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.store.list",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.store.list.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.store.list.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.store.list.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
# CLI DATASOURCE COMMANDS
"cli.datasource.list": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.datasource.list",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.datasource.list",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.datasource.list",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.datasource.list.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.datasource.list.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.datasource.list.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.datasource.new": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.datasource.new",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.datasource.new",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.datasource.new",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.datasource.new.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.datasource.new.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.datasource.new.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.datasource.delete": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.datasource.delete",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.datasource.delete",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.datasource.delete",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.datasource.delete.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.datasource.delete.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.datasource.delete.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.datasource.delete.end",
"event_payload": {"api_version": "v3", "cancelled": True},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.datasource.profile": [
{
"event": "cli.datasource.profile",
"event_payload": {},
"success": False,
"version": "1.0.0",
"event_time": "2020-08-05T01:03:17.567Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.datasource.profile",
"event_payload": {"api_version": "v2"},
"success": False,
"version": "1.0.0",
"event_time": "2020-08-05T01:03:17.567Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.datasource.profile",
"event_payload": {"api_version": "v3"},
"success": False,
"version": "1.0.0",
"event_time": "2020-08-05T01:03:17.567Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
],
# CLI NEW_DS_CHOICE COMMANDS
"cli.new_ds_choice": [
{
"event": "cli.new_ds_choice",
"event_payload": {"type": "pandas"},
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:08:08.963Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
},
{
"event": "cli.new_ds_choice",
"event_payload": {"type": "pandas", "api_version": "v2"},
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:08:08.963Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
},
{
"event": "cli.new_ds_choice",
"event_payload": {"type": "pandas", "api_version": "v3"},
"success": True,
"version": "1.0.0",
"event_time": "2020-06-25T16:08:08.963Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
},
],
# CLI SUITE COMMANDS
"cli.suite.demo": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.suite.demo",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.suite.demo",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.demo",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.demo.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.demo.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.demo.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.suite.list": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.suite.list",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.suite.list",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.list",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.list.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.list.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.list.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.suite.new": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.suite.new",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.suite.new",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.new",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.new.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.new.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.new.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.suite.edit": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.suite.edit",
"event_payload": {
"anonymized_expectation_suite_name": "0604e6a8f5a1da77e0438aa3b543846e"
},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.suite.edit",
"event_payload": {
"anonymized_expectation_suite_name": "0604e6a8f5a1da77e0438aa3b543846e",
"api_version": "v2",
},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.edit",
"event_payload": {
"anonymized_expectation_suite_name": "0604e6a8f5a1da77e0438aa3b543846e",
"api_version": "v3",
},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.edit.begin",
"event_payload": {
"anonymized_expectation_suite_name": "0604e6a8f5a1da77e0438aa3b543846e",
"api_version": "v3",
},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.edit.end",
"event_payload": {
"anonymized_expectation_suite_name": "0604e6a8f5a1da77e0438aa3b543846e",
"api_version": "v3",
},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.edit.end",
"success": False,
"event_payload": {
"anonymized_expectation_suite_name": "0604e6a8f5a1da77e0438aa3b543846e",
"api_version": "v3",
},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.suite.delete": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.suite.delete",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.suite.delete",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.delete",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.delete.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.delete.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.delete.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.suite.delete.end",
"event_payload": {"api_version": "v3", "cancelled": True},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.suite.scaffold": [
{
"event": "cli.suite.scaffold",
"event_payload": {},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-05T00:58:51.961Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-05T00:58:51.961Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v3"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-05T00:58:51.961Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
],
# CLI CHECKPOINT COMMANDS
"cli.checkpoint.new": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.checkpoint.new",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.checkpoint.new",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.new",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.new.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.new.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.new.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.checkpoint.script": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.checkpoint.script",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.checkpoint.script",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.script",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.script.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.script.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.script.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.checkpoint.run": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.checkpoint.run",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.checkpoint.run",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.run",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.run.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.checkpoint.list": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.checkpoint.list",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.checkpoint.list",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.list",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.list.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.list.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.list.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.checkpoint.delete": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T22:50:58.837Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.checkpoint.delete",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.checkpoint.delete",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.delete",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.checkpoint.delete.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.delete.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.delete.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.checkpoint.delete.end",
"event_payload": {"api_version": "v3", "cancelled": True},
"ge_version": "0.13.18.manual_testing",
},
],
),
# CLI VALIDATION_OPERATOR COMMANDS
"cli.validation_operator.list": [
{
"event": "cli.validation_operator.list",
"event_payload": {},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:32:33.635Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.validation_operator.list",
"event_payload": {"api_version": "v2"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:32:33.635Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
],
"cli.validation_operator.run": [
{
"event": "cli.validation_operator.run",
"event_payload": {},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:33:15.664Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.validation_operator.run",
"event_payload": {"api_version": "v2"},
"success": True,
"version": "1.0.0",
"event_time": "2020-08-03T23:33:15.664Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
"ge_version": "0.13.0.manual_testing",
},
],
# CLI DOCS COMMANDS
"cli.docs.build": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T00:25:27.088Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.docs.build",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.docs.build",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.docs.build",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.docs.build.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.docs.build.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.docs.build.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.docs.build.end",
"event_payload": {"api_version": "v3", "cancelled": True},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.docs.clean": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-05T00:36:50.979Z",
"data_context_id": "2a948908-ec42-47f2-b972-c07bb0393de4",
"data_context_instance_id": "e7e0916d-d527-437a-b89d-5eb8c36d408f",
},
message_stubs=[
{
"event": "cli.docs.clean",
"event_payload": {},
"ge_version": "0.11.9+25.g3ca555c.dirty",
},
{
"event": "cli.docs.clean",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0+25.g3ca555c.dirty",
},
{
"event": "cli.docs.clean",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0+25.g3ca555c.dirty",
},
{
"event": "cli.docs.clean.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.docs.clean.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.docs.clean.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.docs.clean.end",
"event_payload": {"api_version": "v3", "cancelled": True},
"ge_version": "0.13.18.manual_testing",
},
],
),
"cli.docs.list": generate_messages_with_defaults(
defaults={
"success": True,
"version": "1.0.0",
"event_time": "2020-08-04T00:20:37.828Z",
"data_context_id": "00000000-0000-0000-0000-000000000002",
"data_context_instance_id": "10000000-0000-0000-0000-000000000002",
},
message_stubs=[
{
"event": "cli.docs.list",
"event_payload": {},
"ge_version": "0.11.9.manual_testing",
},
{
"event": "cli.docs.list",
"event_payload": {"api_version": "v2"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.docs.list",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.0.manual_testing",
},
{
"event": "cli.docs.list.begin",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.docs.list.end",
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
{
"event": "cli.docs.list.end",
"success": False,
"event_payload": {"api_version": "v3"},
"ge_version": "0.13.18.manual_testing",
},
],
),
}
test_messages = []
message_test_ids = []
for message_type, messages in valid_usage_statistics_messages.items():
for idx, test_message in enumerate(messages):
test_messages += [test_message]
message_test_ids += [f"{message_type}_{idx}"]
@pytest.mark.aws_integration
@pytest.mark.parametrize("message", test_messages, ids=message_test_ids)
def test_usage_statistics_message(message):
"""known message formats should be valid"""
res = requests.post(USAGE_STATISTICS_QA_URL, json=message, timeout=2)
assert res.status_code == 201
assert res.json() == {"event_count": 1}
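# Illustrative invocation (not part of the original file): these tests carry
# the `aws_integration` marker, so they can be selected with pytest's
# standard marker filter, e.g.:
#   pytest -m aws_integration tests/integration/usage_statistics/test_usage_statistics_messages.py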
|
[
"noreply@github.com"
] |
NathanFarmer.noreply@github.com
|
bac70c4b403b6d00550ca2b2b4486cafdada1383
|
575e08de2aa527cf2b6c4200b9aba36169194bc9
|
/main.py
|
84fb70c89a92795d2fad29afb22937bfa1bd126c
|
[
"BSD-3-Clause"
] |
permissive
|
hengying/RetroComputer
|
99b2f532dcde3123a70d6557ce83154f06033624
|
bfc718af6bc80770781d1fd128c8ec739cf3626f
|
refs/heads/main
| 2023-03-14T10:07:26.582936 | 2021-03-01T11:42:12 | 2021-03-01T11:42:12 | 343,344,142 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,954 |
py
|
import os
import sys
import time
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
import pygame
from config import Config
from screen import Screen
from utils import *
class RetroComputer():
def __init__(self):
make_sure_basic_folder_exist()
pygame.init()
self._config = Config()
if self._config.win_pos_x is not None and self._config.win_pos_y is not None:
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (self._config.win_pos_x, self._config.win_pos_y)
self._surface = pygame.display.set_mode((self._config.win_width, self._config.win_height), pygame.NOFRAME)
self._clock = pygame.time.Clock()
if not self._config.show_cursor:
pygame.mouse.set_visible(False)
self.__font = pygame.font.Font('fonts/graph-35-pix-clone.ttf', 48)
self._screen = Screen(self)
def run(self):
try:
while True:
self._update()
for e in pygame.event.get():
if e.type == pygame.QUIT:
raise StopIteration
if e.type == pygame.KEYDOWN:
self._key_down(e)
self._paint()
pygame.display.flip()
self._clock.tick(self._config.fps)
except StopIteration:
self.quit()
def quit(self):
self._do_quit()
def _do_quit(self):
self._screen.end()
#self._config.save_config_file()
pygame.quit()
sys.exit()
def _update(self):
self._screen.update()
def _paint(self):
self._screen.paint()
def _key_down(self, event):
self._screen.key_down(event)
@property
def font(self):
return self.__font
@property
def surface(self):
return self._surface
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
c = RetroComputer()
c.run()
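# Minimal Config stub sketch (not part of the original project; the real class
# lives in config.py). It lists the attributes this file actually reads, with
# made-up example values rather than the project's defaults:
#
# class Config:
#     def __init__(self):
#         self.win_pos_x = None    # optional fixed window position
#         self.win_pos_y = None
#         self.win_width = 1024
#         self.win_height = 768
#         self.show_cursor = False
#         self.fps = 30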
|
[
"noreply@github.com"
] |
hengying.noreply@github.com
|
b63ecbf008cc931600dc5ee653e2dfb5af49df6e
|
65454c2ddbd9d342ed3dc3dae538074f3dbea980
|
/torchdefenses/trainer/empirical_defenses/gradalign.py
|
65d5dcf2bf2c6bd4124603baf578ab19e29b6154
|
[
"MIT"
] |
permissive
|
porom004/adversarial-defenses-pytorch
|
387a3eb58a4b6f551b59504dac3d5d1e0bad106b
|
96948b2d30014826ecac45c701b1772ad04df416
|
refs/heads/master
| 2023-08-21T13:19:14.318792 | 2021-10-13T05:52:03 | 2021-10-13T05:52:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,750 |
py
|
import torch
import torch.nn as nn
from ..advtrainer import AdvTrainer
class GradAlign(AdvTrainer):
r"""
GradAlign in 'Understanding and Improving Fast Adversarial Training'
[https://arxiv.org/abs/2007.02617]
[https://github.com/tml-epfl/understanding-fast-adv-training]
Attributes:
self.model : model.
self.device : device where model is.
self.optimizer : optimizer.
self.scheduler : scheduler (Automatically updated).
self.max_epoch : total number of epochs.
self.max_iter : total number of iterations.
self.epoch : current epoch starts from 1 (Automatically updated).
self.iter : current iters starts from 1 (Automatically updated).
* e.g., is_last_batch = (self.iter == self.max_iter)
self.record_keys : names of items returned by do_iter.
Arguments:
model (nn.Module): model to train.
eps (float): strength of the attack or maximum perturbation.
        alpha (float): step size of the single FGSM step (alpha in the paper).
grad_align_cos_lambda (float): parameter for the regularization term.
"""
def __init__(self, model, eps, alpha, grad_align_cos_lambda):
super().__init__("GradAlign", model)
self.record_keys = ["Loss", "CALoss", "GALoss"] # Must be same as the items returned by self._do_iter
self.eps = eps
self.alpha = alpha
self.grad_align_cos_lambda = grad_align_cos_lambda
def _do_iter(self, train_data):
r"""
Overridden.
"""
images, labels = train_data
X = images.to(self.device)
Y = labels.to(self.device)
# Calculate loss_gradalign
X_new = torch.cat([X.clone(), X.clone()], dim=0)
Y_new = torch.cat([Y.clone(), Y.clone()], dim=0)
delta1 = torch.empty_like(X).uniform_(-self.eps, self.eps)
delta2 = torch.empty_like(X).uniform_(-self.eps, self.eps)
delta1.requires_grad = True
delta2.requires_grad = True
X_new[:len(X)] += delta1
X_new[len(X):] += delta2
X_new = torch.clamp(X_new, 0, 1)
logits_new = self.model(X_new)
loss_gn = nn.CrossEntropyLoss()(logits_new, Y_new)
grad1, grad2 = torch.autograd.grad(loss_gn, [delta1, delta2],
retain_graph=False, create_graph=False)
X_adv = X_new[:len(X)] + self.alpha*grad1.sign()
delta = torch.clamp(X_adv - X, min=-self.eps, max=self.eps).detach()
X_adv = torch.clamp(X + delta, min=0, max=1).detach()
grads_nnz_idx = ((grad1**2).sum([1, 2, 3])**0.5 != 0) * ((grad2**2).sum([1, 2, 3])**0.5 != 0)
grad1, grad2 = grad1[grads_nnz_idx], grad2[grads_nnz_idx]
grad1_norms = self._l2_norm_batch(grad1)
grad2_norms = self._l2_norm_batch(grad2)
grad1_normalized = grad1 / grad1_norms[:, None, None, None]
grad2_normalized = grad2 / grad2_norms[:, None, None, None]
cos = torch.sum(grad1_normalized * grad2_normalized, (1, 2, 3))
loss_gradalign = torch.tensor([0]).to(self.device)
if len(cos) > 0:
cos_aggr = cos.mean()
loss_gradalign = (1.0 - cos_aggr)
# Calculate loss_ce
logits_adv = self.model(X_adv)
loss_ce_adv = nn.CrossEntropyLoss()(logits_adv, Y)
cost = loss_ce_adv + self.grad_align_cos_lambda * loss_gradalign
self.optimizer.zero_grad()
cost.backward()
self.optimizer.step()
return cost.item(), loss_ce_adv.item(), loss_gradalign.item()
def _l2_norm_batch(self, v):
norms = (v ** 2).sum([1, 2, 3]) ** 0.5
# norms[norms == 0] = np.inf
return norms
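# --- Illustrative sketch (not part of the original file) ---
# A minimal demo of the quantity GradAlign regularizes: the cosine between
# input gradients taken at two random points in the eps-ball. The toy model,
# sizes and eps are arbitrary assumptions; call this from a script, since the
# relative import above keeps this module from running standalone.
def _gradalign_penalty_demo(eps=8 / 255):
    torch.manual_seed(0)
    toy = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 10))
    X = torch.rand(4, 3, 8, 8)
    Y = torch.randint(0, 10, (4,))
    grads = []
    for _ in range(2):  # input gradients at two independent random deltas
        delta = torch.empty_like(X).uniform_(-eps, eps).requires_grad_(True)
        loss = nn.CrossEntropyLoss()(toy(X + delta), Y)
        grads.append(torch.autograd.grad(loss, delta)[0])
    cos = nn.functional.cosine_similarity(grads[0].flatten(1), grads[1].flatten(1), dim=1)
    return (1.0 - cos.mean()).item()  # the penalty GradAlign scales by lambda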
|
[
"24K.Harry@gmail.com"
] |
24K.Harry@gmail.com
|
7ccf523ca4f20e6de072af26193bf1213af1a9df
|
cfabf705d4316d05a1177536f9bf8f0b27f48460
|
/arff2parf.py
|
44ee0869b08ed48ea6f7b6b163fac567e298169b
|
[] |
no_license
|
insilico/converters
|
e0ee87c68df0dee9974df3e96b6346cb391cbad0
|
d7333514102cbf1e17e134eb72a877fd9593a767
|
refs/heads/master
| 2020-06-05T02:24:15.024883 | 2011-02-24T16:36:52 | 2011-02-24T16:36:52 | 1,407,302 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 445 |
py
|
#!/usr/bin/env python
import sys
import csv
import re
def splitrow(row):
    return [row[340 * n : 340 * (n + 1)] for n in range(len(row) // 340 + 1)]
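# e.g. a 700-field row is split into chunks of 340, 340 and 20 fields,
# each written out below as its own CSV line.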
input = open( sys.argv[1], 'r')
output = open( sys.argv[2], 'w')
data = re.compile("^@data$", re.I)
for line in input:
output.write(line)
if data.match(line):
break
tab = csv.reader(input)
for row in tab:
rows = splitrow(row)
output.write('\n'.join([','.join(r) for r in rows]) + "\n")
|
[
"effigies@gmail.com"
] |
effigies@gmail.com
|
3ce16f7e2a49813c44454a7ef78cb8393ec0ea0c
|
7ad29229d9677d65ac309afd0e041483f612297b
|
/2nd_homework/E. Чемпионат по метанию коровьих лепешек/main.py
|
e06e0e14d47793b668e674a27f3168e189d3f547
|
[] |
no_license
|
bigbob004/yandex-algos-training
|
13fb68e99cf9590970c3ccfc517fd382a702bcef
|
f1e91550b1e899556cea73c004e9cd0ad68df280
|
refs/heads/main
| 2023-08-23T13:57:45.270762 | 2021-10-06T09:26:45 | 2021-10-06T09:26:45 | 413,980,075 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,195 |
py
|
#1. Collect the winners' positions into a single list
#2. Collect the positions of the "fives" into a single list
#3. Find the right "five"
#4. Count how many elements are greater than our "five"
N = int(input())
def search_pos_of_winner(lst):
winner_pos = 0
for i in range(1, N):
if lst[i] > lst[winner_pos]:
winner_pos = i
return winner_pos
def is_last_digit_five(number):
return number % 10 == 5
def is_neighboring_number_less(neighbor, number):
if neighbor < number:
return True
return False
def pos_of_Vasya(lst, winner_pos):
answer = -1
for i in range(0, N - 1):
if is_last_digit_five(lst[i]) and is_neighboring_number_less(lst[i + 1], lst[i]):
if winner_pos < i and (answer == -1 or lst[i] > lst[answer]):
answer = i
return answer
answer = 0
lst = list(map(int, input().split()))
win_pos = search_pos_of_winner(lst)
Vasya = pos_of_Vasya(lst, win_pos)
if Vasya != -1:
for item in lst:
if item > lst[Vasya]:
answer += 1
answer += 1
print(answer)
|
[
"bigbobi004@gmail.com"
] |
bigbobi004@gmail.com
|
042e1d38d801465d0ca7ae7a6feda110a7e5825c
|
5cea76d53779d466f19a5cf0b51e003586cc4a7b
|
/python开发技术详解/源文件/02/2.4/2.4.1/number_type.py
|
12972ea330682a3ae610b87a14be45e5770f2447
|
[] |
no_license
|
evan886/python
|
40152fdb4885876189580141abe27a983d04e04d
|
d33e996e93275f6b347ecc2d30f8efe05accd10c
|
refs/heads/master
| 2021-06-28T12:35:10.793186 | 2021-05-26T14:33:40 | 2021-05-26T14:33:40 | 85,560,342 | 2 | 1 | null | 2017-10-11T05:31:06 | 2017-03-20T09:51:50 |
JavaScript
|
GB18030
|
Python
| false | false | 326 |
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# The two assignments to i below do not reference the same object
i = 1
print id(i)
i = 2
print id(i)
# integer (int)
i = 1
print type(i)
# long integer (long)
l = 9999999990
print type(l)
# float
f = 1.2
print type(f)
# boolean
b = True
print type(b)
# complex
c = 7 + 8j
print type(c)
|
[
"evan886@gmail.com"
] |
evan886@gmail.com
|
bd9dd0ef76d0b1c41df9777a8d25bc8965333481
|
46da9e9446f3c540d10502f7e788e48796c95f76
|
/csvData.py
|
74ab379ab72a1682147ba98d39a6ca9cfc8af046
|
[] |
no_license
|
pscigala/csv-viewer
|
c38cdbe94e70b07939868c18b84670c3fa4488c7
|
462d68c0a99cadfa1802501d6143e47880e574db
|
refs/heads/master
| 2020-07-22T10:26:17.602485 | 2019-09-08T20:37:08 | 2019-09-08T20:37:08 | 207,167,909 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,256 |
py
|
import csv
import unittest
class CSVData(object):
def __init__(self, filepath, encoding='utf8', delimiter=';', quotechar='"') -> None:
self.filepath = filepath
self.encoding = encoding
self.delimiter = delimiter
self.quotechar = quotechar
self.rows = []
lines = CSVData.read_file_lines(self.filepath, self.encoding)
reader = csv.reader(lines, delimiter=self.delimiter, quotechar=self.quotechar)
for i, row in enumerate(reader):
row = list(map(lambda r: r.strip(), row))
if i == 0:
self.header = row
else:
self.rows.append(row)
@staticmethod
def read_file_lines(filepath, encoding):
file = open(filepath, encoding=encoding)
lines = file.readlines()
file.close()
return lines
def get_column(self, header_name: str):
index = self.header.index(header_name)
cols = []
for row in self.rows:
cols.append(row[index])
return cols
def get_column_by_index(self, header_index: int):
cols = []
for row in self.rows:
cols.append(row[header_index])
return cols
def pretty_print(self):
longest = len(max(self.header, key=len))
for row in self.rows:
l = len(max(row, key=len))
if longest < l:
longest = l
max_line = (longest + 3) * (len(self.header)) + 1
pretty = "".rjust(max_line, '-')
pretty += '\n'
pretty += '| '
pretty += '| '.join(list(map(lambda s: str(s).ljust(longest + 1), self.header)))
pretty += '| '
pretty += '\n'
pretty += "".rjust(max_line, '-')
pretty += '\n'
for row in self.rows:
pretty += '| '
pretty += '| '.join(list(map(lambda s: str(s).ljust(longest + 1), row)))
pretty += '| '
pretty += '\n'
pretty += "".rjust(max_line, '-')
pretty += '\n'
return pretty
class CSVDataTest(unittest.TestCase):
TEST_FILE = 'testfile.csv'
ENCODING = 'utf-8'
def test_csv_parse_header(self):
data = CSVData(filepath=self.TEST_FILE, encoding=self.ENCODING)
header = data.header
self.assertEqual('1', header[0])
self.assertEqual('2', header[1])
self.assertEqual('3', header[2])
self.assertEqual('4', header[3])
def test_csv_parse_rows(self):
data = CSVData(filepath=self.TEST_FILE, encoding=self.ENCODING)
rows = data.rows
self.assertEqual('row1-col1', rows[0][0])
self.assertEqual('row1-col2', rows[0][1])
self.assertEqual('row1-col3', rows[0][2])
self.assertEqual('row1-col4', rows[0][3])
self.assertEqual('row4-col1', rows[3][0])
self.assertEqual('row4-col2', rows[3][1])
self.assertEqual('row4-col3', rows[3][2])
self.assertEqual('row4-col4', rows[3][3])
def test_csv_column_by_header_name(self):
data = CSVData(filepath=self.TEST_FILE, encoding=self.ENCODING)
column1 = data.get_column('1')
column4 = data.get_column('4')
self.assertEqual('row1-col1', column1[0])
self.assertEqual('row2-col1', column1[1])
self.assertEqual('row3-col1', column1[2])
self.assertEqual('row4-col1', column1[3])
self.assertEqual('row1-col4', column4[0])
self.assertEqual('row2-col4', column4[1])
self.assertEqual('row3-col4', column4[2])
self.assertEqual('row4-col4', column4[3])
def test_csv_column_by_header_index(self):
data = CSVData(filepath=self.TEST_FILE, encoding=self.ENCODING)
column1 = data.get_column_by_index(0)
column4 = data.get_column_by_index(3)
self.assertEqual('row1-col1', column1[0])
self.assertEqual('row2-col1', column1[1])
self.assertEqual('row3-col1', column1[2])
self.assertEqual('row4-col1', column1[3])
self.assertEqual('row1-col4', column4[0])
self.assertEqual('row2-col4', column4[1])
self.assertEqual('row3-col4', column4[2])
self.assertEqual('row4-col4', column4[3])
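# --- Illustrative fixture (not in the original file) ---
# The tests above read a local 'testfile.csv'; its shape can be inferred from
# the assertions, so writing it here makes the module self-contained.
def _write_fixture(path=CSVDataTest.TEST_FILE):
    rows = [["1", "2", "3", "4"]] + [
        ["row%d-col%d" % (r, c) for c in range(1, 5)] for r in range(1, 5)
    ]
    with open(path, "w", encoding="utf-8") as f:
        f.write("\n".join(";".join(row) for row in rows) + "\n")
_write_fixture()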
if __name__ == '__main__':
unittest.main()
|
[
"przemyslaw.scigala@gmail.com"
] |
przemyslaw.scigala@gmail.com
|
ea10017a3a78b171bcbeb31c3805a6864c6ecf09
|
290128c39d4519ffde2df454018a025363b5deda
|
/Assignment5/sub.py
|
cbc9fa1f6418863ed5661df3640b6a971dafeab8
|
[] |
no_license
|
Shikhar-S/DataAnalytics
|
bf68c49724c2d4669cadefae99e54eea0dd6b240
|
65bc144a033925cf4057fff8fd1cf9a8d4af2e12
|
refs/heads/master
| 2020-07-09T15:06:51.498478 | 2019-12-19T16:13:40 | 2019-12-19T16:13:40 | 204,000,166 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,789 |
py
|
import pandas as pd
df=pd.read_csv('../data/Raw_Data_GeneSpring.csv', sep='\t')
def get_value(category,idx):
#category def
#1 male non smoker
#2 male smoker
#3 women non smoker
#4 women smoker
columns=[]
if category==1:
columns=df.columns[1:13]
elif category==2:
columns=df.columns[13:25]
elif category==3:
columns=df.columns[25:37]
elif category==4:
columns=df.columns[37:49]
return df.iloc[idx][columns].values
import numpy as np
import scipy.stats as stats
def setup_models():
A=np.zeros((48,4))
for i in range(4):
for j in range(12):
A[j+(i*12),i]=1
A_null=np.zeros((48,4))
for i in range(4):
for j in range(12):
if i==0:
A_null[j+(i*12),0]=1
A_null[j+(i*12),1]=1
elif i==1:
A_null[j+(i*12),0]=1
A_null[j+(i*12),2]=1
elif i==2:
A_null[j+(i*12),3]=1
A_null[j+(i*12),1]=1
elif i==3:
A_null[j+(i*12),3]=1
A_null[j+(i*12),2]=1
return (A,A_null)
def setup_1way_model():
B=np.zeros((24,2))
for i in range(2):
for j in range(12):
B[j+i*12,i]=1
B_null=np.zeros((24,1))+1
return (B,B_null)
def get_sum_sq(A,h):
k=h.shape[0]
P=np.matmul(np.transpose(A),A)
S=np.linalg.pinv(P)
Inner=np.eye(k)-np.matmul(np.matmul(A,S),np.transpose(A))
sum_sq=np.matmul(np.matmul(np.transpose(h),Inner),h)
return sum_sq
def generate_p_value(h,A,A_null):
denom=get_sum_sq(A,h)
if denom<1e-8:
return 1
A_rank=np.linalg.matrix_rank(A)
A_null_rank=np.linalg.matrix_rank(A_null)
    F=((get_sum_sq(A_null,h)/denom)-1)*((h.shape[0]-A_rank)/(A_rank-A_null_rank)) # e.g. (48-4)/(4-3)
p=1-stats.f.cdf(F,A_rank-A_null_rank,h.shape[0]-A_rank)
return p
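# The statistic above is the classical nested-model F-test:
#   F = ((SS_null - SS_full) / SS_full) * ((n - p_full) / (p_full - p_null))
# with the p-value taken from the F(p_full - p_null, n - p_full) distribution,
# where p_* are the ranks of the design matrices.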
def compute_gene_type(i):
    # ans index meanings (see below):
    #0- down in women
    #1- up in women
    #2- down in men
    #3- up in men
data=[]
for category in range(1,5):
data.append(get_value(category,i))
means=[np.mean(x) for x in data]
delta_men=means[0]-means[1]
delta_women=means[2]-means[3]
ans=[-1,-1] #-1 denotes no list
if significant_difference(i,male=True):
ans[0]=2 if delta_men>0 else 3
if significant_difference(i,male=False):
ans[1]=0 if delta_women>0 else 1
return ans
def significant_difference(i,male=True):
B,B_null=setup_1way_model()
h=None
if male:
h=df.iloc[i][df.columns[1:25]].values
else:
h=df.iloc[i][df.columns[25:49]].values
p_value_anova=generate_p_value(h,B,B_null)
    if p_value_anova<0.05:
        return True
    return False
print('Computing p values, please wait.')
P=[]
A,A_null=setup_models()
for idx,row in df.iterrows():
h=row[df.columns[1:49]].values
if idx%10000==0:
print('On row,',idx)
P.append(generate_p_value(h,A,A_null))
import matplotlib.pyplot as plt
plt.hist(P, bins = 20)
plt.show()
print('Since histogram is clustered towards 1, no better estimate for n_0 is justifiable than n.')
shortlisted_genes=[]
for i,p in enumerate(P):
if(p<0.05): #as instructed on forum
shortlisted_genes.append(i)
print('shortlisted_genes count:',len(shortlisted_genes))
DNA_REPAIR=["ABL1","ALKBH1","APEX1","APTX","ASF1A","ATM","ATP23","ATR","ATRX","ATXN3","BLM","BRCA1","BRCA2","BTG2","CCNO","CDKN2D","CEBPG","CIB1","CSNK1D","CSNK1E","DDB1","DDB2","ERCC1","ERCC2","ERCC3","ERCC4","ERCC5","ERCC6","ERCC8","EXO1","FANCA","FANCC","FANCG","FEN1","GADD45A","GADD45G","GTF2H1","GTF2H4","HMGB1","HMGB1P10","HMGB2","HUS1","IGHMBP2","KAT5","LIG1","LIG3","LIG4","MLH1","MMS19","MNAT1","MPG","MRE11","MSH2","MSH3","MSH5","MSH6","MUTYH","NBN","NHEJ1","NTHL1","OGG1","PARP1","PARP3","PMS1","PMS2","PMS2P1","PNKP","POLA1","POLD1","POLE","POLE2","POLG","POLH","POLI","POLL","POLQ","PRKCG","RAD1","RAD17","RAD21","RAD23A","RAD23B","RAD50","RAD51","RAD51B","RAD51C","RAD52","RAD54B","RAD54L","RAD9A","RBBP8","RECQL","RECQL4","RECQL5","REV1","RFC3","RPA1","RPAIN","RUVBL2","SETX","SMC1A","SMUG1","SOD1","SUMO1","TDG","TNP1","TP53","TP73","TREX2","UBE2A","UBE2B","UBE2N","UBE2V1","UBE2V2","UNG","UPF1","UVRAG","VCP","WRNIP1","XAB2","XPC","XRCC2","XRCC3","XRCC4","XRCC6"]
FREE_RADICAL=["ADPRHL2","APOA4","ATP7A","BMP7","CCS","CD36","DHFR","DHFRP1","ERCC6","FANCC","FBLN5","GCH1","GLRX2","MIR21","MPO","MT3","NFE2L2","NOS3","NQO1","PARK7","PRDX1","PRDX2","RGN","SOD1","SOD2","SOD3","SZT2","TNF","TXNRD2","UCP2","UCP3"]
CYTOTOXICITY=["ARAF","BID","BRAF","CASP3","CD244","CD247","CD48","CHP1","CHP2","CSF2","FAS","FASLG","FCER1G","FCGR3A","FCGR3B","FYN","GRB2","GZMB","HCST","HLA-A","HLA-B","HLA-C","HLA-E","HLA-G","HRAS","ICAM1","ICAM2","IFNA1","IFNA10","IFNA13","IFNA14","IFNA16","IFNA17","IFNA2","IFNA21","IFNA4","IFNA5","IFNA6","IFNA7","IFNA8","IFNAR1","IFNAR2","IFNB1","IFNG","IFNGR1","IFNGR2","ITGAL","ITGB2","KIR2DL1","KIR2DL2","KIR2DL3","KIR2DL4","KIR2DL5A","KIR2DS1","KIR2DS3","KIR2DS4","KIR2DS5","KIR3DL1","KIR3DL2","KLRC1","KLRC2","KLRC3","KLRD1","KLRK1","KRAS","LAT","LCK","LCP2","MAP2K1","MAP2K2","MAPK1","MAPK3","MICA","MICB","NCR1","NCR2","NCR3","NFAT5","NFATC1","NFATC2","NFATC3","NFATC4","NRAS","PAK1","PIK3CA","PIK3CB","PIK3CD","PIK3CG","PIK3R1","PIK3R2","PIK3R3","PIK3R5","PLCG1","PLCG2","PPP3CA","PPP3CB","PPP3CC","PPP3R1","PPP3R2","PRF1","PRKCA","PRKCB","PRKCG","PTK2B","PTPN11","PTPN6","RAC1","RAC2","RAC3","RAET1E","RAET1G","RAET1L","RAF1","SH2D1A","SH2D1B","SH3BP2","SHC1","SHC2","SHC3","SHC4","SOS1","SOS2","SYK","TNF","TNFRSF10A","TNFRSF10B","TNFRSF10C","TNFRSF10D","TNFSF10","TYROBP","ULBP1","ULBP2","ULBP3","VAV1","VAV2","VAV3","ZAP70"]
XENOBIOTIC=["AADAC","ACAA1","ACSL1","ACSM1","ACSM2B","ACY1","ACY3","AHR","AHRR","AIP","AKR1C1","AKR7A2","AKR7A3","AKR7L","ALDH3A1","AOC1","AOC2","AOC3","ARNT","ARNT2","AS3MT","BCHE","BPHL","CBR3","CES1","CES2","CES3","CMBL","CRYZ","CYB5B","CYB5R3","CYP1A1","CYP1A2","CYP1B1","CYP26A1","CYP26B1","CYP2A13","CYP2A6","CYP2A7","CYP2B6","CYP2C18","CYP2C19","CYP2C8","CYP2C9","CYP2D6","CYP2D7","CYP2E1","CYP2F1","CYP2G1P","CYP2J2","CYP2R1","CYP2S1","CYP2U1","CYP2W1","CYP3A4","CYP3A5","CYP3A7","CYP46A1","DPEP1","EPHX1","EPHX2","FMO1","FMO2","FMO3","GGT1","GLYAT","GRIN1","GSTA4","GSTM1","GSTM2","GSTM3","GSTM4","GSTO1","GSTO2","GSTP1","HNF4A","HSP90AB1","LPO","MARC1","MARC2","MGST1","MGST2","MGST3","N6AMT1","NAT1","NAT2","NQO1","NQO2","NR1I2","PON3","POR","PTGES3","PTGS1","RORA","RORC","S100A12","STAR","SULT1A1","SULT1A2","SULT1A3","SULT1B1","UGT1A1","UGT1A10","UGT1A3","UGT1A4","UGT1A5","UGT1A6","UGT1A7","UGT1A8","UGT1A9","UGT2B11","UGT2B15","UGT2B28"]
dna_repair=[0 for i in range(4)]
free_radical=[0 for i in range(4)]
cytotoxicity=[0 for i in range(4)]
xenobiotic=[0 for i in range(4)]
for i in shortlisted_genes:
Gene=df.iloc[i]['GeneSymbol']
z=compute_gene_type(i)
flag=False
if Gene in DNA_REPAIR:
if z[0]!=-1:
dna_repair[z[0]]+=1
if z[1]!=-1:
dna_repair[z[1]]+=1
flag=True
if Gene in FREE_RADICAL:
if z[0]!=-1:
free_radical[z[0]]+=1
if z[1]!=-1:
free_radical[z[1]]+=1
flag=True
if Gene in CYTOTOXICITY:
if z[0]!=-1:
cytotoxicity[z[0]]+=1
if z[1]!=-1:
cytotoxicity[z[1]]+=1
flag=True
if Gene in XENOBIOTIC:
if z[0]!=-1:
xenobiotic[z[0]]+=1
if z[1]!=-1:
xenobiotic[z[1]]+=1
flag=True
# if flag:
# print(z)
print('Intersection Counts- Down in women, Up in women, Down in men, Up in men')
print("DNA Repair",dna_repair)
print("Free radical response",free_radical)
print("Natural killer cell cytotoxicity",cytotoxicity)
print("Xenobiotic Metabolism",xenobiotic)
|
[
"shikhar.coder@gmail.com"
] |
shikhar.coder@gmail.com
|
8326caf28faa6d4e215952aa5353afffbf149778
|
acc2aeef64115eb4561f74b305ca74e54316c265
|
/21个项目玩转深度学习/chapter1 MNIST机器学习入门/softmax_regression.py
|
a888cb55efadc594012d0289ee5d2484c4eae019
|
[] |
no_license
|
wbqjyjy/Deep-Learning-Program
|
c02a8bc36016f225a890bd419738a4c191541dc1
|
2802462d98f362e7c414fe13247cb5561c69ad24
|
refs/heads/master
| 2020-04-08T14:46:03.134962 | 2019-05-12T16:44:12 | 2019-05-12T16:44:12 | 159,450,926 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 888 |
py
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
x=tf.placeholder(tf.float32,[None,784])
y_=tf.placeholder(tf.float32,[None,10])
W=tf.Variable(tf.zeros([784,10]))
b=tf.Variable(tf.zeros([10]))
y=tf.nn.softmax(tf.matmul(x,W)+b)
cross_entropy=tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y)))
train_step=tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
sess=tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(1000):
batch_xs,batch_ys = mnist.train.next_batch(100)
sess.run(train_step,feed_dict ={x:batch_xs,y_:batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
print(sess.run(accuracy,feed_dict={x:mnist.test.images,y_:mnist.test.labels}))
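# Expected result: this plain softmax-regression baseline typically reaches
# roughly 92% test accuracy on MNIST (exact value varies run to run).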
|
[
"noreply@github.com"
] |
wbqjyjy.noreply@github.com
|
a8798dca19c7e26be97a3647cdfc35d8dce39efe
|
9af364af59511b0df435b8914b226d4f5eebe4fa
|
/source/data/ChunkAdaptor.py
|
b1979e548971b19871667d771a1933e445fe91f2
|
[] |
no_license
|
sudnya/bert-word-embeddings
|
e7ed3b09bc4db1a9ff92784cac2856f74a2ef5f7
|
09f2d913f6b32101a43c1da84adfe47205824f6d
|
refs/heads/master
| 2022-12-04T18:19:32.495031 | 2019-08-03T07:09:34 | 2019-08-03T07:09:34 | 170,256,409 | 0 | 0 | null | 2022-09-23T22:21:45 | 2019-02-12T05:14:49 |
Python
|
UTF-8
|
Python
| false | false | 915 |
py
|
import logging
logger = logging.getLogger(__name__)
class ChunkAdaptor:
def __init__(self, config, source):
self.config = config
self.source = source
def next(self):
chunk = [self.source.next() for i in range(self.getChunkSize())]
logger.debug("Produced chunk of tokens: " + str(chunk))
return chunk
def getChunkSize(self):
if not "size" in self.config["adaptor"]["chunking"]:
return 16
return int(self.config["adaptor"]["chunking"]["size"])
def size(self):
return self.source.size() // self.getChunkSize()
def reset(self):
self.source.reset()
def setMaximumSize(self, size):
self.source.setMaximumSize(size * self.getChunkSize())
def shuffleDocuments(self):
self.source.shuffleDocuments()
def clone(self):
return ChunkAdaptor(self.config, self.source.clone())
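# Note: size() uses floor division, so a trailing partial chunk in the
# source is not counted; next() always returns exactly getChunkSize() tokens.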
|
[
"solusstultus@gmail.com"
] |
solusstultus@gmail.com
|
f04777412a8523157317d3eac4f93709fc5b3593
|
1da23d3bc4a7e21d81fe26c6b9f2b7f50711239b
|
/server/rating/calculation/online.py
|
54cb691486cf77569c23edf725df62292f77533f
|
[
"MIT"
] |
permissive
|
eIGato/mahjong-portal
|
42dc62d3f98656ba15c02c3060f351f03ac3304a
|
550a2a872c4287adab6ce30c3440dc2141430a20
|
refs/heads/master
| 2021-07-10T01:52:35.089662 | 2020-10-21T11:45:40 | 2020-10-21T11:45:40 | 212,129,601 | 0 | 0 |
MIT
| 2019-10-01T15:19:36 | 2019-10-01T15:19:36 | null |
UTF-8
|
Python
| false | false | 573 |
py
|
from player.models import Player
from rating.calculation.rr import RatingRRCalculation
from tournament.models import Tournament, TournamentResult
class RatingOnlineCalculation(RatingRRCalculation):
TOURNAMENT_TYPES = [Tournament.ONLINE]
SECOND_PART_MIN_TOURNAMENTS = 3
def get_players(self):
player_ids = TournamentResult.objects.filter(tournament__tournament_type=Tournament.ONLINE).values_list(
"player_id", flat=True
)
return Player.objects.filter(id__in=player_ids).exclude(is_replacement=True).exclude(is_hide=True)
|
[
"lisikhin@gmail.com"
] |
lisikhin@gmail.com
|
1a02a63eff7c3ef26a010da92039d124a9140a6d
|
34afc32346a49730bd5216ad394df10939d051f9
|
/no121.py
|
9fe21e90c2d13f32a4b5016507b26ca178e7c11e
|
[] |
no_license
|
hyoseok-bang/leetcode
|
471137703f571d3371cde32f0adfa3733faa89be
|
be12a4ad66d7c7ca8558ad74d04d0d7c6acc9846
|
refs/heads/main
| 2023-07-03T22:11:09.187203 | 2021-08-09T20:49:41 | 2021-08-09T20:49:41 | 372,629,488 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
import sys
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
profit = 0
min_price = sys.maxsize
for p in prices:
min_price = min(min_price, p)
profit = max(profit, p - min_price)
return profit
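# Quick sanity checks using the examples from the problem statement.
if __name__ == "__main__":
    assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 5  # buy at 1, sell at 6
    assert Solution().maxProfit([7, 6, 4, 3, 1]) == 0     # prices only decline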
|
[
"noreply@github.com"
] |
hyoseok-bang.noreply@github.com
|
766acc5663cd498b1b0e9bc3c0a1d75f176b8b8b
|
83003007b7bc12493e2bca2b5c78be5ea86df56c
|
/Day56-Day70/Day60/rabbit.py
|
df44054acbf7a81a072a6cb377f8dbb2ea4dd6e6
|
[] |
no_license
|
a6361117/code
|
fa7fe2f33c522ad38d92e6c429b50ef8a271bb1e
|
bd8bf877416acc5400dbda90212b7e83020ff643
|
refs/heads/master
| 2022-09-07T22:22:24.765271 | 2020-05-26T14:27:47 | 2020-05-26T14:27:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,264 |
py
|
#Draw a rabbit
from turtle import *
speed(10)
#The rabbit's face
color('pink')
pensize(5)
circle(radius=100)#face
#Eyes
pencolor('black')
#Left eye
pu()
goto(-45,92)
pd()
begin_fill()
color((0,0,0),(0,0,0.1))
circle(radius=15)
#Right eye
pu()
goto(45,92)
pd()
circle(radius=15)
end_fill()
#Nose
pu()
goto(20,60)
color('pink')
pd()
begin_fill()
goto(-20,60)
goto(0,45)
goto(20,60)
end_fill()
#Mouth
goto(0,45)
goto(0,40)
seth(-90)
circle(10,120)
pu()
goto(0,40)
seth(-90)
pd()
circle(-10,120)
#The rabbit's ears
#Left ear
pu()
goto(-60,180)
seth(200)
pd()
circle(radius=350,extent=90)
goto(-98,110)
#Right ear
pu()
goto(60,180)
seth(-20)
pd()
circle(radius=-350,extent=90)
goto(98,110)
#The rabbit's body
pu()
goto(20,3)
seth(-25)
pd()
circle(radius=-250,extent=25)
circle(radius=-135,extent=260)
seth(50)
circle(radius=-250,extent=25)
##The rabbit's arms
#Left arm
pu()
seth(180)
goto(-30,-3)
pd()
#short little arm (alternative radii kept commented out)
##circle(radius=270,extent=20)
##circle(radius=20,extent=190)
circle(radius=248,extent=30)
circle(radius=29,extent=185)
#Right arm
pu()
seth(0)
goto(30,-3)
pd()
circle(radius=-248,extent=30)
circle(radius=-27,extent=184)
##The rabbit's feet
#Left foot
pu()
goto(-162,-260)
pd()
seth(0)
circle(radius=41)
#Right foot
pu()
goto(164,-260)
pd()
circle(radius=41)
done()
|
[
"46365521+Becky-nuo@users.noreply.github.com"
] |
46365521+Becky-nuo@users.noreply.github.com
|
0d42df14da57ccb906ed88596dbb60332f4b0f41
|
c99fcf98cd8e0ecf65794c452847c7329c51fc5c
|
/calender/migrations/0003_auto_20170130_1347.py
|
8dfaeb8305e484b6390dd2e51ae5058f5a679d73
|
[] |
no_license
|
iitians/Productive-Calender
|
5889f30243bc8bc90646e77738dbe37c9c861e28
|
60e9f03fdc3a02179145456a4e2de255f35a0261
|
refs/heads/master
| 2022-09-18T07:45:31.315020 | 2020-06-05T22:47:32 | 2020-06-05T22:47:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 989 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-30 13:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('calender', '0002_auto_20170130_1341'),
]
operations = [
migrations.RenameField(
model_name='calender',
old_name='end',
new_name='end_date',
),
migrations.RenameField(
model_name='calender',
old_name='start',
new_name='start_date',
),
migrations.AddField(
model_name='calender',
name='end_time',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='calender',
name='start_time',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
]
|
[
"anirudhgoel.delhi@gmail.com"
] |
anirudhgoel.delhi@gmail.com
|
fc3617765023ab1000296d388685479f6ba1ca6f
|
743d1918178e08d4557abed3a375c583130a0e06
|
/src/CPSCAnalysis/getCPSCRelated.py
|
e63093d367e5958dd952311a6b852f55229f43a2
|
[] |
no_license
|
aquablue1/dns_probe
|
2a027c04e0928ec818a82c5bf04f485a883cfcb3
|
edd4dff9bea04092ac76c17c6e77fab63f9f188f
|
refs/heads/master
| 2020-03-25T19:40:07.346354 | 2018-11-17T05:31:43 | 2018-11-17T05:31:43 | 144,094,014 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,508 |
py
|
"""
" Get the original CPSC related DNS traffic from original data files.
" Since CPSC DNS (ns1/2.cpsc.ucalgary.ca) mostly involved in the inbound traffic.
" Therefore only the inbound traffic is considered.
" By Zhengping on 2018-08-10
"""
from src.util.FolderReader import folderReader
from src.util.FileReader import fileReader
from src.util.FileWriter import batchFileWriter
from src.util.DNSFieldLocMap import FieldToLoc
import os
def doHourlyCPSCRelatedGen(inputFilename):
inputFile = fileReader(inputFilename)
checkedNames = ["ns1.cpsc.ucalgary.ca", "ns2.cpsc.ucalgary.ca", "mirror.cpsc.ucalgary.ca"]
ret_list = []
for line in inputFile:
queriedName = line.split("\t")[FieldToLoc["query"]]
if queriedName in checkedNames:
ret_list.append(line)
return ret_list
def doDailyCPSCRelatedGen(inputFolder, outputFolder, date):
    filenames = folderReader(inputFolder, date)
outputHandler = batchFileWriter(outputFolder)
for filename in filenames:
outputFilename = "CPSCRow_%s" % filename.split("/")[-1]
hourlyRowData = doHourlyCPSCRelatedGen(filename)
for line in hourlyRowData:
outputHandler.writeString(outputFilename, line+"\n")
if __name__ == '__main__':
date = "2018-07-01"
inputFolder = "../../data/%s/inbound" % date
outputFolder = "../../result/CPSCRow/%s/" % date
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
    doDailyCPSCRelatedGen(inputFolder, outputFolder, date)
|
[
"94apieceofcake@gmail.com"
] |
94apieceofcake@gmail.com
|
24614005cd41266af635b51583d8dba3e5567313
|
37fba5d10e8ddefb336abc63d855e40495ec3b56
|
/DISCOS.py
|
fa18381d76880d51ab55564c749896e1b2b7a22a
|
[] |
no_license
|
Jeysi2004/GRUPO-5
|
437ade4486f735b96c11f1b06ed5efc019ae9349
|
1895bc6616d9b017898e3e28bf8ebfdc0cc30e1f
|
refs/heads/main
| 2023-05-24T08:48:32.452892 | 2021-06-09T02:32:46 | 2021-06-09T02:32:46 | 375,191,651 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,584 |
py
|
precio= input("UNIT PRICES: \n 1-Rock=63.00"
              "\n 4-Salsa=56.00"
              "\n 3-Pop=87.00"
              "\n 5-Folclore=120.50")
marca=input("Purchase (Salsa, Rock, Pop, Folclore): ")
costo=float(input("Unit price: "))
cant=int(input("Number of discs: "))
if cant==4:
    if marca in ("Salsa", "Folclore", "Pop", "Rock"):
        xdes=costo*cant-(costo*0.06)
        print("You receive a 6% discount; the TOTAL to pay is:",xdes)
elif marca == "Pop":
    if cant>=6 and cant<=10:
        fdes=costo*cant-(costo*0.08)
        print("You receive an 8% discount plus a free POSTER; the TOTAL to pay is: ",fdes)
    elif cant>10:
        gdes=costo*cant-(costo*0.102)
        print("You receive a 10.2% discount plus a free POSTER; the TOTAL to pay is: ",gdes)
elif marca == "Rock":
    if cant>=6 and cant<=10:
        cdes=costo*cant-(costo*0.08)
        print("You receive an 8% discount plus a free POSTER; the TOTAL to pay is: ",cdes)
    elif cant>10:
        ggdes=costo*cant-(costo*0.102)
        print("You receive a 10.2% discount plus a free POSTER; the TOTAL to pay is: ",ggdes)
elif cant>=5 and cant<10:
    if marca in ("Salsa", "Folclore"):
        xxdes=costo*cant-(costo*0.08)
        print("You receive an 8% discount; the TOTAL to pay is:",xxdes)
elif cant>10:
    if marca in ("Salsa", "Folclore"):
        ffdes=costo*cant-(costo*0.102)
        print("You receive a 10.2% discount; the TOTAL to pay is:",ffdes)
|
[
"noreply@github.com"
] |
Jeysi2004.noreply@github.com
|
184d5f30fcf054b45ec55b2a790f414e2ee52064
|
2f9a846f82ac0f5dba333423b4bab1f47ae7a131
|
/c4_contextmanagers/part2.py
|
7dbd365049eb2fdecc9cf019d753dc1ed8b3d109
|
[] |
no_license
|
Delia86/2PEP21G01
|
f4e2f48964ae5637683d777ff73d5948df688bab
|
d2863d31cdbc30f75e6b25b03510a0206969d82f
|
refs/heads/master
| 2023-07-29T15:33:05.538821 | 2021-09-11T15:35:31 | 2021-09-11T15:35:31 | 381,104,015 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 324 |
py
|
## Generate graphs using matplotlib
import matplotlib.pyplot as plt
fig1,(ay1,ay2)=plt.subplots(nrows=2,ncols=1,sharex='all')
fig1.dpi=200.0
ay1.plot([1,2,3], [5,5,5],label='test1')
ay1.legend()
ay2.plot([3,4,5],[4,4,5],label='test2')
ay2.legend()
plt.xlabel('seconds')
plt.ylabel('$')
plt.title('Money over time')
plt.show()
|
[
"deliadragos@gmailcm"
] |
deliadragos@gmailcm
|
0166c637b079f3bdf04833cf01c9e51e8c1cdb8f
|
fb51e1c19c657356ab53a1068ec9b711c9389c8f
|
/talk/meta_expl.py
|
d0bb4918327b7ebb02a2192eb0124b14a450ae31
|
[] |
no_license
|
zefciu/hic-sunt-pythones
|
c6fa4be6155c3b1576fb5267cef4e81a00489a5a
|
504d4fdb331e6c59dfa432d2445275b18609e894
|
refs/heads/master
| 2021-01-10T19:53:53.644911 | 2011-09-24T08:33:50 | 2011-09-24T08:33:50 | 2,051,327 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 181 |
py
|
class Bazowa1(object): pass  # stub bases (hypothetical) so the snippet runs standalone
class Bazowa2(object): pass
class NazwaKlasy(Bazowa1, Bazowa2):
atr1 = 'wartosc1'
atr2 = 'wartosc2'
NazwaKlasy = type('NazwaKlasy', (Bazowa1, Bazowa2), {
    'atr1': 'wartosc1', 'atr2': 'wartosc2'
})
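# Both spellings are equivalent; a quick check on the dynamically built class:
print(NazwaKlasy.atr2)  # -> wartosc2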
|
[
"zefciu@gmail.com"
] |
zefciu@gmail.com
|
134557e0e1e1eb54fe528b9a86944b36b65cba5f
|
baf1542b14ab4aa96bc328adef255435d031984d
|
/torchfcn/trainer.py
|
64aaa191f7821db2ee71ff956538be908f10f01e
|
[] |
no_license
|
YifanPTAH/GaitRecFooler
|
ec438ac3e0c903d0f5a534a6c3838f6ba220b875
|
e3839693efce759d9fe66f9dabb8eda2868476cf
|
refs/heads/master
| 2020-06-24T01:00:03.636362 | 2019-08-20T06:59:04 | 2019-08-20T06:59:04 | 198,800,940 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,088 |
py
|
import datetime
from distutils.version import LooseVersion
import math
import os
import os.path as osp
import shutil
import fcn
import numpy as np
import pytz
import scipy.misc
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import tqdm
import torchfcn
def cross_entropy2d(input, target, weight=None, size_average=True):
# input: (n, c, h, w), target: (n, h, w)
n, c, h, w = input.size()
# log_p: (n, c, h, w)
if LooseVersion(torch.__version__) < LooseVersion('0.3'):
# ==0.2.X
log_p = F.log_softmax(input)
else:
# >=0.3
log_p = F.log_softmax(input, dim=1)
# log_p: (n*h*w, c)
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
log_p = log_p.view(-1, c)
# target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, weight=weight, reduction='sum')
if size_average:
loss /= mask.data.sum()
return loss
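# Illustrative shape contract: scores of shape (n, c, h, w) with integer
# labels of shape (n, h, w) yield a scalar loss; pixels labeled < 0 are
# masked out, e.g. input (1, 2, 4, 4) with target (1, 4, 4).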
class Trainer(object):
def __init__(self, cuda, model, optimizer,
train_loader, val_loader, out, max_iter,
size_average=False, interval_validate=None):
self.cuda = cuda
self.model = model
self.optim = optimizer
self.train_loader = train_loader
self.val_loader = val_loader
self.timestamp_start = \
datetime.datetime.now(pytz.timezone('Asia/Tokyo'))
self.size_average = size_average
if interval_validate is None:
self.interval_validate = len(self.train_loader)
else:
self.interval_validate = interval_validate
self.out = out
if not osp.exists(self.out):
os.makedirs(self.out)
self.log_headers = [
'epoch',
'iteration',
'train/loss',
'train/acc',
'train/acc_cls',
'train/mean_iu',
'train/fwavacc',
'valid/loss',
'valid/acc',
'valid/acc_cls',
'valid/mean_iu',
'valid/fwavacc',
'elapsed_time',
]
if not osp.exists(osp.join(self.out, 'log.csv')):
with open(osp.join(self.out, 'log.csv'), 'w') as f:
f.write(','.join(self.log_headers) + '\n')
self.epoch = 0
self.iteration = 0
self.max_iter = max_iter
self.best_mean_iu = 0
def validate(self):
training = self.model.training
self.model.eval()
n_class = len(self.val_loader.dataset.class_names)
val_loss = 0
visualizations = []
label_trues, label_preds = [], []
for batch_idx, (data, target) in tqdm.tqdm(
enumerate(self.val_loader), total=len(self.val_loader),
desc='Valid iteration=%d' % self.iteration, ncols=80,
leave=False):
if self.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
with torch.no_grad():
score = self.model(data)
loss = cross_entropy2d(score, target,
size_average=self.size_average)
loss_data = loss.data.item()
if np.isnan(loss_data):
raise ValueError('loss is nan while validating')
val_loss += loss_data / len(data)
imgs = data.data.cpu()
lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
lbl_true = target.data.cpu()
for img, lt, lp in zip(imgs, lbl_true, lbl_pred):
img, lt = self.val_loader.dataset.untransform(img, lt)
label_trues.append(lt)
label_preds.append(lp)
if len(visualizations) < 9:
viz = fcn.utils.visualize_segmentation(
lbl_pred=lp, lbl_true=lt, img=img, n_class=n_class)
visualizations.append(viz)
metrics = torchfcn.utils.label_accuracy_score(
label_trues, label_preds, n_class)
out = osp.join(self.out, 'visualization_viz')
if not osp.exists(out):
os.makedirs(out)
out_file = osp.join(out, 'iter%012d.jpg' % self.iteration)
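        # Note: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in
        # 1.2; imageio.imwrite is the usual replacement on newer versions.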
scipy.misc.imsave(out_file, fcn.utils.get_tile_image(visualizations))
val_loss /= len(self.val_loader)
with open(osp.join(self.out, 'log.csv'), 'a') as f:
elapsed_time = (
datetime.datetime.now(pytz.timezone('Asia/Tokyo')) -
self.timestamp_start).total_seconds()
log = [self.epoch, self.iteration] + [''] * 5 + \
[val_loss] + list(metrics) + [elapsed_time]
log = map(str, log)
f.write(','.join(log) + '\n')
mean_iu = metrics[2]
is_best = mean_iu > self.best_mean_iu
if is_best:
self.best_mean_iu = mean_iu
torch.save({
'epoch': self.epoch,
'iteration': self.iteration,
'arch': self.model.__class__.__name__,
'optim_state_dict': self.optim.state_dict(),
'model_state_dict': self.model.state_dict(),
'best_mean_iu': self.best_mean_iu,
}, osp.join(self.out, 'checkpoint.pth.tar'))
if is_best:
shutil.copy(osp.join(self.out, 'checkpoint.pth.tar'),
osp.join(self.out, 'model_best.pth.tar'))
if training:
self.model.train()
def train_epoch(self):
self.model.train()
n_class = len(self.train_loader.dataset.class_names)
for batch_idx, (data, target) in tqdm.tqdm(
enumerate(self.train_loader), total=len(self.train_loader),
desc='Train epoch=%d' % self.epoch, ncols=80, leave=False):
iteration = batch_idx + self.epoch * len(self.train_loader)
if self.iteration != 0 and (iteration - 1) != self.iteration:
continue # for resuming
self.iteration = iteration
if self.iteration % self.interval_validate == 0:
self.validate()
assert self.model.training
if self.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
self.optim.zero_grad()
score = self.model(data)
loss = cross_entropy2d(score, target,
size_average=self.size_average)
loss /= len(data)
loss_data = loss.data.item()
if np.isnan(loss_data):
raise ValueError('loss is nan while training')
loss.backward()
self.optim.step()
metrics = []
lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
lbl_true = target.data.cpu().numpy()
acc, acc_cls, mean_iu, fwavacc = \
torchfcn.utils.label_accuracy_score(
lbl_true, lbl_pred, n_class=n_class)
metrics.append((acc, acc_cls, mean_iu, fwavacc))
metrics = np.mean(metrics, axis=0)
with open(osp.join(self.out, 'log.csv'), 'a') as f:
elapsed_time = (
datetime.datetime.now(pytz.timezone('Asia/Tokyo')) -
self.timestamp_start).total_seconds()
log = [self.epoch, self.iteration] + [loss_data] + \
metrics.tolist() + [''] * 5 + [elapsed_time]
log = map(str, log)
f.write(','.join(log) + '\n')
if self.iteration >= self.max_iter:
break
def train(self):
max_epoch = int(math.ceil(1. * self.max_iter / len(self.train_loader)))
for epoch in tqdm.trange(self.epoch, max_epoch,
desc='Train', ncols=80):
self.epoch = epoch
self.train_epoch()
if self.iteration >= self.max_iter:
break
|
[
"48731983+YifanPTAH@users.noreply.github.com"
] |
48731983+YifanPTAH@users.noreply.github.com
|
4545436b5a1a523425b7532ee6097dcbd00bd541
|
00cc3fb459545493c28c8ffe4d5fb91868bf2065
|
/boot.py
|
29cd480b9a5b38118b8f6079c974f17a5dfcc7df
|
[
"MIT"
] |
permissive
|
chrigu/lopy-temp
|
22a2d7ff5e15a85f3768a5d98c9956e5f4822273
|
3d85fa19600a86a32beb183101c8f3fc754431f7
|
refs/heads/master
| 2021-09-08T00:50:52.332154 | 2018-03-04T18:25:51 | 2018-03-04T18:25:51 | 113,616,696 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 381 |
py
|
# boot.py -- run on boot-up
import os
from machine import UART, reset_cause, SOFT_RESET
from pycom import heartbeat, rgbled
from utime import sleep
from wlan import setup_wifi
uart = UART(0, 115200)
os.dupterm(uart)
heartbeat(False)
rgbled(0x0000aa)
sleep(3)
rgbled(0x000000)
sleep(1)
rgbled(0x0000aa)
sleep(3)
rgbled(0x000000)
if reset_cause() != SOFT_RESET:
setup_wifi()
|
[
"noreply@github.com"
] |
chrigu.noreply@github.com
|
c15ff70830104dc267e24f059b88cd1002f1879d
|
ecae7275fd43ec93ca5771083e05ae864685faf9
|
/DataScience/pandas/2column1.py
|
eb1bc2f91c3de96c00fb9272b9179e11d6d5d730
|
[] |
no_license
|
shamoldas/pythonBasic
|
104ca8d50099c2f511802db1f161f6d050f879cc
|
3a7252a15f6d829f55700ec2ff7f7d153f3ec663
|
refs/heads/main
| 2023-01-09T06:38:55.357476 | 2020-11-11T12:27:31 | 2020-11-11T12:27:31 | 311,960,017 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 314 |
py
|
# importing pandas
import pandas as pd
df = pd.DataFrame({'Last': ['Gaitonde', 'Singh', 'Mathur'],
'First': ['Ganesh', 'Sartaj', 'Anjali']})
print('Before Join')
print(df, '\n')
print('After join')
df['Name'] = df['First'].str.cat(df['Last'], sep =" ")
print(df)
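# Resulting 'Name' column: "Ganesh Gaitonde", "Sartaj Singh", "Anjali Mathur"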
|
[
"noreply@github.com"
] |
shamoldas.noreply@github.com
|
b1b481e1ad09bdc739f2c6b721fd7a4a0a1eb3bc
|
aa7088c648dbda56e100309d908a5e1eb22aec4f
|
/ImageBazar/asgi.py
|
70ad069147bec186a9ae15f524792e5ec6325179
|
[] |
no_license
|
ShohagKumar/ImageBazar-Django
|
8f5884d92a3504bc695cdc0a5fc393c5e0d5ef77
|
de3166af66e1e9807b1a116fa8e22a8f0732f1b6
|
refs/heads/master
| 2023-02-21T17:25:15.288991 | 2021-01-21T11:52:02 | 2021-01-21T11:52:02 | 331,611,267 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
ASGI config for ImageBazar project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ImageBazar.settings')
application = get_asgi_application()
|
[
"shohagkumar6@gmail.com"
] |
shohagkumar6@gmail.com
|
8a43b7fe439f2bc7f0df4f94e56eaebf640c6f34
|
34ef90bc80f3d6291dfd7775bc8a7b5b5e6dc282
|
/dotcloud/0.4.2/cli/remote.py
|
caefcb4d88d63c552a0fa342ea1e11fe93012d6e
|
[] |
no_license
|
elrapha/DotCloudWin
|
2d56a1c7b709f8a1992365b51e3573b619760e7b
|
a8c957c8adff1abf3bac0ba6b3aa8e5bb1399168
|
refs/heads/master
| 2020-04-08T07:50:01.468402 | 2012-03-26T21:35:26 | 2012-03-26T21:35:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,598 |
py
|
## Copyright (c) 2010 dotCloud Inc.
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
## THE SOFTWARE.
import os
import subprocess
import utils
import config
import local
class Remote(object):
def __init__(self):
self._verbose = True
self._ssh_master = None
self._ssh_options = (
'ssh', '-t',
'-i', "'" + config.CONFIG_KEY + "'",
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-o', 'PasswordAuthentication=no',
'-o', 'ServerAliveInterval=10'
)
def set_url(self, url):
parts = utils.parse_url(url)
(self._user, self._host, self._port) = (parts['user'] or 'dotcloud', parts['host'], parts['port'] or '22')
self._url = url
def set_verbose(self, flag):
self._verbose = flag
def info(self, *args):
if not self._verbose:
return
utils.info(*args)
def die(self, progname):
utils.die('Error: "{0}" failed to be executed. Please make sure it is properly installed.'.format(progname))
def warning_ssh(self):
utils.warning('Warning: The SSH connection failed')
utils.warning('Please try again. If the problem persists, send an email to support@dotcloud.com.')
utils.warning('Also please check that your are allowed to make an SSH connection to a custom port.')
def _escape(self, s):
for c in ('`', '$', '"'):
s = s.replace(c, '\\' + c)
return s
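    # e.g. _escape('echo "$HOME"') -> echo \"\$HOME\", so the command
    # survives the double-quoted bash -l -c "..." wrapper built in _ssh().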
def _ssh(self, cmd, **kwargs):
p_args = self._ssh_options + (
'-l', self._user,
'-p', self._port,
self._host,
'bash -l -c "{0}"'.format(self._escape(cmd))
)
return subprocess.Popen(p_args, **kwargs)
def _scp(self, src, dest):
scp = (
'scp', '-P', self._port, '-r',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
src, dest
)
return subprocess.call(scp, close_fds=True)
def key(self, data):
with open(config.CONFIG_KEY, 'w') as f:
f.write(data)
if not utils.is_windows():
            os.fchmod(f.fileno(), 0o600)
def sftp(self):
sftp = (
'sftp',
'-o', 'Port={0}'.format(self._port),
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'{user}@{host}'.format(user=self._user, host=self._host)
)
return subprocess.call(sftp, close_fds=True)
def push(self, src, dest='.'):
self.info('# push {0} {1}'.format(src, dest))
return self._scp(src, '{user}@{host}:{dest}'.format(user=self._user, host=self._host, dest=dest))
def pull(self, src, dest='.'):
self.info('# pull {0} {1}'.format(src, dest))
return self._scp('{user}@{host}:{src}'.format(user=self._user, host=self._host, src=src), dest)
def run(self, *args):
cmd = ' '.join(args)
self.info('# {0}'.format(cmd))
return self._ssh(cmd).wait()
def run_script(self, script):
proc = self._ssh('/bin/bash', stdin=subprocess.PIPE)
proc.stdin.write(script)
proc.communicate()
def rsync(self, local_dir, destination, args):
self.info('# rsync')
excludes = args.get('excludes')
url = utils.parse_url(destination)
ssh = ' '.join(self._ssh_options)
ssh += ' -p {0}'.format(url['port'])
if not os.path.isfile(local_dir) and not local_dir.endswith('/'):
local_dir += '/'
ignore_file = os.path.join(local_dir, '.dotcloudignore')
ignore_opt = ('--exclude-from', ignore_file)
if not os.path.exists(ignore_file):
ignore_opt = tuple()
rsync = ('rsync', '-lpthrvz', '--delete', '--safe-links') + \
tuple('--exclude={0}'.format(e) for e in excludes) + ignore_opt + \
('-e', ssh, local_dir,
'{user}@{host}:{dest}/'.format(user=url['user'],
host=url['host'], dest=url['path']))
try:
ret = subprocess.call(rsync, close_fds=True)
if ret != 0:
self.warning_ssh()
return ret
except OSError:
self.die('rsync')
def hg(self, local_dir, destination, args):
self.info('# hg')
with utils.cd(local_dir):
try:
ssh = ' '.join(self._ssh_options)
args = ('hg', 'push', '--ssh', ssh, '-f', destination)
ret = subprocess.call(args, close_fds=True)
if ret != 0:
self.warning_ssh()
return ret
except OSError:
self.die('hg')
def git(self, local_dir, destination, args):
self.info('# git')
with utils.cd(local_dir):
try:
os.environ['GIT_SSH'] = '__dotcloud_git_ssh'
os.environ['DOTCLOUD_SSH_KEY'] = config.CONFIG_KEY
ret = subprocess.call(('git', 'push', '-f', '--all',
destination), close_fds=True)
if ret != 0:
self.warning_ssh()
return ret
except OSError:
self.die('git')
def upload_method(self, local_dir):
if os.path.isdir(os.path.join(local_dir, '.hg')):
return 'hg'
if os.path.isdir(os.path.join(local_dir, '.git')):
return 'git'
return 'rsync'
def upload(self, local_dir, destination, args):
if args.get('check'):
local_dir = self.check_pushdir(local_dir)
if args.get('verify_key'):
self.verify_key(args.get('verify_key'))
self.info('# upload {0} {1}'.format(local_dir, destination))
method = args.get('force_method') or self.upload_method(local_dir)
if method == 'hg':
return self.hg(local_dir, destination, args.get('hg', {}))
if method == 'git':
return self.git(local_dir, destination, args.get('git', {}))
return self.rsync(local_dir, destination, args.get('rsync', {}))
def verify_key(self, key):
f = open(config.CONFIG_KEY).read()
if not f.replace('\n', '') == key.replace('\n', ''):
utils.die('Error: you seem to have a stale key file "{0}"\n'
'Remove the file or run "dotcloud setup" command again.'.format(config.CONFIG_KEY))
def check_pushdir(self, local_dir):
orig = dir = os.path.realpath(local_dir)
if ':' in orig: # win32?
return self.check_pushdir_win32(local_dir)
while True:
if os.path.isdir(dir) and os.path.exists(os.path.join(dir, 'dotcloud.yml')):
if dir != orig:
utils.info('# Found dotcloud.yml: Using {0} as a base directory'.format(dir))
return dir
prev = dir
dir = os.path.realpath(os.path.join(dir, os.path.pardir))
if dir == prev:
local.confirm('Could not find dotcloud.yml file in {0} and parent directories. Proceed?'.format(orig))
break
return orig
def check_pushdir_win32(self, local_dir):
if not os.path.exists(os.path.join(local_dir, 'dotcloud.yml')):
local.confirm('Could not find dotcloud.yml file in {0}. Proceed?'.format(local_dir))
return local_dir
|
[
"kalman.speier@gmail.com"
] |
kalman.speier@gmail.com
|