blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
a3cc66bad7e993b2c3bf0995d8562331d79ffa43
|
cde99415e12f638db52bc0aaade71d45333d4149
|
/diaryproject2/urls.py
|
9010d7ad2577a640d12ee217f2114854a18f09a5
|
[] |
no_license
|
lilda/diaryproject
|
3ae23ecfc7ecdd29e700e560f1778a1fef0a1e02
|
b1b69bd5fa5f2092cfa07eec6e8c64f4959120e5
|
refs/heads/master
| 2020-06-11T05:10:28.266541 | 2019-06-26T08:08:46 | 2019-06-26T08:08:46 | 193,858,704 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,428 |
py
|
"""diaryproject2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import diary.views
import account.views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', diary.views.home, name="home"),
path('delete/<int:diary_id>', diary.views.delete, name="delete"),
path('update/<int:diary_id>', diary.views.update, name="update"),
path('detail/<int:diary_id>', diary.views.detail, name="detail"),
path('create/', diary.views.create, name="create"),
path('like/<int:diary_id>', diary.views.like, name="like"),
path('join', account.views.join, name="join"),
path('login', account.views.login, name="login"),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"ubuntu@ip-172-31-45-122.ap-northeast-1.compute.internal"
] |
ubuntu@ip-172-31-45-122.ap-northeast-1.compute.internal
|
de13e65252fe4e1b6b1c8f2d16715729aa333a7d
|
b76204c0010f7ad724f890af32454d1e88e414d1
|
/作业/第十一天作业/04-守护进程.py
|
306bdfca88bc5cd677247b324220dba1bb0110a7
|
[
"MIT"
] |
permissive
|
zhangjiang1203/Python-
|
773f4594939401d38c0f7fdb803d4b44891e4fc4
|
5e1df18be3d70bbe5403860e2f4775737b71ca81
|
refs/heads/master
| 2022-10-30T10:44:38.649037 | 2022-10-18T12:52:45 | 2022-10-18T12:52:45 | 117,959,754 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 617 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-02-14 17:34
# @Author : zhangjiang
# @Site :
# @File : 04-守护进程.py
# @Software: PyCharm
from multiprocessing import Process
import time
def foo():
print(123)
time.sleep(1)
print('end123')
def bar():
print(456)
time.sleep(3)
print("end456")
if __name__ == "__main__":
p1 = Process(target=foo)
p2 = Process(target=bar)
    # Daemon flag: the daemon is tied to the main process. Once the main
    # process has finished executing its own code, the daemon child is
    # terminated, so p1 will not keep running.
p1.daemon = True
p1.start()
p2.start()
print("main---")
|
[
"zhangjiang@imcoming.cn"
] |
zhangjiang@imcoming.cn
|
720e5e4753ddde2bd70941bc0d1d893de5e1bd04
|
b8c1de69b62d086cf337f721479de2ad794deb44
|
/Day05_Loop.py
|
e063e614ba4d8b3988442030df094d0b827ce6a2
|
[] |
no_license
|
yusukeaoki1223/HackerRank
|
c9ced47c3fcf64dae379576c3426723496f84dd6
|
77a2236c89a3607b0cd382712623eaadee453902
|
refs/heads/master
| 2020-07-08T13:37:47.848519 | 2019-09-12T06:06:16 | 2019-09-12T06:06:16 | 203,691,213 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 255 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 01:33:25 2019
@author: yusuke.aoki
"""
import math
import os
import random
import re
import sys
n = int(input())
for i in range(1,11):
k=n*i
print(str(n)+"x"+str(i)+"="+str(k))
|
[
"noreply@github.com"
] |
yusukeaoki1223.noreply@github.com
|
2453ce3be0af4e8b4049634c771762e901a47884
|
853d4cec42071b76a80be38c58ffe0fbf9b9dc34
|
/venv/Lib/site-packages/pandas/tests/window/conftest.py
|
2af61b6375353f76bf7ec11644270c6810535eae
|
[] |
no_license
|
msainTesting/TwitterAnalysis
|
5e1646dbf40badf887a86e125ef30a9edaa622a4
|
b1204346508ba3e3922a52380ead5a8f7079726b
|
refs/heads/main
| 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 984 |
py
|
import pytest
@pytest.fixture(params=[True, False])
def raw(request):
return request.param
@pytest.fixture(
params=[
"triang",
"blackman",
"hamming",
"bartlett",
"bohman",
"blackmanharris",
"nuttall",
"barthann",
]
)
def win_types(request):
return request.param
@pytest.fixture(params=["kaiser", "gaussian", "general_gaussian", "exponential"])
def win_types_special(request):
return request.param
@pytest.fixture(
params=["sum", "mean", "median", "max", "min", "var", "std", "kurt", "skew"]
)
def arithmetic_win_operators(request):
return request.param
@pytest.fixture(params=["right", "left", "both", "neither"])
def closed(request):
return request.param
@pytest.fixture(params=[True, False])
def center(request):
return request.param
@pytest.fixture(params=[None, 1])
def min_periods(request):
return request.param
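# Added illustration (hypothetical test, not part of this conftest): pytest
# instantiates a test once per fixture parameter, so a test requesting both
# win_types (8 params) and center (2 params) would run 16 times:
#
# def test_example(win_types, center):
#     assert win_types in {"triang", "blackman", "hamming", "bartlett",
#                          "bohman", "blackmanharris", "nuttall", "barthann"}
#     assert isinstance(center, bool)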
|
[
"msaineti@icloud.com"
] |
msaineti@icloud.com
|
72a7bf0b181c09cc73c9f849f0115fcc099eca52
|
2a61fd43eefba838aa05d424be543cf1b656aaa4
|
/model.py
|
05282dace44593e863671d8eb46e2f66aea163a8
|
[] |
no_license
|
AmmieQi/DGCNN
|
a75eaed02f638f43a418d5d3102fa00375eadfdf
|
6d34538463561d43094a7a77b95a2a0d555817e7
|
refs/heads/master
| 2020-05-24T14:08:08.920137 | 2019-03-19T04:52:16 | 2019-03-19T04:52:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,610 |
py
|
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import Conv1d, MaxPool1d, Linear, Dropout
from torch_geometric.nn import GCNConv, global_sort_pool
from torch_geometric.utils import remove_self_loops
class Model(nn.Module):
def __init__(self, num_features, num_classes):
super(Model, self).__init__()
self.conv1 = GCNConv(num_features, 32)
self.conv2 = GCNConv(32, 32)
self.conv3 = GCNConv(32, 32)
self.conv4 = GCNConv(32, 1)
self.conv5 = Conv1d(1, 16, 97, 97)
self.conv6 = Conv1d(16, 32, 5, 1)
self.pool = MaxPool1d(2, 2)
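        # Shape note (added): in forward() the four GCN outputs are
        # concatenated to 32+32+32+1 = 97 channels per node and sort-pooled
        # to k=30 nodes, i.e. a length 30*97 vector per graph. conv5 with
        # kernel = stride = 97 reads one node per step (length 30), the pool
        # halves that to 15, conv6 (kernel 5) leaves 11 steps, so 32*11 = 352
        # matches the classifier input below.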
self.classifier_1 = Linear(352, 128)
self.drop_out = Dropout(0.5)
self.classifier_2 = Linear(128, num_classes)
self.relu = nn.ReLU(inplace=True)
def forward(self, data):
x, edge_index, batch = data.x, data.edge_index, data.batch
edge_index, _ = remove_self_loops(edge_index)
x_1 = torch.tanh(self.conv1(x, edge_index))
x_2 = torch.tanh(self.conv2(x_1, edge_index))
x_3 = torch.tanh(self.conv3(x_2, edge_index))
x_4 = torch.tanh(self.conv4(x_3, edge_index))
x = torch.cat([x_1, x_2, x_3, x_4], dim=-1)
x = global_sort_pool(x, batch, k=30)
x = x.view(x.size(0), 1, x.size(-1))
x = self.relu(self.conv5(x))
x = self.pool(x)
x = self.relu(self.conv6(x))
x = x.view(x.size(0), -1)
out = self.relu(self.classifier_1(x))
out = self.drop_out(out)
classes = F.log_softmax(self.classifier_2(out), dim=-1)
return classes
|
[
"leftthomas@qq.com"
] |
leftthomas@qq.com
|
b0042db5c563a76ff741cae0bfd618cea3af86eb
|
fba2f5e0ad3c6cdd50545b8a79798174d8bc92df
|
/ImageSpider/ImageSpider/items.py
|
b4299b45314a8cb6276983411c05205ce9dad389
|
[
"Apache-2.0"
] |
permissive
|
futheads/ScrapyProject
|
b9ebf10861e24cd429db355ed85d152d32ff8b84
|
a3bfb221f914c0bdfb6569a9205be06ce019c507
|
refs/heads/master
| 2020-04-10T00:43:14.640763 | 2019-05-31T10:30:24 | 2019-05-31T10:30:24 | 160,694,142 | 2 | 0 |
Apache-2.0
| 2018-12-08T13:59:12 | 2018-12-06T15:28:23 |
Python
|
UTF-8
|
Python
| false | false | 266 |
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ImagespiderItem(scrapy.Item):
img_url = scrapy.Field()
img_name = scrapy.Field()
|
[
"futhead@163.com"
] |
futhead@163.com
|
751261ea14bfadba46a6b86eb7a510f377e17ac2
|
30b97ab14b92dd205161d2554085973ba5e6b4ba
|
/data/GalaxyStarFormationRateFunction/conversion/convertSobral2013.py
|
20af8b956f345a219ae500d897881e9c119ade38
|
[] |
no_license
|
jemme07/velociraptor-comparison-data
|
ceb172c9403e986e03e54f5e52e75e6b3c8612d3
|
873c955b3a7c2b90e3d63cdc5aba0e3665417a82
|
refs/heads/master
| 2023-08-15T02:43:20.337356 | 2021-10-18T09:29:22 | 2021-10-18T09:29:22 | 380,203,513 | 0 | 0 | null | 2021-06-25T10:28:35 | 2021-06-25T10:28:34 | null |
UTF-8
|
Python
| false | false | 4,547 |
py
|
from velociraptor.observations.objects import ObservationalData
import unyt
import numpy as np
import os
import sys
import re
import itertools as it
def pairwise(iterable):
"""
return successive pairs of elements from the iterable
i.e. (i0, i1), (i1, i2), (i2,i3), ...
    iterable: the iterable to consume.
"""
a, b = it.tee(iterable)
next(b, None)
return zip(a, b)
def parse_latex_value(latex_string):
"""
Take a LaTeX markup in the form ${value}_{-ve error}^{+ve error}$ and extract the
numeric data from it.
latex_string: The string to parse
"""
    values = re.findall(r"(\d+\.\d+)", latex_string)
ret = []
for v in values:
# Missing data gets replaced with NaNs
ret.append(float(v))
return ret
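# Example (added): parse_latex_value("$1.23_{-0.05}^{+0.07}$") returns
# [1.23, 0.05, 0.07].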
def load_file_and_split_by_z(raw_file_name):
"""
Read the data file and do all the mucking around needed to extract a list of the
redshift bins for which the SFRF is tabulated, along with the corresponding SFRF
values and their errors.
    The number and spacing of the SFR bins vary with z; they are given in the
first column of the returned array.
raw_file_name: the file name of the raw data file to extract the SFRF from
"""
    with open(raw_file_name, "r") as f:
lines = f.readlines()
# find header lines indicating the start of each block of data
header_line_nos = [i for i, line in enumerate(lines) if "z ∼" in line]
header_line_nos.append(len(lines))
# split the full list of lines into one block of lines per redshift bin
split_lines = []
for l1, l2 in pairwise(header_line_nos):
split_lines.append(lines[l1:l2])
z_bins_arr = [
        float(re.match(r"#z ∼ (\d\.\d*)", lines[lno]).group(1))
for lno in header_line_nos[:-1]
]
uv_lf_arr = []
for isl, lines in enumerate(split_lines):
uv_lf_arr.append(np.genfromtxt(lines, usecols=(0, 1, 3)))
return z_bins_arr, uv_lf_arr
def process_for_redshift(z, sfrf_at_z):
"""
Output an HDF5 file containing the SFRF at a given redshift.
z: the redshift to produce the SFRF for.
sfrf_at_z: the array containing SFR and Phi_SFR bins at the chosen redshift
"""
processed = ObservationalData()
comment = (
"Assuming Chabrier IMF and Vmax selection. Includes dust corrections as "
"described in Katsianis et. al. 2017, section 2. "
f"h-corrected for SWIFT using Cosmology: {cosmology.name}."
)
citation = "Sobral et. al. (2013)"
bibcode = "2013MNRAS.428.1128S"
name = "SFRF from HiZELS"
plot_as = "points"
redshift = z
h = cosmology.h
SFR_bins = sfrf_at_z[:, 0]
SFR = SFR_bins * unyt.Solar_Mass / unyt.year
# SFRF and errors are stored in datafile with units 10^-2 Mpc^-3 dex^-1
Phi = 1e-2 * sfrf_at_z[:, 1] * unyt.Mpc ** (-3)
# y_scatter should be a 1xN or 2xN array describing offsets from
# the median point 'y'
Phi_err = 1e-2 * sfrf_at_z[:, 2].T * unyt.Mpc ** (-3)
processed.associate_x(
SFR, scatter=None, comoving=True, description="Star Formation Rate"
)
processed.associate_y(Phi, scatter=Phi_err, comoving=True, description="Phi (SFRF)")
processed.associate_citation(citation, bibcode)
processed.associate_name(name)
processed.associate_comment(comment)
processed.associate_redshift(redshift)
processed.associate_plot_as(plot_as)
processed.associate_cosmology(cosmology)
return processed
def stringify_z(z):
"""
Eagle-style text formatting of redshift label.
Example: z=1.5 will be printed as z001p500.
z: The redshift to produce a label for
"""
whole = int(z)
frac = int(1000 * (z - whole))
return f"z{whole:03d}p{frac:03d}"
# Exec the master cosmology file passed as first argument
# These lines are _required_ and you are required to use
# the cosmology specified (this is an astropy.cosmology
# instance)
with open(sys.argv[1], "r") as handle:
exec(handle.read())
input_filename = "../raw/Sobral2013.txt"
output_filename = "Sobral2013_{}.hdf5"
output_directory = "../"
if not os.path.exists(output_directory):
os.mkdir(output_directory)
z_bins, UV_LF = load_file_and_split_by_z(input_filename)
for z, UV_LF_at_z in zip(z_bins, UV_LF):
processed = process_for_redshift(z, UV_LF_at_z)
output_path = f"{output_directory}/{output_filename.format(stringify_z(z))}"
if os.path.exists(output_path):
os.remove(output_path)
processed.write(filename=output_path)
|
[
"calvin.v.sykes@durham.ac.uk"
] |
calvin.v.sykes@durham.ac.uk
|
39700ce52b75a9d1930323bc5ea883ab2a9741bf
|
b96e692639b300a733dad5d7c68cb09ea25baeba
|
/account/views.py
|
b46fe985803a19c706dd0a59789463563ae31a45
|
[
"MIT"
] |
permissive
|
mateuszwwwrobel/Expense_Tracker_Django
|
dce0194eaac6711f76b8edd8b2ae57389739edf9
|
e84bda82433427608e026faa00a634c46a433179
|
refs/heads/main
| 2023-07-03T18:38:00.126977 | 2021-08-11T21:33:22 | 2021-08-11T21:33:22 | 362,215,742 | 0 | 0 |
MIT
| 2021-08-11T21:33:23 | 2021-04-27T18:39:42 |
Python
|
UTF-8
|
Python
| false | false | 4,497 |
py
|
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.contrib.auth import login, logout
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm
from django.views.generic import TemplateView, View
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.contrib import messages
from account.tokens import account_activation_token
from account.forms import SignUpForm
class BeginView(TemplateView):
template_name = 'begin.html'
class SignUpView(View):
def get(self, request):
form = SignUpForm()
context = {
'form': form,
}
return render(request, 'registration/signup.html', context)
def post(self, request):
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = False
user.save()
current_site = get_current_site(request)
subject = 'Activate Expense Tracker Account'
message = render_to_string('registration/account_activation_email.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user),
})
user.email_user(subject, message)
return redirect('account-activation-sent')
else:
context = {
'form': form,
}
return render(request, 'registration/signup.html', context)
class LoginView(View):
def get(self, request):
form = AuthenticationForm()
context = {
'form': form,
}
return render(request, 'registration/login.html', context)
def post(self, request):
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = User.objects.get(username=request.POST['username'])
login(request, user)
messages.success(request, 'You have logged in successfully!')
return redirect('home')
else:
context = {
'form': form,
}
return render(request, 'registration/login.html', context)
class LogoutView(View):
def get(self, request):
logout(request)
messages.success(request, 'You have logged out!')
return redirect('home')
class ResetPasswordView(View):
def get(self, request):
form = PasswordResetForm()
context = {
'form': form,
}
return render(request, 'registration/reset_password.html', context)
def post(self, request):
form = PasswordResetForm(data=request.POST)
if form.is_valid():
try:
user = User.objects.get(email=request.POST['email'])
except User.DoesNotExist:
                error_msg = 'A user with the specified email does not exist.'
context = {
'form': form,
'error': error_msg,
}
return render(request, 'registration/reset_password.html', context)
else:
form.save(domain_override='127.0.0.1:8000')
return redirect('password-reset-sent')
else:
context = {
'form': form,
}
return render(request, 'registration/reset_password.html', context)
def password_reset_sent(request):
return render(request, 'registration/password_reset_sent.html')
def account_activation_sent(request):
return render(request, 'registration/account_activation_sent.html')
def activate(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.email_confirmed = True
user.save()
login(request, user)
return render(request, 'registration/account_activation_complete.html')
else:
return render(request, 'registration/account_activation_invalid.html')
|
[
"mateusz.wwwrobel@gmail.com"
] |
mateusz.wwwrobel@gmail.com
|
cd40dcbab379ed3aa8f6b7fea4d4d38333549d7d
|
044c172f23a60dc2d1471217348a470e0d6eacbb
|
/AdaptiveAlgo/private/services/testInputOutput.py
|
92a780341879ba4961c962ef1181d279677e8880
|
[] |
no_license
|
rcquantuniversity/QuantUniversity
|
01cc1b3d41cfd706b09520c4ca9d855ee10e1d7a
|
7abc4f9cc0afc0869d0fceec0a97f493a6eba9fd
|
refs/heads/master
| 2021-01-23T00:48:31.882959 | 2017-06-30T14:19:08 | 2017-06-30T14:19:08 | 92,845,074 | 1 | 1 | null | 2017-06-16T21:57:25 | 2017-05-30T15:03:23 |
JavaScript
|
UTF-8
|
Python
| false | false | 482 |
py
|
import sys, json
#Read data from stdin
def read_in():
lines = sys.stdin.readlines()
#Since our input would only be having one line, parse our JSON data from that
return json.loads(lines[0])
def main():
#get our data as an array from read_in()
lines = read_in()
    # print the received fields to the output stream
    print("Python Print")
    print(lines['username'])
    print(lines['secretAccessKey'])
    print(lines['accessKeyID'])
#start process
if __name__ == '__main__':
main()
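# Illustrative invocation (added note, hypothetical values):
#   echo '{"username": "u", "secretAccessKey": "s", "accessKeyID": "k"}' \
#     | python testInputOutput.py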
|
[
"rc.quantuniversity@gmail.com"
] |
rc.quantuniversity@gmail.com
|
cf82fede3a4e1d602ccb0a5b1c03d9d032b1bb9b
|
14ab527f45ed944c37d0bb63bf9545897532ffe6
|
/circulation-in-network.py
|
fdc386fac3f66b8fd09ed7b1c906b9dde5d2d75b
|
[] |
no_license
|
flacout/algorithm-bucket
|
6564a6c8618291cdab2e026203d67fe6ec9ffdbe
|
82795fa1aab63e40c05225de217ddd5a53e40651
|
refs/heads/master
| 2020-03-15T19:16:43.489169 | 2018-05-06T04:01:42 | 2018-05-06T04:01:42 | 132,304,622 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,043 |
py
|
# python3
from queue import Queue
class MaxMatching:
def __init__(self):
self.V = 0
self.E = 0
self.OUT = []
self.IN = []
self.lowerBond = []
def read_data(self):
self.V, self.E = map(int, input().split())
self.OUT = [0 for i in range(self.V)]
self.IN = [0 for i in range(self.V)]
vertex_count = self.V+2
graph = FlowGraph(vertex_count)
# add edges from input()
for i in range(self.E):
fro, to, low, cap = map(int, input().split())
graph.add_edge(fro-1, to-1, cap-low)
# add lower bond to out[from] and in[to]
self.OUT[fro-1] += low
self.IN[to-1] += low
self.lowerBond.append(low)
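            # Example (added): an input edge (u, v) with low=2, cap=5 is
            # stored with residual capacity 3, while OUT[u] and IN[v] each
            # grow by 2; the circulation is feasible iff max-flow saturates
            # all source edges, i.e. equals sum(OUT) == sum(IN).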
#print(self.OUT)
#print(self.IN)
        # add an edge of capacity IN[v] from the source (index self.V) to every vertex,
        # and an edge of capacity OUT[v] from every vertex to the sink (index self.V+1)
for v in range(self.V):
graph.add_edge(self.V, v, self.IN[v])
graph.add_edge(v, self.V+1, self.OUT[v])
return graph
def write_response(self, graph):
for i, bond_edge in enumerate(self.lowerBond):
            # edges in the graph are stored in pairs (forward at even, backward at odd indices)
circulation = graph.edges[i*2].flow + bond_edge
#print('edge flow '+str(graph.edges[i*2].flow), 'edge bond '+str(bond_edge))
print(circulation)
def solve(self):
graph = self.read_data()
maxFlow = max_flow(graph, self.V, self.V+1)
#print(maxFlow)
        if maxFlow == sum(self.OUT):
print('YES')
self.write_response(graph)
else:
print('NO')
return
def max_flow(graph, from_, to):
flow = 0
while True:
path, X = findPath(graph, from_, to)
#print('path', path)
        if len(path) == 0:
return computeFlow(graph, from_)
#print('X', X)
updateGraph(graph, X, path)
#print('flow', flow)
def computeFlow(graph, source):
flow = 0
# get edges_id of source node.
for edge_id in graph.get_ids(source):
# forward (even) case only
if (edge_id%2==0):
edge = graph.get_edge(edge_id)
flow += edge.flow
return flow
######################################################
# MAX-FLOW
######################################################
class Edge:
def __init__(self, u, v, capacity):
self.u = u
self.v = v
self.capacity = capacity
self.flow = 0
# This class implements a bit unusual scheme for storing edges of the graph,
# in order to retrieve the backward edge for a given edge quickly.
class FlowGraph:
def __init__(self, n):
# List of all - forward and backward - edges
self.edges = []
# These adjacency lists store only indices of edges in the edges list
self.graph = [[] for _ in range(n)]
def add_edge(self, from_, to, capacity):
# Note that we first append a forward edge and then a backward edge,
# so all forward edges are stored at even indices (starting from 0),
# whereas backward edges are stored at odd indices.
#forward_edge = Edge(from_, to, capacity)
#backward_edge = Edge(to, from_, 0)
forward_edge = Edge(from_, to, capacity)
backward_edge = Edge(to, from_, capacity)
self.graph[from_].append(len(self.edges))
self.edges.append(forward_edge)
self.graph[to].append(len(self.edges))
self.edges.append(backward_edge)
def size(self):
return len(self.graph)
def get_ids(self, from_):
# get edges id in the self.edge list
return self.graph[from_]
def get_edge(self, id):
# return one edge from a vertex.
return self.edges[id]
def add_flow(self, id, flow):
# To get a backward edge for a true forward edge (i.e id is even), we should get id + 1
# due to the described above scheme. On the other hand, when we have to get a "backward"
# edge for a backward edge (i.e. get a forward edge for backward - id is odd), id - 1
# should be taken.
#
# It turns out that id ^ 1 works for both cases. Think this through!
# ^ is bitwise XOR
        # x ^ 1 is x - 1 if x is odd, x + 1 if x is even
        self.edges[id].flow += flow      # the chosen edge
        self.edges[id ^ 1].flow -= flow  # its paired reverse edge
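        # Example (added): the reverse of forward edge 4 is 4 ^ 1 == 5, and
        # the reverse of backward edge 5 is 5 ^ 1 == 4.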
###########################################################
# BFS
###########################################################
def findPath(graph, from_, to):
#print('enter findPath()')
tree = BFS(graph, from_, to)
#print("tree", tree)
if tree[to] == None :
return [],0
path, X = reconstructPath(from_, to, tree, graph)
#print("path", path)
return path, X
def reconstructPath(from_, to, tree, graph):
# construct path
# find minimum value
min_value = float('inf')
path = []
while to!=from_:
# path contain edges_id
edge_id = tree[to][1]
edge = graph.get_edge(edge_id)
# forward (even) case
if (edge_id%2==0) and ((edge.capacity - edge.flow) < min_value):
min_value = edge.capacity - edge.flow
# backward (odd) case cap + flow = cap + (negative value)
if (edge_id%2!=0) and (-edge.flow < min_value):
min_value = -edge.flow
path.append(edge_id)
to = tree[to][0]
return path, min_value
def BFS(graph, from_, to):
dist = [-1 for _ in range(graph.size())]
prev = [None for _ in range(graph.size())]
dist[from_] = 0
q = Queue()
q.put(from_)
while (not q.empty()):
u = q.get()
for edge_id in graph.get_ids(u):
edge = graph.get_edge(edge_id)
# forward (even) case
# edge.flow has to be inferior to capacity.
if (edge_id%2==0) and (dist[edge.v] == -1) and (edge.flow < edge.capacity):
q.put(edge.v)
dist[edge.v] = dist[u] + 1
prev[edge.v]= (u, edge_id)
# stop when reach the sink
if edge.v==to:
return prev
# backward (odd) case
# edge.flow has to be inferior to 0.
elif (edge_id%2!=0) and (dist[edge.v] == -1) and (edge.flow < 0):
q.put(edge.v)
dist[edge.v] = dist[u] + 1
prev[edge.v]=(u, edge_id)
# stop when reach the sink
if edge.v==to:
return prev
return prev
#######################################################
# update graph edges along the path with value of flow
########################################################
def updateGraph(graph, min_value, path):
#print('enter updateGraph')
for edge_id in path:
graph.add_flow(edge_id, min_value)
return
if __name__ == '__main__':
max_matching = MaxMatching()
max_matching.solve()
|
[
"fabricelacout@gmail.com"
] |
fabricelacout@gmail.com
|
360dacb3fcd48a9b2991ba27ccca7b1f5f04c42c
|
b341e857dea1741be2a19902cf744ae906772902
|
/modules/video/plex/main.py
|
d85d30095055637a9c97d454264cdc7c5b57c8f4
|
[
"MIT"
] |
permissive
|
sdelcore/video-event-notifier-old
|
80563e99e38f04461a385f1e69448e0fa65c89f4
|
16bd322f2b81efbb3e08e63ed407ab098d610c88
|
refs/heads/master
| 2022-07-02T22:17:08.950805 | 2020-05-08T19:25:16 | 2020-05-08T19:25:16 | 255,608,741 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,355 |
py
|
import time
import yaml
from plex import PlexHandler
from mqtthandler import MQTTHandler
CONFIG="../../config.yml"
PLEX_SERVER="media-server"
PLEX_USERNAME=""
PLEX_PASSWORD=""
# A quirk of plexapi: the video's current_time is only refreshed every 10
# seconds, so if the time hasn't updated and playback isn't paused we need to
# emulate the status message: copy the previous message and advance the
# current time.
if __name__ == "__main__":
# Parse configuration
config = None
with open(CONFIG, 'r') as f:
        config = yaml.safe_load(f)
plex_config = config['PleX']
PLEX_SERVER = plex_config["server"]
PLEX_USERNAME = plex_config["username"]
PLEX_PASSWORD = plex_config["password"]
UPDATE_FREQ = plex_config["update frequency"]
mqtt_config = config["MQTT"]
MQTT_BROKER = mqtt_config["url"]
TOPIC = mqtt_config["Topics"]["now playing"]
# Set up communications
mqtt = MQTTHandler("plex-client", MQTT_BROKER, TOPIC)
plex_server = PlexHandler(PLEX_SERVER, PLEX_USERNAME, PLEX_PASSWORD, mqtt, UPDATE_FREQ)
# PLEX SPECIFIC CYCLE CODE
    # Ensure .update() runs roughly once per second
t = time.time()
time_to_wait = 1
while True:
if time.time() - t < time_to_wait:
time_to_wait = 1
continue
plex_server.update()
t = time.time()
|
[
"Spencer.Delcore@molex.com"
] |
Spencer.Delcore@molex.com
|
25006a1b51be4e98a23468c45d7491a9eb928a91
|
04b41dfdbcad57169fc55ee28fd567387beceed8
|
/doc/bibtoutf8.py
|
1700bcd5deb9a27bfe3191673447f6ce105a6866
|
[
"MIT"
] |
permissive
|
shgalus/shg
|
7c61754c8ddb2d955313d2af758420ecf4985b37
|
aef60bde045f9ba781b8d0f57998785ab8822176
|
refs/heads/master
| 2022-09-01T12:15:48.222619 | 2022-08-04T17:03:00 | 2022-08-04T17:03:00 | 23,058,788 | 9 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,524 |
py
|
#!/usr/bin/env python3
"Converts shg.bib to UTF-8."
import re
with open('shg.bib', 'r', encoding='utf-8') as f, \
open('shgutf8.bib', 'w', encoding='utf-8') as g:
for line in f:
line = re.sub(r"{\\k{a}}", 'ą', line)
line = re.sub(r"{\\'{c}}", 'ć', line)
line = re.sub(r"{\\k{e}}", 'ę', line)
line = re.sub(r"{\\l}", 'ł', line)
line = re.sub(r"{\\'{n}}", 'ń', line)
line = re.sub(r"{\\'{o}}", 'ó', line)
line = re.sub(r"{\\'{s}}", 'ś', line)
line = re.sub(r"{\\'{z}}", 'ź', line)
line = re.sub(r"{\\.{z}}", 'ż', line)
line = re.sub(r"{\\k{A}}", 'Ą', line)
line = re.sub(r"{\\'{C}}", 'Ć', line)
line = re.sub(r"{\\k{E}}", 'Ę', line)
line = re.sub(r"{\\L}", 'Ł', line)
line = re.sub(r"{\\'{N}}", 'Ń', line)
line = re.sub(r"{\\'{O}}", 'Ó', line)
line = re.sub(r"{\\'{S}}", 'Ś', line)
line = re.sub(r"{\\'{Z}}", 'Ź', line)
line = re.sub(r"{\\.{Z}}", 'Ż', line)
line = re.sub(r'{\\"{a}}', 'ä', line)
line = re.sub(r'{\\"{o}}', 'ö', line)
line = re.sub(r'{\\"{u}}', 'ü', line)
line = re.sub(r'{\\"{A}}', 'Ä', line)
line = re.sub(r'{\\"{O}}', 'Ö', line)
line = re.sub(r'{\\"{U}}', 'Ü', line)
line = re.sub(r"{\\'{e}}", 'é', line)
line = re.sub(r"{\\'{E}}", 'É', line)
line = re.sub(r"{\\'{i}}", 'í', line)
line = re.sub(r"{\\'{I}}", 'Í', line)
g.write(line)
|
[
"shgalus@gmail.com"
] |
shgalus@gmail.com
|
fa8d6722fd52b5bda4d773e059914c99ba080ebd
|
43099d0d654586219eada7deb9b573d7fe8acb03
|
/app/statistic/stat.py
|
5f18037aad31c53daf8903fd62ef4b660c769fe0
|
[] |
no_license
|
romanlytkin/MyDjangoAppExample
|
11bafd4dd25dd7a137720055354caf627332beaf
|
adbbe53a2b48b5d5bc8a3489b8c69eb5cac2ca50
|
refs/heads/master
| 2021-01-17T13:38:48.163499 | 2016-06-06T07:58:41 | 2016-06-06T07:58:41 | 60,507,320 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,317 |
py
|
import json
from django.template import RequestContext
from django.shortcuts import render, render_to_response
from django.http import Http404, HttpResponse
from app.models import Region, City, Comments
from django.db.models import Count
# Create your views here.
def index(request):
satistics = Region.objects.select_related().values('id', 'region_name', 'comments__region_id').annotate(count=Count('comments__region_id')).filter(count__gt=5)
return render_to_response(
'stat.html',
{
'satistics': satistics
},
context_instance=RequestContext(request)
)
def statbyregion(request, region_id):
satistics = City.objects.select_related().filter(comments__region_id=region_id).values('id', 'city_name', 'comments__city_id').annotate(count=Count('comments__city_id'))
return render_to_response(
'statbyregion.html',
{
'region_id': region_id,
'satistics': satistics
},
context_instance=RequestContext(request)
)
def statbycity(request, region_id, city_id):
comments = Comments.objects.filter(region_id=region_id, city_id=city_id)
return render_to_response(
'view.html',
{
'comments': comments
},
context_instance=RequestContext(request)
)
|
[
"roman.lytkin@progforce.com"
] |
roman.lytkin@progforce.com
|
dc970102fbf5cb07fff9453d09b6678d378d3cd6
|
cf9a8b553a3b5a9a374f97a94ac3bd47ff90b18f
|
/venv/Scripts/easy_install-3.7-script.py
|
87342abcc7376bd1071af72de032b47b4655e4b8
|
[] |
no_license
|
Mariosierracarlos/MemoriaDinamica
|
1555238236c153e2e232326ac8a5c61b66da1ad1
|
b3c92212cf5869928df37fdebd6260245e66f044
|
refs/heads/master
| 2020-03-30T03:02:03.825781 | 2018-09-28T01:05:20 | 2018-09-28T01:05:20 | 150,664,793 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 469 |
py
|
#!C:\Users\portatil\PycharmProjects\MemoriaDinamica\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"mariosierracr2017@gmail.com"
] |
mariosierracr2017@gmail.com
|
224bef18d6f6ff5ddf72ac00dc19d3552f6cacd8
|
9b9a02657812ea0cb47db0ae411196f0e81c5152
|
/repoData/zedshaw-fuqit/allPythonContent.py
|
edaf60f22d15f4461c879f222c3008de3f09f624
|
[] |
no_license
|
aCoffeeYin/pyreco
|
cb42db94a3a5fc134356c9a2a738a063d0898572
|
0ac6653219c2701c13c508c5c4fc9bc3437eea06
|
refs/heads/master
| 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 30,197 |
py
|
__FILENAME__ = read
from fuqit.web import render, error
from config import db
def run(web):
post_id = web.sub_path[1:]
if not post_id: return error(404, "Not Found")
web.post = db.get('post', by_id=post_id)
if web.post:
return render('show_post.html', web)
else:
return error(404, "Not Found")
########NEW FILE########
__FILENAME__ = write
from fuqit.web import render, redirect
from config import db
def GET(web):
return render("write_post.html", web)
def POST(web):
db.insert('post',
title=web.params['title'],
content=web.params['content'])
return redirect("/")
########NEW FILE########
__FILENAME__ = config
from fuqit import data
db = data.database(dbn='sqlite', db='data.sqlite3')
allowed_referer = '.*'
default_mtype = 'text/html'
static_dir = '/static/'
########NEW FILE########
__FILENAME__ = dbtest
from fuqit.web import render, redirect
from config import db
def GET(web):
"""
This shows how to do a simple database setup. You can also just
import the db inside the .html file if you want and don't need
to go to a handler first.
"""
if web.sub_path == '/delete':
db.delete('test', where='id = $id', vars=web.params)
return render("showdb.html", web)
def POST(web):
db.insert('test', title=web.params['title'])
return redirect("/dbtest")
########NEW FILE########
__FILENAME__ = form
def run(web):
headers = [(k,v) for k,v in web.headers.items()]
result = "HEADERS: %r\nPARAMS: %r\nPATH: %r\nMETHOD: %r" % (
headers, web.params, web.path, web.method)
return result, 200, {'content-type': 'text/plain'}
########NEW FILE########
__FILENAME__ = stuff
def test(instuff):
return "OUT %r" % instuff
########NEW FILE########
__FILENAME__ = test
from fuqit import forms
from fuqit.web import render
def GET(web):
"""
Demonstrates using the session and also how to then render another
thing seamlessly. Just call web.app.render() and it'll do all the
resolving gear again, so one method works on statics, modules, jinja2
just like you accessed it from a browser.
"""
web.form = forms.read(web, reset=False)
if web.form.reset:
web.session['count'] = 1
else:
web.session['count'] = web.session.get('count', 1) + 1
return render('renderme.html', web)
########NEW FILE########
__FILENAME__ = config
from fuqit import data
db = data.database(dbn='sqlite', db='data.sqlite3')
allowed_referer = '.*'
default_mtype = 'text/html'
static_dir = '/static/'
########NEW FILE########
__FILENAME__ = commands
# Fuqit Web Framework
# Copyright (C) 2013 Zed A. Shaw
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from modargs import args
import fuqit
import os
import sys
def help_command(**options):
"""
Prints out help for the commands.
fuqit help
You can get help for one command with:
fuqit help -for STR
"""
if "for" in options:
help_text = args.help_for_command(fuqit.commands, options['for'])
if help_text:
print help_text
else:
args.invalid_command_message(fuqit.commands, exit_on_error=True)
else:
print "Available commands:\n"
print ", ".join(args.available_commands(fuqit.commands))
print "\nUse fuqit help -for <command> to find out more."
def init_command(into='app'):
"""
Initializes a fuqit app, default directory is 'app'.
fuqit init -into myapp
"""
if not os.path.exists(into):
for newdir in ['/', '/app', '/app/static']:
os.mkdir(into + newdir)
open(into + '/app/__init__.py', 'w').close()
with open(into + '/config.py', 'w') as config:
config.write("from fuqit import data\n\ndb = data.database(dbn='sqlite', db='data.sqlite3')")
with open(into + '/app/index.html', 'w') as index:
index.write('Put your crap in %s/app and hit rephresh.' % into)
print "Your app is ready for hackings in %s" % into
else:
print "The app directory already exists. Try:\n\nfuqit init -into [SOMEDIR]"
def run_command(host='127.0.0.1', port=8000, config_module='config', app='app',
debug=True, chroot="."):
"""
Runs a fuqit server.
fuqit run -host 127.0.0.1 -port 8000 -referer http:// -app app -debug True \
-chroot .
NOTE: In run mode it's meant for developers, so -chroot just does a cd
to the directory. In server mode it actually chroots there. It also
adds the chroot path to the python syspath.
"""
from fuqit import server
sys.path.append(os.path.realpath(chroot))
os.chdir(chroot)
server.run_server(host=host,
port=port,
config_module=config_module,
app=app,
debug=debug)
def start_command(host='127.0.0.1', port=8000, referer='http://', app='app',
debug=True, chroot="."):
"""
Runs the fuqit server as a daemon.
fuqit start -host 127.0.0.1 -port 8000 -referer http:// -app app -debug True
"""
def stop_command():
"""
Stops a running fuqit daemon.
fuqit stop
"""
def status_command():
"""
Tells you if a running fuqit service is running or not.
fuqit status
"""
########NEW FILE########
__FILENAME__ = utils
#!/usr/bin/env python
"""
General Utilities taken from web.py for use with the db.py file.
"""
__all__ = [
"storage", "storify",
"iters",
"safeunicode", "safestr",
"iterbetter",
"threadeddict",
]
import itertools
from threading import local as threadlocal
class storage(dict):
"""
A storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
Traceback (most recent call last):
...
AttributeError: 'a'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<storage ' + dict.__repr__(self) + '>'
def storify(mapping, *requireds, **defaults):
"""
Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
d doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
    Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<storage {'value': 1}>
>>> storify({}, a={}).a
{}
Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.
>>> storify({'x': 'a'}, _unicode=True)
<storage {'x': u'a'}>
>>> storify({'x': storage(value='a')}, x={}, _unicode=True)
<storage {'x': <storage {'value': 'a'}>}>
>>> storify({'x': storage(value='a')}, _unicode=True)
<storage {'x': u'a'}>
"""
_unicode = defaults.pop('_unicode', False)
# if _unicode is callable object, use it convert a string to unicode.
to_unicode = safeunicode
if _unicode is not False and hasattr(_unicode, "__call__"):
to_unicode = _unicode
def unicodify(s):
if _unicode and isinstance(s, str): return to_unicode(s)
else: return s
def getvalue(x):
if hasattr(x, 'file') and hasattr(x, 'value'):
return x.value
elif hasattr(x, 'value'):
return unicodify(x.value)
else:
return unicodify(x)
stor = storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
iters = (list, tuple, set, frozenset)
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
class iterbetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
>>> import itertools
>>> c = iterbetter(itertools.count())
>>> c[1]
1
>>> c[5]
5
>>> c[3]
Traceback (most recent call last):
...
IndexError: already passed 3
    For boolean tests, iterbetter peeks at the first value in the iterator without affecting the iteration.
>>> c = iterbetter(iter(range(5)))
>>> bool(c)
True
>>> list(c)
[0, 1, 2, 3, 4]
>>> c = iterbetter(iter([]))
>>> bool(c)
False
>>> list(c)
[]
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def __iter__(self):
if hasattr(self, "_head"):
yield self._head
while 1:
yield self.i.next()
self.c += 1
def __getitem__(self, i):
#todo: slices
if i < self.c:
raise IndexError, "already passed "+str(i)
try:
while i > self.c:
self.i.next()
self.c += 1
# now self.c == i
self.c += 1
return self.i.next()
except StopIteration:
raise IndexError, str(i)
def __nonzero__(self):
if hasattr(self, "__len__"):
return len(self) != 0
elif hasattr(self, "_head"):
return True
else:
try:
self._head = self.i.next()
except StopIteration:
return False
else:
return True
class threadeddict(threadlocal):
"""
Thread local storage.
>>> d = threadeddict()
>>> d.x = 1
>>> d.x
1
>>> import threading
>>> def f(): d.x = 2
...
>>> t = threading.Thread(target=f)
>>> t.start()
>>> t.join()
>>> d.x
1
"""
_instances = set()
def __init__(self):
threadeddict._instances.add(self)
def __del__(self):
threadeddict._instances.remove(self)
def __hash__(self):
return id(self)
def clear_all():
"""Clears all threadeddict instances.
"""
for t in list(threadeddict._instances):
t.clear()
clear_all = staticmethod(clear_all)
# Define all these methods to more or less fully emulate dict -- attribute access
# is built into threading.local.
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
has_key = __contains__
def clear(self):
self.__dict__.clear()
def copy(self):
return self.__dict__.copy()
def get(self, key, default=None):
return self.__dict__.get(key, default)
def items(self):
return self.__dict__.items()
def iteritems(self):
return self.__dict__.iteritems()
def keys(self):
return self.__dict__.keys()
def iterkeys(self):
return self.__dict__.iterkeys()
iter = iterkeys
def values(self):
return self.__dict__.values()
def itervalues(self):
return self.__dict__.itervalues()
def pop(self, key, *args):
return self.__dict__.pop(key, *args)
def popitem(self):
return self.__dict__.popitem()
def setdefault(self, key, default=None):
return self.__dict__.setdefault(key, default)
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def __repr__(self):
return '<threadeddict %r>' % self.__dict__
__str__ = __repr__
if __name__ == "__main__":
import doctest
doctest.testmod()
########NEW FILE########
__FILENAME__ = forms
from fuqit.web import RequestDict
def read(web, **expected):
results = web.params.copy()
for key, value in expected.items():
if key in results:
try:
if isinstance(value, int):
results[key] = int(results[key])
elif isinstance(value, float):
results[key] = float(results[key])
elif isinstance(value, bool):
results[key] = bool(results[key])
else:
results[key] = results[key]
except ValueError:
# TODO: log these since they might matter
results[key] = value
else:
results[key] = value
return RequestDict(results)
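# Example (added, hypothetical params): with web.params == {"count": "3"},
# read(web, count=0, reset=False) returns a RequestDict where count has been
# coerced to the int 3 and the missing reset key is filled with False.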
########NEW FILE########
__FILENAME__ = server
# Fuqit Web Framework
# Copyright (C) 2013 Zed A. Shaw
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lust import log, server
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from fuqit import web, tools
DEFAULT_HEADERS = {
'Content-type': 'text/plain'
}
class FuqitHandler(BaseHTTPRequestHandler):
def transform_request(self, request_body=None):
path, params = tools.parse_request(self.path, request_body)
context = tools.build_context(params, self)
body, code, headers = web.process(self.command, path, params, context)
self.generate_response(body, code, headers)
def do_GET(self):
self.transform_request()
def do_POST(self):
clength = int(self.headers['content-length'])
request_body = self.rfile.read(clength)
self.transform_request(request_body)
def generate_response(self, body, code, headers):
headers = headers or DEFAULT_HEADERS
self.send_response(code)
for header, value in headers.items():
self.send_header(header, value)
self.end_headers()
self.wfile.write(body)
def run_server(host='127.0.0.1', port=8000, config_module='config', app='app',
debug=True):
server_address = (host, port)
web.configure(app_module=app, config_module=config_module)
httpd = HTTPServer(server_address, FuqitHandler)
httpd.serve_forever()
class Service(server.Simple):
name = 'fuqit'
should_jail = False
def before_drop_privs(self, args):
pass
def start(self, args):
pass
def run(args, config_file, config_name):
service = Service(config_file=config_file)
log.setup(service.get('log_file'))
service.run(args)
########NEW FILE########
__FILENAME__ = sessions
# Fuqit Web Framework
# Copyright (C) 2013 Zed A. Shaw
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import re
import os
expires_format = "%a, %d-%b-%Y %X GMT"
SESSION_PATTERN = re.compile(r'FuqitSession\s*=\s*([A-Fa-f0-9]+)')
SESSION_TIMEOUT = 100 # days
SESSION_STORE = {}
def make_random_id():
return os.urandom(64/8).encode('hex_codec')
def get_session_id(headers):
cookies = headers.get('cookie', None)
if cookies:
sid_match = SESSION_PATTERN.search(cookies)
if sid_match:
return sid_match.group(1)
else:
return make_random_id()
else:
return make_random_id()
def set_session_id(headers, session_id):
dt = datetime.timedelta(days=SESSION_TIMEOUT)
diff = datetime.datetime.now() + dt
stamp = diff.strftime(expires_format)
cookies = {'Set-Cookie': 'FuqitSession=%s; version=1; path=/; expires=%s; HttpOnly' % (session_id, stamp),
'Cookie': 'FuqitSession=%s; version=1; path=/; expires=%s' % (session_id, stamp)}
headers.update(cookies)
def load_session(variables):
session_id = get_session_id(variables['headers'])
session = SESSION_STORE.get(session_id, {})
variables['session'] = session
variables['session_id'] = session_id
def save_session(variables, response_headers):
session_id = variables['session_id']
set_session_id(response_headers, session_id)
SESSION_STORE[session_id] = variables['session']
########NEW FILE########
__FILENAME__ = tools
# Fuqit Web Framework
# Copyright (C) 2013 Zed A. Shaw
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import importlib
import mimetypes
import cgi
import os
mimetypes.init()
def module(name, app_name=None):
if app_name:
themodule = importlib.import_module("." + name, package=app_name)
else:
themodule = importlib.import_module(name)
reload(themodule)
return themodule
def build_context(params, handler):
return {'params': params,
'headers': handler.headers,
'path': handler.path,
'method': handler.command,
'client_address': handler.client_address,
'request_version': handler.request_version,
}
def parse_request(path, request_body):
request_params = {}
if '?' in path:
path, params = path.split('?', 1)
params = cgi.parse_qsl(params)
request_params.update(params)
if request_body:
params = cgi.parse_qsl(request_body)
request_params.update(params)
return path, request_params
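# Example (added): parse_request("/search?q=cat", "page=2") returns
# ("/search", {"q": "cat", "page": "2"}) -- query-string and body
# parameters are merged into one dict.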
def make_ctype(ext, default_mtype):
mtype = mimetypes.types_map.get(ext, default_mtype)
return {'Content-Type': mtype}
def find_longest_module(app, name, variables):
base = name[1:]
# need to limit the max we'll try to 20 for safety
for i in xrange(0, 20):
# go until we hit the /
if base == '/' or base == '':
return None, None
modname = base.replace('/', '.')
try:
return base, module(modname, app)
except ImportError, e:
# split off the next chunk to try to load
print "ERROR", e
base, tail = os.path.split(base)
# exhausted the path limit
return None, None
########NEW FILE########
__FILENAME__ = web
# Fuqit Web Framework
# Copyright (C) 2013 Zed A. Shaw
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from jinja2 import Environment, PackageLoader, TemplateNotFound
from fuqit import tools, sessions
import re
import traceback
import os
config = None # this gets set by calling configure below
class RequestDict(dict):
__getattr__ = dict.__getitem__
def render_error(code, message="", variables=None):
try:
return render_template(config.errors_dir + '%d.html' %
code, variables or {}, ext='.html')
except TemplateNotFound:
return message, code, {}
def csrf_check(context):
referer = context['headers'].get('referer', '')
if referer:
return config.allowed_referer.match(referer)
else:
return True
def process(method, path, params, context):
if not csrf_check(context):
return render_error(404, "Not Found")
try:
return render(path, context)
except TemplateNotFound:
print "Jinja2 template missing in path: %r for context %r" % (path, context)
traceback.print_exc()
return render_error(404, "Not Found")
except Exception as e:
traceback.print_exc()
return render_error(500, str(e))
def render_template(path, variables, ext=None):
ext = ext or os.path.splitext(path)[1]
headers = tools.make_ctype(ext, config.default_mtype)
if 'headers' in variables:
sessions.load_session(variables)
context = {'web': variables,
'module': tools.module,
'response_headers': headers,
'config': config,
'db': config.db, # it's so common
}
template = config.env.get_template(path)
result = template.render(**context)
if 'headers' in variables:
sessions.save_session(variables, headers)
return result, 200, headers
def render_module(name, variables):
    base, target = tools.find_longest_module(config.app_module, name, variables)
if not (base and target):
return render_error(404, "Not Found", variables=variables)
variables['base_path'] = base
variables['sub_path'] = name[len(base)+1:]
sessions.load_session(variables)
context = RequestDict(variables)
if target:
try:
actions = target.__dict__
# TODO: need to white-list context.method
func = actions.get(context.method, None) or actions['run']
except KeyError:
return render_error(500, 'No run method or %s method.' %
context.method)
result = func(context)
session_headers = {}
sessions.save_session(variables, session_headers)
if isinstance(result, tuple):
body, code, headers = result
headers.update(session_headers)
return body, code, headers
else:
session_headers['Content-type'] = config.default_mtype
return result, 200, session_headers
else:
return render_error(404, "Not Found", variables=variables)
def render_static(ext, path):
# stupid inefficient, but that's what you get
headers = tools.make_ctype(ext, config.default_mtype)
try:
return open(path).read(), 200, headers
except IOError:
return render_error(404, "Not Found")
def render(path, variables):
assert config, "You need to call fuqit.web.configure."
root, ext = os.path.splitext(path)
realpath = os.path.realpath(config.app_path + path)
if not realpath.startswith(config.app_path) or ext == ".py":
# prevent access outside the app dir by comparing path roots
return render_error(404, "Not Found", variables=variables)
elif realpath.startswith(config.static_dir):
return render_static(ext, realpath)
elif ext:
# if it has an extension it's a template
return render_template(path, variables, ext=ext)
elif path.endswith('/'):
# if it ends in /, it's a /index.html or /index.py
base = os.path.join(path, 'index')
#! this will be hackable if you get rid of the realpath check at top
if os.path.exists(config.app_path + base + '.html'):
return render_template(base + '.html', variables, ext='.html')
else:
return render_module(path[:-1], variables)
elif os.path.isdir(realpath):
return "", 301, {'Location': path + '/'}
else:
# otherwise it's a module, tack on .py and load or fail
return render_module(path, variables)
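# Routing summary (added note): paths under the static dir are served from
# disk, a path with an extension such as "/index.html" is rendered as a
# Jinja2 template, a trailing "/" tries "<path>/index.html" before falling
# back to a module, and anything else is resolved as an app module whose
# GET/POST/run function handles the request.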
def redirect(path):
"""
Simple redirect function for most of the interaction you need to do.
"""
return "", 301, {'Location': path}
def error(code, message):
return render_error(code, message)
def configure(app_module="app", config_module="config"):
global config
if not config:
config = tools.module(config_module)
config.app_module = app_module
config.app_path = os.path.realpath(app_module)
config.errors_dir = config.app_path + '/errors/'
config.env = Environment(loader=PackageLoader(config.app_module, '.'))
config.allowed_referer = re.compile(config.allowed_referer)
config.static_dir = os.path.realpath(config.app_path +
(config.static_dir or '/static/'))
########NEW FILE########
|
[
"dyangUCI@github.com"
] |
dyangUCI@github.com
|
c9a3d5331e37aa8bfcc0cce49e59617909444a81
|
4ac0e563ff72ecfa3eb94de2b202137a3c37e5fa
|
/ask/ask/settings.py
|
a4616502b15e880ab89e9d31273f5f02853ab8c7
|
[] |
no_license
|
ViPoSoft/stepic_web_project
|
a850b0006b72bc6fb4cdff1dbc8b9f088679e8b9
|
5bafd6ba73a8e9e95d1e3603e642c8695757f83b
|
refs/heads/master
| 2020-12-21T17:46:55.778611 | 2016-11-25T08:08:33 | 2016-11-25T08:08:33 | 73,498,904 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,265 |
py
|
"""
Django settings for ask project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#5q78-303)=&_0b#ej&24cluu5-xo&c#yk$!7+wvl%#jvs8np$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'qa',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ask.urls'
WSGI_APPLICATION = 'ask.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
#}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'step_base',
'USER': 'vipo',
'PASSWORD': 'vps23',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
[
"noreply@github.com"
] |
ViPoSoft.noreply@github.com
|
d8b6d57a7de29b3d5079fcbad083dff1f0887a97
|
2db924786e5af89a414985c2e7d68e061845c693
|
/Bioinformatics/revc.py
|
fd7297c7ceb5aa85f526492462e30760eec59701
|
[] |
no_license
|
wchung94/Rosalind
|
a970b5342170ca8b7e6353c33c46346326ab4965
|
bf3ce7f315f96073baa9362c00364d662852acbd
|
refs/heads/master
| 2020-07-11T11:08:10.609615 | 2019-09-16T12:43:40 | 2019-09-16T12:43:40 | 204,522,091 | 0 | 0 | null | 2019-08-26T19:15:07 | 2019-08-26T17:01:33 | null |
UTF-8
|
Python
| false | false | 2,859 |
py
|
#!/usr/bin/env python3
"""
Author: Wing Yu Chung
Complementing a Strand of DNA.
"""
# Import statements
from sys import argv
def read_dataset(filetxt):
"""
Turns dataset into string object
Input: txt file with string
Output: string of data from txt file.
"""
text = open(filetxt, 'r')
dataset = text.read()
dataset = dataset.strip()
text.close()
return dataset
def split_dataset(dataset):
"""
Turns dataset string separated by \n into a list
"""
sequence = dataset.split()
return sequence
def extract_sequence(list):
"""
extract longest sequence from list of sequences
Input: list with sequences
Output: string of longest sequence
"""
long_sequence = max(list,key=len)
return long_sequence
def extract_subsequence(list):
"""
extract sub/shortest sequence from list of sequences
Input: list with sequences
Output: string of subsequence
"""
short_sequence = min(list,key=len)
return short_sequence
def detect_position(long,sub):
"""
detect position of sub sequence in sequence.
Input: two sequence strings 1: long sequence 2: subsequence
Output: list of the positions that subsequence is present in sequence
Return position of subsequence in sequence
"""
position_list =[]
for nucl in range(len(long)):
        if long[nucl:nucl+len(sub)] == sub:  # fixed: was len(sub_sequence), a global leaking into this function
position_list.append(nucl+1)
return position_list
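# Editor's example (not in the original): detect_position("ATATA", "ATA")
# returns [1, 3], the 1-based start positions of each (overlapping) match.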
def dna_to_rna(string):
dna = string
rna = dna.replace('T','U')
return rna
def write_to_txt(string):
text_file = open("answer.txt", "w")
text_file.write(string)
text_file.close()
def reverse_string(string):
rev_string = string[::-1]
return rev_string
def comp_seq(sequence):
"""
turn sequence into complementary sequence
Input: string
Output: complementary reverse sequence string
"""
comp_seq = ""
for nuc in sequence:
if nuc == "A":
comp_seq += "T"
elif nuc == "T":
comp_seq += "A"
elif nuc == "G":
comp_seq += "C"
elif nuc == "C":
comp_seq += "G"
#comp_seq = comp_seq[::-1]
return comp_seq
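# Editor's example (not in the original): comp_seq("AAAACCCGGT") == "TTTTGGGCCA";
# combined with reverse_string() above, as done in __main__ below, this yields
# the reverse complement "ACCGGGTTTT", matching the Rosalind REVC sample.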
if __name__ == "__main__":
sequences = read_dataset(argv[1])
print(sequences)
rev_sequence = reverse_string(sequences)
print(rev_sequence)
rev_comp_sequence = comp_seq(rev_sequence)
print(rev_comp_sequence)
write_to_txt(rev_comp_sequence)
# sequences = split_dataset(sequences)
# long_sequence = extract_sequence(sequences)
# sub_sequence = extract_subsequence(sequences)
# position_list = detect_position(long_sequence,sub_sequence)
# print(join_list_int(position_list))
|
[
"noreply@github.com"
] |
wchung94.noreply@github.com
|
0fe02bcc871aa7dfcaf95743a66be42b74cf3460
|
2161711dcdc06abe39d96082edcc91ba4de95668
|
/swagger_client/models/items_point.py
|
afba13b1545cad5006ecdbd2405e92df6f0d5f1e
|
[] |
no_license
|
PriceTT/swagger-piwebapi-python
|
7eb25c329b33a76785cdb0484fae0dfb354722e2
|
20a4a47a38dfe7051b1a35831fb6cd3d2a19679a
|
refs/heads/master
| 2021-06-18T21:21:48.808589 | 2017-06-15T18:44:48 | 2017-06-15T18:44:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,530 |
py
|
# coding: utf-8
"""
PI Web API 2017 Swagger Spec
Swagger Spec file that describes PI Web API
OpenAPI spec version: 1.9.0.266
Contact: techsupport@osisoft.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ItemsPoint(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, items=None, links=None):
"""
ItemsPoint - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'items': 'list[InlineResponse20036Items]',
'links': 'InlineResponse2002Links1'
}
self.attribute_map = {
'items': 'Items',
'links': 'Links'
}
self._items = items
self._links = links
@property
def items(self):
"""
Gets the items of this ItemsPoint.
:return: The items of this ItemsPoint.
:rtype: list[InlineResponse20036Items]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this ItemsPoint.
:param items: The items of this ItemsPoint.
:type: list[InlineResponse20036Items]
"""
self._items = items
@property
def links(self):
"""
Gets the links of this ItemsPoint.
:return: The links of this ItemsPoint.
:rtype: InlineResponse2002Links1
"""
return self._links
@links.setter
def links(self, links):
"""
Sets the links of this ItemsPoint.
:param links: The links of this ItemsPoint.
:type: InlineResponse2002Links1
"""
self._links = links
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ItemsPoint):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"eng@dstcontrols.com"
] |
eng@dstcontrols.com
|
1cf1fe265bd8b195a726d99463ad7614874319b6
|
07591b6533f6d6ed7995e8759eebef6adf7a848d
|
/PracticaFinal/venv/Scripts/easy_install-script.py
|
a42d8178fb91e59f04bbb0141c640cd911e1e3c1
|
[] |
no_license
|
Alesitoker/Python
|
de021d2e8c17937ccb7d7bc950ba561896c9dd07
|
fab22e422c0bee0ea2c47f2edea963f70e3bcf81
|
refs/heads/master
| 2020-04-22T18:25:16.601669 | 2019-02-13T20:44:39 | 2019-02-13T20:44:39 | 170,576,308 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 446 |
py
|
#!C:\zProyectos\Python\PracticaFinal\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"alesitonightray@gmail.com"
] |
alesitonightray@gmail.com
|
b256f3620897b95b8091de3b1fe682dc9e9ccd5d
|
ea3f2cf034d4004f467bfbd246e4899fafae360d
|
/journal12wow.py
|
ce13a7e2291d07628975112c49e582fbd442cd9d
|
[] |
no_license
|
prasanthlouis/Milzoo-dataentryparser
|
e2aa515cffafbc16086261f0c0ca4051837a4673
|
bf0e1c3918ca0425497b77612ff1136c365309ef
|
refs/heads/master
| 2016-09-10T19:41:37.286705 | 2015-06-24T03:41:25 | 2015-06-24T03:41:25 | 37,323,062 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,452 |
py
|
import pyPdf
import re
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from os import listdir
from os.path import isfile, join
flag=0
i=0
j=0
flagtit=0
list=[]
title=""
authorx=""
author=""
quote="'"
mypath="C:\Users\Prasanth\Desktop\journal12\part3\\"
values={'Dr P R Varghese':34,'Amina S.':35,'Thomas Zachariah':36,'Ramesh Babu M.G.':38,'B. Sleema':39,'Rajkumar R.':40,'K. K. Hemalatha':41,'James T. J.':42,'Tresamma George':43,'Manju V. Subramanian':44,'Jeeja Tharakan':45,'N. D. Inasu':46,'Raju Thomas K.':47,'M. John George':48,'Savitha Nandanan':49,'Joseph Louis Olakkengil':50,'Sreeranjit kumar C.V.':51,'Ramya K.':52,'Usha M.':53,'Madhavan. S':54,'G. Muraleedhara Kurup':55,'Dhanalekshmy':56,'Radhadevi':57,'Jojo Joseph Vellanikaran':58,'P. A. SebastianSacred Heart College':59,
'Abdussalam A.K.':60,
'E.A.A. Shukkur':61,
'E.A.Jayson':62,
'Fab.Varghese P.P.':63,
'Francy C.F.':64,
'G. Girijan':65,
'Jain J.Therattil':66,
'Jinsu Varghese':67,
'K. Azeez':68,
'K.R. Arathy':69,
'M.A.Mohammed-Aslam':70,
'P.A. Azeez':71,
'P.V. Jyothi':72,
'Sheela Devi D':73,
'Thomas K.M.':74,
'Zubair':75,
'Ms.Rima Joseph':76,
'Dr. Meera Jan Abraham St.Teresas College':77,
'Jayasree S.Mercy College':78,
'Deepthi Venugopal St.Aloysius College':79,
'Indu M.S.':80,
'Saritha Francis':81,
'Jose Jacob':82,
'Mohammed-Aslam M. A.':83,
'A. K. Abdussalam':84
}
def getPDFContent(path):
content = ""
# Load PDF into pyPDF
pdf = pyPdf.PdfFileReader(file(path, "rb"))
# Iterate pages
for i in range(0, pdf.getNumPages()):
# Extract text from page and add to content
content += pdf.getPage(i).extractText() + "\n"
# Collapse whitespace
content = " ".join(content.replace(u"\xa0", " ").strip().split())
return content
onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]
driver = webdriver.Firefox()
# NOTE: the original script never calls driver.get(...) before locating
# elements, so the admin page appears to be opened manually before this runs.
username = driver.find_element_by_name("admin_name")
driver.find_element_by_name("submit").click()
for y in onlyfiles:
x=getPDFContent(mypath+y).encode("ascii", "ignore")
#use this after adding authors
for val in values:
if val in x:
authorx+=val
print authorx
temptitle=x.split(":",1)[1]
while(flagtit==0):
try:
while(temptitle[i].islower() or temptitle[i].isdigit() or temptitle[i]==',' or temptitle[i]=='.' or temptitle[i]==' ' or temptitle[i+1].islower() or temptitle[i+1]=='.' or temptitle[i]=='&' or temptitle[i+2]==' ' or temptitle[i+1]==' ' or temptitle[i]=='-'):
i+=1
except:
print 'except'
while(temptitle[i].isupper() or temptitle[i]==' ' or temptitle[i]==',' or temptitle[i]=='(' or temptitle[i]==')' or temptitle[i]=='-' or temptitle[i]=='.' or temptitle[i]=='&' or temptitle[i]==':' or temptitle[i]=='p'):
title+=str(temptitle[i])
i+=1
title = title[:-1]
if title=='RESEARCH PAPER ISSN' or title=='ISSN':
title=""
continue
else:
break
print y
print title
i-=1
while(1):
if((temptitle[i]=='I' and temptitle[i+1]=='n' and temptitle[i+2]=='t') or ((temptitle[i]=='A' and temptitle[i+1]=='b' and temptitle[i+2]=='s'))):
break
else:
authorx+=str(temptitle[i])
i+=1
if 'and' in authorx:
authorx=re.sub(r"\band\b",",",authorx)
list=authorx.split(',')
f = open('myfile.txt','a')
for lis in list:
f.write(lis+'\n') # python will convert \n to os.linesep
f.close()
if 'Abstract' in x:
if 'Key words' in x:
desc=re.findall(r'Abstract(.*?)Key words',x)
else:
desc=re.findall(r'Abstract(.*?)Introduction',x)
flag=1
if(flag==0):
if 'Introduction' in x:
desc=re.findall(r'Introduction(.*?)Insulin',x)
if(desc==[]):
desc=re.findall(r'Introduction(.*?)Pathophysiology',x)
flag=1
if 'Inroduction' in x:
desc=re.findall(r'Inroduction(.*?)Hormone',x)
print desc
driver.find_element_by_xpath("//select[@id='journal_name']/option[@value='28']").click()
articlename=driver.find_element_by_name("article_name")
articlename.send_keys(title)
articledesc=driver.find_element_by_name("article_desc")
articledesc.send_keys(desc)
articlepdf=driver.find_element_by_name("article_pdf")
articlepdf.send_keys(mypath+y)
for vals in values:
if vals in authorx:
print str(values[vals])
driver.find_element_by_xpath("//select[@id='article_author']/option[@value="+quote+str(values[vals])+quote+"]").click()
submit=driver.find_element_by_name("submit").click()
title=""
i=0
author=""
authorx=""
flag=0
|
[
"prasanthlouis21@gmail.com"
] |
prasanthlouis21@gmail.com
|
ef63780996c3f52c992a74e3fa166810a517d5b2
|
681cbe8efc086d77ead838a015a8c41c690fa6b6
|
/urls.py
|
e3a196486a18fec0a8989b15e5cbc3ceaca297e7
|
[
"MIT"
] |
permissive
|
daijingjing/tornado_web_base
|
9541cefe3f079753543d2ecc7f18c89c4cf9cbbe
|
e0f76e847843354ee0b6e8d9e80a09c602b9cd29
|
refs/heads/master
| 2021-01-13T07:30:32.916556 | 2016-10-20T06:50:01 | 2016-10-20T06:50:01 | 71,348,429 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 118 |
py
|
# -*- encoding: utf-8 -*-
from modules.index.IndexHandler import IndexHandler
urls = [
(r'/index', IndexHandler),
]
|
[
"jjyyis@qq.com"
] |
jjyyis@qq.com
|
b8857962ab1a71d572050bda7708df505dbe661b
|
3670fc92bd2eeb5c2ca11dba05d19482b94000bb
|
/best binary/new/cervicbin2a.py
|
f7d44db290244215e8bf8f4f432533652278dec6
|
[] |
no_license
|
thamizharasia2019/keras-networks
|
bf9d9fec5586dde6520b2602552e31adbbec2da7
|
4379f4014650c20bba5dcf7209b5cedfbc0f002f
|
refs/heads/master
| 2020-12-05T11:07:14.756227 | 2020-01-30T08:25:24 | 2020-01-30T08:25:24 | 232,090,406 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,696 |
py
|
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.utils import np_utils
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
import os
import math
import keras
from keras.layers import Dense,GlobalAveragePooling2D
from keras.preprocessing import image
from keras.models import Model
from keras.optimizers import Adam
from keras.applications import ResNet50
from keras.applications import imagenet_utils
from keras.preprocessing.image import load_img,img_to_array
from keras.preprocessing.image import load_img
from keras.callbacks import ModelCheckpoint
from keras.applications.imagenet_utils import preprocess_input
from keras.callbacks import LearningRateScheduler
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="path to input dataset")
#ap.add_argument("-a", "--augment", type=int, default=-1,
# help="whether or not 'on the fly' data augmentation should be used")
ap.add_argument("-p", "--plot", type=str, default="plot.png",
help="path to output loss/accuracy plot")
args = vars(ap.parse_args())
# # Using Resnet50 and initialised with weights of imagenet
# ## images in smear 2005 are resized to 224 x 224
# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class images
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
data = []
labels = []
# loop over the image paths
for imagePath in imagePaths:
# extract the class label from the filename, load the image, and
# resize it to be a fixed 64x64 pixels, ignoring aspect ratio
label = imagePath.split(os.path.sep)[-2]
#image = load_img(imagePath, target_size=(224, 224))
# convert the image pixels to a numpy array
#image = img_to_array(image)
# reshape data for the model
#image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = cv2.imread(imagePath,1)
image = cv2.resize(image, (224, 224))
image= preprocess_input(image)
# update the data and labels lists, respectively
data.append(image)
labels.append(label)
data = np.array(data, dtype="float")
print('loaded data')
#print(len(data))
#print(len(labels))
# convert the data into a NumPy array, then preprocess it by scaling
# all pixel intensities to the range [0, 1]
#data = np.array(data, dtype="float") / 255.0
# encode the labels (which are currently strings) as integers and then
# one-hot encode them
le = LabelEncoder()
labels = le.fit_transform(labels)
labels = np_utils.to_categorical(labels, 2)
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
test_size=0.20, random_state=10, shuffle=True)
# Resnet initialisation with imagenet
img_height,img_width = 224,224
num_classes = 2
input_shape= (img_height,img_width,3)
#base_model=ResNet50(weights='imagenet',include_top=False,input_shape= (img_height,img_width,3)) # imports the ResNet50 model and discards the top classification layer
restnet = ResNet50(include_top=False, weights='imagenet', input_shape= (img_height,img_width,3))
output = restnet.layers[-1].output
output = keras.layers.Flatten()(output)
preds = Dense(num_classes, activation='softmax')(output)  # final layer with softmax activation
model = Model(inputs=restnet.input, outputs=preds)
# Freeze the layers except the last 4 layers
for layer in restnet.layers[:-3]:
layer.trainable = False
#for layer in base_model.layers[:-3]:
# layer.trainable = False
# Check the trainable status of the individual layers
#for layer in base_model.layers:
# print(layer, layer.trainable)
# ## Added three dense layers and the last layer is having 7 classes
#x=base_model.output
#x=GlobalAveragePooling2D()(x)
#x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
#x=Dense(1024,activation='relu')(x) #dense layer 2
#x=Dense(512,activation='relu')(x) #dense layer 3
#x = Flatten(name = "flatten")(x)
#preds=Dense(num_classes,activation='softmax')(x) #final layer with softmax activation
# ## created new model using base model input and output with 7 classes
#model=Model(inputs=base_model.input,outputs=preds)
# ## Displayed model details
model.summary()
# ## Created function to computer F1 SCORE
from keras import backend as K
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
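# Editor's worked example (not in the original): with precision 0.8 and
# recall 0.5, f1 = 2 * (0.8 * 0.5) / (0.8 + 0.5) ~= 0.615.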
# ## Compiled model using Adam optimizer and computed accuracy and f1 score
print("[INFO] compiling model...")
#opt = SGD(lr=INIT_LR, momentum=0.9, decay=INIT_LR / EPOCHS)
# optimiser intitialisation
#INIT_LR = 1e-1
BS = 64
EPOCHS = 100
#decay_rate = INIT_LR
#decay_rate = INIT_LR / EPOCHS
# learning rate schedule
initial_lrate = 0.1
drop = 0.5
epochs_drop = 10.0
def step_decay(epoch):
#lrrate = math.floor(initial_lrate/3.0)
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
#decay_rate.append(lrrate)
return lrate
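# Editor's note (not in the original): with initial_lrate=0.1, drop=0.5 and
# epochs_drop=10, step_decay yields lr=0.1 for epochs 0-8, 0.05 for epochs
# 9-18, 0.025 for epochs 19-28, and so on (the +1 shifts each step one epoch
# earlier than a plain epoch/epochs_drop schedule).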
#learning_rate=0.1
#decay_rate=learning_rate/ 3
#opt = keras.optimizers.Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay_rate)
opt = keras.optimizers.Adam(lr=0.3, beta_1=0.3, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy',f1])
# hyperparameter tuning
filepath=" binary weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
#early = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=10)
reduce1 = keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', verbose=1,factor=0.33, patience=2,min_lr=0.000001 )
lrate = LearningRateScheduler(step_decay, verbose=1)
#callbacks_list = [checkpoint,early,reduce1]
#callbacks_list = [checkpoint,reduce1,lrate]
callbacks_list = [checkpoint,reduce1]
#callbacks_list = [checkpoint,lrate]
# data augmentation
train_datagen = ImageDataGenerator(
# preprocessing_function=preprocess_input,
        # NOTE: adaptive_equalization / histogram_equalization are not parameters
        # of the stock keras ImageDataGenerator; they require a patched/forked
        # version, otherwise this call raises a TypeError.
        adaptive_equalization=True,
        histogram_equalization=True,
rotation_range=90,
brightness_range=[0.5,2],
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest')
val_datagen = ImageDataGenerator(
# preprocessing_function=preprocess_input
)
#validation_generator = val_datagen.flow(testX, testY)
print("[INFO] training network for {} epochs...".format(EPOCHS))
H = model.fit_generator(
train_datagen.flow(trainX, trainY, batch_size=BS),
validation_data=(testX, testY),
steps_per_epoch=len(trainX) // BS,
callbacks=callbacks_list,
verbose=2,
epochs=EPOCHS)
# ## Displaying plot of Accuracy Vs epochs
# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=1)
print(classification_report(testY.argmax(axis=1),
predictions.argmax(axis=1), target_names=le.classes_))
# plot the training loss and accuracy
N = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["accuracy"], label="train_acc")
plt.plot(N, H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
import matplotlib
from matplotlib import pyplot as plt
results1=H.history
training_accuracy=results1['accuracy']
val_acc=results1['val_accuracy']
epochs1=range(1,len(training_accuracy)+1)
plt.plot(epochs1,training_accuracy,label='Training Accuracy',marker="*",color='r')
plt.plot(epochs1,val_acc,label='Validation Accuracy',marker="+",color='g')
plt.title('Accuracy Vs Epochs')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
# ## Displaying plot of Loss Vs epochs
# In[19]:
trainloss=results1['loss']
valloss=results1['val_loss']
epochs1=range(1,len(trainloss)+1)
plt.plot(epochs1,trainloss,label='Training Loss',marker="*",color='r')
plt.plot(epochs1,valloss,label='Validation Loss',marker="+",color='g')
plt.title('Loss Vs Epochs')
plt.xlabel('Epochs')
plt.ylabel('Loss')
# ## Displaying plot of F1 score Vs epochs
# In[20]:
trainf1=results1['f1']
valf1=results1['val_f1']
epochs1=range(1,len(trainf1)+1)
plt.plot(epochs1,trainf1,label='Training F1 score',marker="*",color='r')
plt.plot(epochs1,valf1,label='Validation F1 score',marker="+",color='g')
plt.title('F1 score Vs Epochs')
plt.xlabel('Epochs')
plt.ylabel('F1 score')
# In[ ]:
|
[
"noreply@github.com"
] |
thamizharasia2019.noreply@github.com
|
89809d6a7f389db1e69a5d801eef297af6202c18
|
5d021d0d4eb15d819f197ff71bbf572b5fec600c
|
/dockerfile_tx2_forwarder/scripts/old/tx2_forwarder_02.py
|
6237ea2791e06c5347e2f34c098d7f51b47077d9
|
[] |
no_license
|
vivekmids/ATMFraudDetection
|
23f200618c4a1a13d190e91e38ae8c2439dd82e9
|
c117e3c24ad218751adbdb22b27d684b8916df36
|
refs/heads/master
| 2020-09-24T00:21:18.179192 | 2019-12-03T12:55:28 | 2019-12-03T12:55:28 | 225,619,139 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,740 |
py
|
import paho.mqtt.client as mqtt
import time
# This script takes incoming messages from a local mosquitto broker on the TX2
# and forwards them to a remote broker on my VSI
local_client_topics = ["faces"]
responder_client_topics = ["response"]
remote_client_topics = ["adjudication/pass", "adjudication/fail_face", "adjudication/fail_info"]
def on_message_local(client, userdata, msg):
"""After receiving a message, the local client will publish it to the remote
client under the same topic
"""
if msg.topic == "faces":
print("'faces' message received!")
remote_client.publish("faces", payload=msg.payload, qos=0, retain=False)
print("'faces' message published to remote client", msg.payload)
else:
print("Message with unspecificied topic received from local client: ", msg.topic)
print("###### No action taken on message ######")
def on_message_responder(client, userdata, msg):
if msg.topic == "response":
print("'response' message received!")
# Do I need to change to retain=True for qos=2?
remote_client.publish("response", payload=msg.payload, qos=2, retain=False)
print("'response' message published to remote client", msg.payload)
else:
print("Message with unspecificied topic received from responder client: ", msg.topic)
print("###### No action taken on message ######")
def on_message_remote(client, userdata, msg):
"""
"""
if msg.topic == "adjudication/pass":
print("'adjudication/pass' message received!")
local_client.publish("adjudication/pass", payload=msg.payload, qos=2, retain=False)
# Do I need to change to retain=True for qos=2?
print("'adjudication/pass' message published to local client", msg.payload)
elif msg.topic == "adjudication/fail_face":
# Published if face fails authentication
print("'adjudication/fail_face' message received!")
local_client.publish("adjudication/fail_face", payload=msg.payload, qos=2, retain=False)
# Do I need to change to retain=True for qos=2?
print("'adjudication/fail_face' message published to local client", msg.payload)
elif msg.topic == "adjudication/fail_info":
# Published if user fails authentication based on personal info
print("'adjudication/fail_info' message received!")
local_client.publish("adjudication/fail_info", payload=msg.payload, qos=2, retain=False)
# Do I need to change to retain=True for qos=2?
print("'adjudication/fail_info' message published to local client", msg.payload)
else:
print("Message with unspecificied topic received from remote client: ", msg.topic)
print("###### No action taken on message ######")
while True:
# Instantiate a Paho MQTT client ('local_client'), have it connect to the
# mosquitto broker on the TX2, and make it subscribe to the "faces" topic
local_client = mqtt.Client("camera_to_forwarder")
local_client.connect("mosquitto")
print("Local client connect")
local_client.on_message = on_message_local
for topic in local_client_topics:
local_client.subscribe(topic)
print("Subscribed to local client topic: ", topic)
#local_client.subscribe("faces")
#local_client.subscribe("response")
print("Local client subscribe")
local_client.loop_start()
time.sleep(2)
local_client.loop_stop()
# Responder client
responder_client = mqtt.Client("responder_to_cloud")
responder_client.connect("mosquitto")
print("Responder client connect")
responder_client.on_message = on_message_responder
for topic in responder_client_topics:
responder_client.subscribe(topic)
print("Subscribed to responder client topic: ", topic)
#local_client.subscribe("faces")
#local_client.subscribe("response")
print("Responder client subscribe")
responder_client.loop_start()
time.sleep(0.5)
responder_client.loop_stop()
# Remote client
remote_client = mqtt.Client("forwarder_to_cloud")
remote_client.connect("169.62.97.69")
print("Remote client connect")
remote_client.on_message = on_message_remote
for topic in remote_client_topics:
remote_client.subscribe(topic)
print("Subscribed to remote client topic: ", topic)
#remote_client.subscribe("adjudication/pass")
#remote_client.subscribe("adjudication/fail_face")
#remote_client.subscribe("adjudication/fail_info")
print("Remote client subscribe")
remote_client.loop_start()
time.sleep(0.5)
remote_client.loop_stop()
#local_client.loop_forever()
#remote_client.loop_forever() # Is this syntax at the end correct?
|
[
"vivekagarwal@berkeley.edu"
] |
vivekagarwal@berkeley.edu
|
a779e8313069004fad5ba304cfa7c9c7fa2dd8d8
|
2de9c11528bf468612f83b845c02d570f8d271e0
|
/geese/agent/__init__.py
|
68e5af3ce864aa948724ee227b78382f8152f006
|
[] |
no_license
|
waderaku/hungry_geese_for_kaggle
|
4e7cfe2010820ecbab05cdbcadd8395995b4f932
|
811b76bf47571adc1aa0d0987c02b72bca4789ac
|
refs/heads/master
| 2023-06-07T14:20:43.483612 | 2021-07-10T06:54:38 | 2021-07-10T06:54:38 | 369,221,676 | 0 | 0 | null | 2021-07-10T06:54:39 | 2021-05-20T13:41:36 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 79 |
py
|
from geese.agent.agent import Agent
from geese.agent.ppo_agent import PPOAgent
|
[
"yosemat.beta@gmail.com"
] |
yosemat.beta@gmail.com
|
fe95a77072a775bf40d487a65f5bdecc85549ffe
|
a164d560cc5dd30ecc3a04a7d2ee08a144b5a6ff
|
/test/test_chnmf.py
|
0c09f86ce25f962e7f97ebff9eea52943a7088d0
|
[] |
no_license
|
rikkhill/pymf
|
fe6736bc4780856fbc02fc7a138f81dab788e776
|
c2c9dd960b98c3ff9bb94947253f268b16028d7e
|
refs/heads/master
| 2021-01-23T01:52:08.114731 | 2016-08-09T10:23:18 | 2016-08-09T10:23:18 | 53,073,722 | 0 | 3 | null | 2016-03-03T18:33:49 | 2016-03-03T18:33:49 | null |
UTF-8
|
Python
| false | false | 889 |
py
|
import numpy as np
from numpy.testing import *
from pymf.chnmf import CHNMF
from base import *
class TestCHNMF():
data = np.array([[1.0, 0.0, 0.0, 0.5],
[0.0, 1.0, 0.0, 0.0]])
W = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
H = np.array([[1.0, 0.0, 0.0, 0.5],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.5]])
def test_compute_w(self):
""" Computing W without computing H doesn't make much sense for chnmf..
"""
mdl = CHNMF(self.data, num_bases=3)
mdl.H = self.H
mdl.factorize(niter=10, compute_h=False)
assert_set_equal(mdl.W, self.W, decimal=2)
def test_compute_h(self):
mdl = CHNMF(self.data, num_bases=3)
mdl.W = self.W
mdl.factorize(niter=10, compute_w=False)
assert_set_equal(mdl.H, self.H, decimal=2)
|
[
"christian.thurau@admins-MacBook-Pro-4.local"
] |
christian.thurau@admins-MacBook-Pro-4.local
|
ae0c7ef4e03d6fc5850468f0060b0796d3a8d2ed
|
ca82a6883decd4b9def4d77a9e0d818c82ccd387
|
/configs/settings.py
|
9d250f158ba5131d3ae07660aecec14a4b544f49
|
[] |
no_license
|
DanielBok/redesigned-invention
|
87e37401f044fca8d3b6e0ef361b688a9f51ebfe
|
ca16922184dac7973f71baa546ec75bb2387edc2
|
refs/heads/master
| 2020-05-31T22:54:21.814225 | 2017-06-26T13:20:28 | 2017-06-26T13:20:28 | 94,052,528 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 540 |
py
|
import os
from os import getenv
from os.path import abspath, dirname, join
DEBUG = getenv('DEBUG', "YES") == "YES"
DEBUG_TB_INTERCEPT_REDIRECTS = getenv('DEBUG_TB_INTERCEPT_REDIRECTS', False)
SECRET_KEY = getenv('SECRET_KEY', os.urandom(128).hex())
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = getenv('DATABASE_URL',
"sqlite:///{0}".format(abspath(join(dirname(__file__), '..', 'app_data', 'data.db'))))
TIMEZONE = 'Asia/Singapore'
SERVER_NAME = getenv('SERVER_NAME', 'localhost:5000')
|
[
"daniel.bok@outlook.com"
] |
daniel.bok@outlook.com
|
ef465ebd4d48eed8cd2849a07522138d036a5def
|
f2978acf9e7861bab33e9640ad80bf2164405384
|
/af_base_demo/__manifest__.py
|
a588b059b7d2138a20709fcf7674e4f705c772b0
|
[] |
no_license
|
roman-rudyk-ndev/odoo-af
|
3c1775b06061cf2a4d87bd65feeaaeec3a415a2d
|
4eefc4ba2af21f24cd666459537fb8e8e64c5738
|
refs/heads/master
| 2023-06-30T06:43:06.584160 | 2020-12-07T10:48:59 | 2020-12-07T10:48:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 653 |
py
|
{
'name': 'AF Base Demodata',
'version': '12.0.0.2',
'category': '',
'description': """
    Module to override Odoo's original demo data.
===========================================================
AFC-119
""",
'author': 'Vertel AB',
'license': 'AGPL-3',
'website': 'http://www.vertel.se',
'depends': ['base'],
'demo': [
'data/base_partner_demo_se.xml',
#'data/res.country.state.se.csv',
#'data/res_company_data_se.xml',
#'data/res_company_data_se.xml',
#'data/res_partner_image_demo_se.xml',
#'data/res_users_demo_se.xml'
],
'application': False,
'installable': True,
}
|
[
"fredrik@arvas.se"
] |
fredrik@arvas.se
|
dde1ac5a79a4f406e745bf6983a9b16cb780a9b7
|
158c80436eddc5f4e98987badb2c2c330ee4dde7
|
/Forest/ThumbBiter/ThumbBiter.py
|
051a2bf29e5b1f67136e4cd45123bf7c0f600ec2
|
[] |
no_license
|
DRCART03/CodeCombat
|
3bb84eb90c310c4dbe2adbb302890eb4c2866eec
|
a9eff4fec5be4dac8914d8b9b1483586b265800b
|
refs/heads/master
| 2020-08-04T11:02:56.504091 | 2019-10-01T14:31:22 | 2019-10-01T14:31:22 | 212,115,556 | 0 | 0 | null | 2019-10-01T14:30:37 | 2019-10-01T14:20:29 | null |
UTF-8
|
Python
| false | false | 177 |
py
|
if (2 + 2 == 4):
    hero.say("Hey!")
if (2 + 2 == 4):
    hero.say("Yes, you!")
if (3 + 3 == 6):
    hero.say("Come at me!")
if (2 * 10 == 20):
    hero.say("Prativnij")
|
[
"vadim-job-hg@yandex.ru"
] |
vadim-job-hg@yandex.ru
|
11bd8f2d51b9d7888bb7781fc3fb369b177e498a
|
b4a851c432b4ede182d88ea3636e79ec1ae4374e
|
/Práctico 3/Ejercicio 7/ClaseLista.py
|
70b5bdb7f101616c8edd47df173946311b372e9f
|
[] |
no_license
|
mamasanet/C-digos
|
cab2a305f1c81ce717cc8f2cf586cec85418e0bc
|
3926cdf7af7e9fb6673b8f088d1f087444d52d14
|
refs/heads/master
| 2022-11-10T15:30:07.616139 | 2020-07-03T00:21:47 | 2020-07-03T00:21:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,715 |
py
|
from ClaseDocenteInvestigador import DocenteInvestigador
from ClaseInvestigador import Investigador
from archivodeinterface import inter
from ClasePersonal import Personal
from ClaseDocente import Docente
from ClasePersonaldeApoyo import PersonaldeApoyo
from ClaseNodo import Nodo
import zope
@zope.interface.implementer(inter)
class Lista:
__comienzo = None
__actual = None
__tope = 0
__indice = 0
def __init__(self):
self.__comienzo = None
self.__actual = None
def __iter__(self):
return self
def __next__(self):
if self.__indice == self.__tope:
self.__actual = self.__comienzo
self.__indice = 0
raise StopIteration
else:
self.__indice += 1
dato = self.__actual.getDato()
self.__actual = self.__actual.getSiguiente()
return dato
def agregarElemento(self, dato):
nodo = Nodo(dato)
nodo.setSiguiente(self.__comienzo)
self.__comienzo = nodo
self.__actual = nodo
self.__tope += 1
def insertarElemento(self, elemento, posicion):
if posicion == 0:
self.agregarElemento(elemento)
else:
aux = self.__comienzo
i = 0
elemento = Nodo(elemento)
while i < posicion and aux != None:
anterior = aux
aux = aux.getSiguiente()
i += 1
if i > posicion:
raise IndexError
else:
elemento.setSiguiente(aux)
anterior.setSiguiente(elemento)
self.__tope += 1
def mostrarElemento(self, posicion):
i = 0
actual = self.__comienzo
encontrado = None
while actual != None and encontrado == None:
if i == posicion:
auto = actual.getDato()
encontrado = auto
else:
actual = actual.getSiguiente()
i += 1
return encontrado
def validacategoria(self, categoria):
categorias = ['I', 'II', 'III', 'IV', 'V']
encontrada = None
i = 0
while i < len(categorias) and encontrada == None:
if categorias[i] == categoria:
encontrada = categorias[i]
i = len(categorias)
else:
i += 1
return encontrada
def validacarrera(self, carrera):
actual = self.__comienzo
band = False
while actual != None and band == False:
personal = actual.getDato()
if carrera == personal.getCarrera():
band = True
else:
actual = actual.getSiguiente()
return band
def item4(self):
band = False
while not band:
            carrera = input('Enter the degree-program name: ').capitalize()
if self.validacarrera(carrera) == True:
band = True
else:
                print('ERROR, invalid degree-program name.')
        listadocenteinvestiga = []  # list used to collect and sort the DocenteInvestigador objects
actual = self.__comienzo
while actual != None:
personal = actual.getDato()
if personal.getCarrera() == carrera:
if isinstance(personal, DocenteInvestigador):
                    listadocenteinvestiga.append(personal)  # collect the teaching researchers
actual = actual.getSiguiente()
print()
        listadocenteinvestiga.sort()  # the original called sorted() and discarded its result
for doc in listadocenteinvestiga:
print(doc)
def validaarea(self, area):
band = False
actual = self.__comienzo
while actual != None and band == False:
personal = actual.getDato()
if isinstance(personal, Investigador) or isinstance(personal, DocenteInvestigador):
if area == personal.getArea():
band = True
else:
actual = actual.getSiguiente()
else:
actual = actual.getSiguiente()
return band
def item5(self):
band = False
while not band:
            areadeinvestigacion = input('Enter the research area: ').capitalize()
if self.validaarea(areadeinvestigacion) == True:
band = True
else:
                print('ERROR, invalid research area.')
actual = self.__comienzo
cont_investigador = 0
cont_docinvestigador = 0
while actual != None:
personal = actual.getDato()
if isinstance(personal, DocenteInvestigador) or isinstance(personal, Investigador):
if areadeinvestigacion == personal.getArea():
if isinstance(personal, DocenteInvestigador):
cont_docinvestigador += 1
elif isinstance(personal, Investigador):
cont_investigador += 1
actual = actual.getSiguiente()
else:
actual = actual.getSiguiente()
        print('\nIn research area %s there work %s researcher(s) and %s teaching researcher(s).\n' % (areadeinvestigacion, cont_investigador, cont_docinvestigador))
def item6(self):
lista = []
actual = self.__comienzo
while actual != None:
personal = actual.getDato()
tipo = ''
if type(personal) == DocenteInvestigador:
tipo = 'Docente Investigador'
elif isinstance(personal, PersonaldeApoyo):
tipo = 'Personal de Apoyo'
elif isinstance(personal, Investigador):
tipo = 'Investigador'
elif isinstance(personal, Docente):
tipo = 'Docente'
listapersonal = [personal.getNombre(), personal.getApellido(), tipo, personal.getSueldo()]
lista.append(listapersonal)
actual = actual.getSiguiente()
lista.sort(key = lambda x:x[1], reverse = False)
        print(' Name       Surname       Agent type           Salary')
for per in lista:
print(per)
print()
def item7(self):
bande = False
while not bande:
            print('=== CATEGORIES: I | II | III | IV | V ===')
            categoria = input('Enter category (i = I | v = V): ').upper()
encontrada = self.validacategoria(categoria)
if encontrada != None:
categoria = encontrada
bande = True
else:
                print('ERROR, invalid category.')
print()
acum_importe = 0
actual = self.__comienzo
while actual != None:
personal = actual.getDato()
if isinstance(personal, DocenteInvestigador):
if categoria == personal.getCategoria():
                    print('Surname: %s | Name: %s | Extra amount for Teaching and Research: $%s' % (personal.getApellido(), personal.getNombre(), personal.getImporteextra()))
acum_importe += personal.getImporteextra()
actual = actual.getSiguiente()
        print('The total extra amount payable for Teaching and Research is: $%s.\n' % (acum_importe))
def toJSON(self):
listapersonal = []
for a in self:
listapersonal.append(a.toJSON())
d = dict(__class__ = self.__class__.__name__, personal = listapersonal)
return d
def mostrar(self):
for personal in self:
print(personal.mostrar())
def __len__(self):
return self.__tope
|
[
"ismafacultad19@hotmail.com"
] |
ismafacultad19@hotmail.com
|
e1a9d6f442faf4956bc0976a8a9b690feb0e4d1d
|
ed278bcbd85f11da80b67fed5dd3b68f27dbaa40
|
/landing/models.py
|
fa83d8c6ac4edcb082f7f4948c1d6c2b7ea6f4eb
|
[] |
no_license
|
DenisAmberCode/aromatic
|
6f60f2fef2ead1c536cc88c3e9ea62efa5d5ba1e
|
0ad2ddd00224b7e7bf8aeec75c87754b642c9a62
|
refs/heads/master
| 2022-12-26T10:36:20.329473 | 2020-10-07T16:48:27 | 2020-10-07T16:48:27 | 244,430,835 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 301 |
py
|
from django.db import models
class Sub(models.Model):
email = models.EmailField()
name = models.CharField(max_length=256)
def __str__(self):
return "Name: %s Email: %s" % (self.name, self.email)
class Meta:
verbose_name = "Sub"
verbose_name_plural = "Subs"
|
[
"gladkiy-denis@mail.ru"
] |
gladkiy-denis@mail.ru
|
5851e4cb051a236ff338f53081a59497afd9ad72
|
b6f4ba6de41d40b91023405e0549c0db53907af1
|
/projects/migrations/0007_newsitem.py
|
7706fbbaea0c7eeaba626a0bef581401bc879ab2
|
[] |
no_license
|
arevej/Mitarbet
|
06c2fe6b030d7a1c4797b9b787cdff118c3dbdfd
|
e81d33ecccf483f44431afc1c3345f818917705d
|
refs/heads/master
| 2021-06-17T19:30:14.078127 | 2017-06-13T21:37:44 | 2017-06-13T21:42:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 931 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-12 11:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('projects', '0006_file'),
]
operations = [
migrations.CreateModel(
name='NewsItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creation_date', models.DateField()),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"tim@Tims-MacBook-Air.local"
] |
tim@Tims-MacBook-Air.local
|
765d1068276256da66e9fe796de3ddbddcd08b3f
|
c6f5411c12c34b1150add156805cd1a8c95c9741
|
/task_dm1.py
|
4034f835b00d04eb69ba334432667e3598521bf5
|
[] |
no_license
|
khorkhordin/khorkhordin
|
d1550939771266e803d9a3cf4265c3c23d457c07
|
627895e4c1cd65d01bc2696071a454333387103b
|
refs/heads/master
| 2021-01-17T19:52:09.086848 | 2016-07-22T16:37:41 | 2016-07-22T16:37:41 | 62,707,513 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 143 |
py
|
from drawman import *
import time
A = [(0,0),(100,0),(100,100),(0,100),(0,0)]
pen_down()
for x,y in A:
to_point(x,y)
pen_up()
time.sleep(5)
|
[
"vashor@mail.ru"
] |
vashor@mail.ru
|
2e113de7cf196160d26f6a11e759efb0b1d9c64c
|
523b984f35957ea9e90397d0f39df68379fea3f3
|
/tensorflow_study/basic_study/Session.py
|
77ac0f586eb178e015d92b1c88885f5f7b653122
|
[] |
no_license
|
Alxemade/PycharmProjects
|
f3219b9b188b01d0074c7aab04917758914ebd73
|
0c3a7ea3a83fcfa2ece0383408a1166f7d122335
|
refs/heads/master
| 2020-03-10T05:02:28.128432 | 2018-04-12T07:09:17 | 2018-04-12T07:09:17 | 129,207,595 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 510 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "xuchao"
'test session'
import tensorflow as tf
a = tf.constant([[2, 2]])
b = tf.constant([[3], [3]])
c = tf.matmul(a, b) # 输出两个数的矩阵乘法
d = tf.multiply(a, b) # 这个是矩阵的向量积
# 1 . 第一种写法,需要自己关闭close
sess = tf.Session()
print(sess.run(c))
sess.close()
# 2. 第二种写法,不需要自己close
with tf.Session() as sess:
print(sess.run(c)) # 这种写法不需要自己关闭close
|
[
"chaoxu06@mail.ustc.edu.cn"
] |
chaoxu06@mail.ustc.edu.cn
|
6c4327226a11b220c2c2885b44266a2e04c37d1a
|
4b24899e5e424d4d953670cf9d9bbd3aa888fca1
|
/deprecated-function/main.py
|
f85872075116487b6c804fc35929985703645545
|
[
"Apache-2.0"
] |
permissive
|
meelement/NLP-Dataset
|
ed180b05c18f7675865caa304d70a024269a510d
|
fd0000021c6a8ac378f8e65f975841f0cbb3cb79
|
refs/heads/master
| 2021-09-15T19:18:33.844672 | 2018-06-09T03:12:17 | 2018-06-09T03:12:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,893 |
py
|
import json
import pandas as pd
def texttolist(fromfile):
fo = open(fromfile, "r")
result = fo.read().split('\n')
result = map(str.strip, result)
result = filter(None, result)
return result
def jsontotext(lists, filename):
texts = []
for k in lists:
x = json.loads(k)
texts.append(x['text'])
fo = open(filename, "wb")
for i in xrange(len(texts)):
texts[i] = texts[i].encode('ascii', 'ignore').decode('ascii')
fo.write(texts[i])
fo.write("\n")
fo.close()
def filterlist(lists):
for i in reversed(xrange(len(lists))):
strings = lists[i].split()
for x in reversed(xrange(len(strings))):
if strings[x].find('@') >= 0 or strings[x].find('http') >= 0:
del strings[x]
lists[i] = ' '.join(strings)
if len(lists[i]) <= 3:
del lists[i]
return lists
def listtotext(lists, filename):
fo = open(filename, "wb")
lists = filterlist(lists)
for i in xrange(len(lists)):
fo.write(lists[i])
fo.write("\n")
fo.close()
def readcsv(filename):
dataset = pd.read_csv(filename)
negative = []
positive = []
for i in xrange(dataset.shape[0]):
if (i + 1) % 10000 == 0:
print 'done process ' + str(i + 1)
try:
string = ' '.join(dataset.ix[i][3].split())
if dataset.ix[i][1] == 0:
negative.append(string)
else:
positive.append(string)
except:
continue
listtotext(negative, 'negativetweet')
listtotext(positive, 'positivetweet')
jsontotext(texttolist('negative_tweets.json'), 'negative')
listtotext(texttolist('negative'), 'negative')
jsontotext(texttolist('positive_tweets.json'), 'positive')
listtotext(texttolist('positive'), 'positive')
readcsv('Sentiment Analysis Dataset.csv')
|
[
"husein.zol05@gmail.com"
] |
husein.zol05@gmail.com
|
30e30927aa9371dc9d4c55b636e753e30bacc84a
|
2696bd485fd09f8b0199f98972163e1140793fd1
|
/ems/eventhook.py
|
52f8e994bf70adb0d1add1f89cf7a910a1e67591
|
[
"MIT"
] |
permissive
|
mtils/ems
|
24b192faf1d03f78cb9c930193051666a453d18b
|
a958177d1474828e1d892dda20f4be68869e0483
|
refs/heads/master
| 2020-05-30T04:37:52.866679 | 2016-10-04T07:30:42 | 2016-10-04T07:30:42 | 30,531,077 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,463 |
py
|
class EventHook(object):
"""A 'event' Transmitter
You can hook into it with hook += myReceiver (callable)
then hook.fire() will call myReceiver()
(or hook += myobj.onFoo => hook.fire(bar) will call myobj.onFoo(bar))
Normal usage would be:
class DB(object):
def __init__(self):
self.recordCreated = EventHook()
def create(self, entry):
            # ...code
            self.recordCreated.fire(entry)

    class DebugPrinter(object):
        def printCreatedEntry(self, entry):
            print(entry)

    db = DB()
dp = DebugPrinter()
db.recordCreated += dp.printCreatedEntry
"""
def __init__(self):
self.__receivers = []
self.fireBlocked = False
self.wasFired = False
self._store = None
def __iadd__(self, handler):
"""Adds a receiver to this EventHook
args:
handler A callable which will be called on fire
:returns: EventHook
"""
self.__receivers.append(handler)
return self
def __isub__(self, handler):
"""Removes a receiver from this EventHook
args:
handler The callable which was previous assigned
:returns: EventHook
"""
self.__receivers.remove(handler)
return self
def fire(self, *args, **keywargs):
"""Fires a 'event'. Not really, it calls every assigned callable
If some callable returns true, it will stop Propagation
:returns: void
"""
if self.fireBlocked:
return
self.wasFired = True
for handler in self.__receivers:
result = handler(*args, **keywargs)
if result:
return
def __call__(self, *args, **keywargs):
"""Alias for fire(). The main purpose of this method is to allow
chaining of events. So like
instance.hook += my_callable
you can write
instance.hook += my_object.hook
        Then the event of instance will be fired whenever my_object.hook fires.
:rtype: void
"""
return self.fire(*args, **keywargs)
def clearOfType(self, receiverObj):
"""Removes all receivers of the class of the class
ob the passed method
:returns EventHook
"""
deleteLater = set()
for knownReceiver in self.__receivers:
            if knownReceiver.im_self == receiverObj:  # im_self is Python 2 only; use __self__ on Python 3
deleteLater.add(knownReceiver)
for knownReceiver in deleteLater:
self -= knownReceiver
return self
def clear(self):
"""Clears all receivers
:returns: EventHook
"""
self.__receivers = []
return self
def get(self):
"""Get a value from store to reduce the if value changed lines in using objects
Simply set the value via self.loaded.set(True) and self.loaded.get()
to let Eventhook do the work
"""
return self._store
def set(self, value):
"""Set a value to a store to reduce the if value changed lines in using objects
Simply set the value via self.loaded.set(True) and self.loaded.get()
to let Eventhook do the work
"""
if self._store == value:
return
self._store = value
self.fire(value)
def __len__(self):
return len(self.__receivers)
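# Editor's sketch (not part of the original module): minimal demo of the
# receiver/fire pattern described in the class docstring above.
if __name__ == '__main__':
    def show(entry):
        print('created: %s' % entry)

    hook = EventHook()
    hook += show          # register a receiver
    hook.fire({'id': 1})  # calls show({'id': 1})
    hook -= show          # unregister it again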
|
[
"mtils@web-utils.de"
] |
mtils@web-utils.de
|
4563342b904cd73111c83ce1d6be56435505a36b
|
646c0dacee016463f3b3cb058ed566dab91a9bfa
|
/mypython/bin/easy_install
|
0abe67ed7f6caabade44b35e5bf0d9fd6a4261f8
|
[] |
no_license
|
brosnans/django-blog
|
3ea79831209500500b4f52fa4e40c4e5ca560c94
|
ddb1b83a336b7ccc14131fb3201359c2a12d8426
|
refs/heads/master
| 2022-08-12T23:36:07.294870 | 2020-05-23T11:18:34 | 2020-05-23T11:18:34 | 260,501,706 | 0 | 0 | null | 2020-05-01T17:52:29 | 2020-05-01T16:07:06 |
Python
|
UTF-8
|
Python
| false | false | 256 |
#!/workspace/django-blog/mypython/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"s.brosnan3@gmail.com"
] |
s.brosnan3@gmail.com
|
|
30779aacb4a3ba1a363f80eaab68a5b57e15dda2
|
0f2172763e165e295d3913e8ce1c5738f8c7de58
|
/products/views.py
|
f3d5bc5cf57c19d88923a7acdfebb96d825772e6
|
[] |
no_license
|
Yogeshc685/learning
|
fd132634f726bb8a401c91e7d09fcaffcc6eea05
|
1bcef1df43226a065fbf8fcb7ba8d9864c70b1fb
|
refs/heads/master
| 2020-09-07T03:45:55.992836 | 2019-11-09T13:37:27 | 2019-11-09T13:37:27 | 220,646,494 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 796 |
py
|
from django.shortcuts import render, get_object_or_404
from .models import Category, Product
def product_list(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = Product.objects.filter(category=category)
context = {
'category': category,
'categories': categories,
'products': products
}
return render(request, 'product/index.html', context)
def product_detail(request, id, slug):
product = get_object_or_404(Product, id=id, slug=slug, available=True)
context = {
'product': product
}
return render(request, 'detail.html', context)
|
[
"Yogesh.Chauhan@legitquest.com"
] |
Yogesh.Chauhan@legitquest.com
|
3424ef5fc1269c9b3b38288b5799630237e459a4
|
3add20877ed753be70402e97f40ad3737a265515
|
/ladder_advanced/3_data_structure_II/363. Trapping Rain Water.py
|
c056979a1dd41625c462deb488ea6fa16cc5834f
|
[] |
no_license
|
zhuohuwu0603/interview-algothims
|
85f48b7de2e87129fd353528b114cb80c8877d7b
|
338d3bc2f2916c5c4936767b07b2fd22b4121049
|
refs/heads/master
| 2022-01-08T11:27:16.441367 | 2019-06-12T05:49:25 | 2019-06-12T05:49:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,039 |
py
|
'''Description
Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.
Example
Given [0,1,0,2,1,0,1,3,2,1,2,1], return 6.
Challenge
O(n) time and O(1) memory
O(n) time and O(n) memory is also acceptable.
'''
class Solution:
# @param heights: a list of integers
# @return: a integer
def trapRainWater(self, heights):
# write your code here
if not heights:
return 0
left, right = 0, len(heights) - 1
leftheight, rightheight = heights[left], heights[right]
res = 0
while left < right:
if leftheight < rightheight:
left += 1
if leftheight > heights[left]:
res += leftheight - heights[left]
else:
leftheight = heights[left]
else:
right -= 1
if rightheight > heights[right]:
res += rightheight - heights[right]
else:
rightheight = heights[right]
return res
'''Summary
Technique: two pointers converging from both ends.
This problem calls for analysing the observed behaviour first, deriving the
logic from it, and only then writing code.
Observation: filling from left to right, water is trapped only when the next
bar is lower than the current tallest bar on the left; the same holds from
the right.
The next question is how to fill, and from which side: left, right, or both
at once? Analysis shows we must always advance from whichever side currently
has the lower boundary bar; this guarantees no water is lost, because any
leak flows toward the lower side, so we always start from the lower of the
two boundary bars.
Since bar heights vary, the boundary heights on both sides keep changing,
so we must keep updating them as the pointers move.
'''
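# Editor's sketch (not in the original): quick check against the example above.
if __name__ == '__main__':
    print(Solution().trapRainWater([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))  # expect 6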
|
[
"michaelz@squareup.com"
] |
michaelz@squareup.com
|
57ad0d706feb3ca8ef3d596147a296f192ee8675
|
11c2f99b74a0f1d8e1f36902bb2f290207c28fff
|
/asciiArt.py
|
3ee522d0150d72fd1487613b66d99dda112c552a
|
[] |
no_license
|
endlessseal/Python
|
0a801d10413564918e36da8cfe86ae2ce8c79b69
|
5320cdd4dcfa9cb39564b8e14e0c040a8d16db84
|
refs/heads/master
| 2021-01-10T12:35:47.697942 | 2020-03-13T14:48:17 | 2020-03-13T14:48:17 | 43,763,664 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,335 |
py
|
import numpy as np
from PIL import ImageFilter, Image, ImageDraw
# Converts an image to ASCII art.
# version 1: ~2 seconds on average; the edge calculation is slightly off
# due to rounding.
def AsciiMe(fileName, cols= 128, scale = 1 ):
gscale = '@%#*+=-:. '
image = Image.open(fileName).convert('L').filter(ImageFilter.EDGE_ENHANCE_MORE)
max_width, max_height = image.size
w = max_width/cols
h = w/scale
rows = int(max_height/h)
newimg = []
for row in range(rows):
y1,y2 = (np.arange(row,row+2)*h).astype(int)
newimg.append("")
for col in range(cols):
x1,x2 = (np.arange(col,col+2)*w).astype(int)
newimg[row] += gscale[int((np.average(image.crop((x1, y1, x2, y2)))*9)/255)]
return '\n'.join(newimg)
# version 2: ~0.1 seconds on average
def AsciiMe2(fileName,size=(512,512)):
gscale = '@%#*+=-:. '
image = Image.open(fileName).convert('L').filter(ImageFilter.EDGE_ENHANCE_MORE)
image.thumbnail(size, Image.ANTIALIAS)
normalizer = np.vectorize(lambda t: gscale[int((t * 9)/255)])
n = normalizer(image)
return '\n'.join([''.join(x) for x in n])
txt = AsciiMe2('Capture.PNG')
r = txt.split('\n')
x, y = len(r[0]),len(r)
img = Image.new('RGB', (x*8, y*16))
d = ImageDraw.Draw(img)
d.text((0, 0), txt, fill=(255, 255, 255))
img.save('test.png')
|
[
"noreply@github.com"
] |
endlessseal.noreply@github.com
|
93075674c5021ced9980234d2d3c9c6d500f90a4
|
a3de78decfdd973a560654f0a0696048ab8dfa21
|
/basicus/urls.py
|
3782a4418e520d92fdf9d35782810da21f9c316d
|
[
"MIT"
] |
permissive
|
FiaDot/basicus
|
ed56e656ff58fcaedc55b9135bbb6a001f6d8c99
|
72faca3c455bcce7e0f482867528948791644fcb
|
refs/heads/master
| 2020-04-04T12:56:49.743343 | 2014-01-23T09:19:51 | 2014-01-23T09:19:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 505 |
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'basicus.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^board/$', 'board.views.board'),
url(r'^board/(\d+)/$', 'board.views.article'),
url(r'^board/write/$', 'board.views.write'),
url(r'^board/comment/(\d+)/$', 'board.views.comment'),
)
|
[
"fiadot@gmail.com"
] |
fiadot@gmail.com
|
197a5a0b7dd44984fa0f6475e28264d5b7db156f
|
004c3753b810cd36290c586c3766f1c77f7249f0
|
/sbs_paie/models/prime_ret.py
|
b9d91bcbc507f2283f40e3951d936e1673deb060
|
[] |
no_license
|
OdooAkeno/akeno
|
2426ec6045f5875873622118ce889e06b6e4c3c7
|
2c603c9f4cc5a719502526a0c070930671d12975
|
refs/heads/master
| 2022-04-29T01:42:37.716084 | 2021-06-29T14:20:28 | 2021-06-29T14:20:28 | 156,714,618 | 2 | 10 | null | 2021-06-29T14:20:29 | 2018-11-08T13:55:22 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,166 |
py
|
# -*- coding: utf-8 -*-
from odoo import fields, models, _
HELP_TYPE_CONTRAT = _(u"Check this box if accounting entries "
                      u"must be generated for this contract type")
class PrimeRet(models.Model):
_name = 'sbs_paie.prime_ret'
regle_salariale = fields.Many2one(
string='Regle salariale',
required=True,
readonly=False,
help=u"Regle salariale lié",
comodel_name='hr.salary.rule')
code = fields.Char(
string='Code',
readonly=True,
related="regle_salariale.code")
name = fields.Char(
string='Name',
required=False,
size=50)
contrats = fields.Many2many(
string='Contrats',
readonly=True,
help="contrats utilisant cette prime/retenue",
comodel_name='hr.contract',
relation='model_primeret_to_contrat')
is_prime = fields.Boolean(
        string='Is it a bonus?',
        default=True,
        help="Untick this box if it is a deduction")
montant = fields.Float(
string='Montant',
required=True,
default=0.0,
digits=(16, 2))
|
[
"fouda.henri2009@gmail.com"
] |
fouda.henri2009@gmail.com
|
5b69d9387984124354530ab6af87f68e913eb53c
|
43de73816f972863de20b15a7410f4008f45049e
|
/Worksheet 3 Iteration/parts.py
|
f6bf41d44573903a7eb8f494874163f6d84f9fb5
|
[] |
no_license
|
wkelly1/SchoolCode
|
eb0bb539544c07ebbafaca9d25a5adce2cbd6a50
|
76de56c369278f3c809adf5e0b3ada1198478857
|
refs/heads/master
| 2022-03-25T00:20:39.427923 | 2019-12-05T18:03:51 | 2019-12-05T18:03:51 | 105,812,754 | 0 | 1 | null | 2019-12-05T18:03:53 | 2017-10-04T20:03:35 |
Python
|
UTF-8
|
Python
| false | false | 412 |
py
|
i = True
noOldParts = 0
partNo = 0
while i == True:
number = input("Please input a part number")
if len(number) != 4:
print("Input the number again, wrong length")
elif number == "9999":
print("9999 is too high")
i = False
elif number[3] in ("6", "7", "8"):
noOldParts = noOldParts + 1
else:
partNo = partNo + 1
print("number of old parts = ", noOldParts)
|
[
"william.kelly20@btinternet.com"
] |
william.kelly20@btinternet.com
|
6b8ee59736e57e08a3e475f65a52bf453ec8b9cc
|
e158fc0f4427aaf919f0ab2402ab0f404805cb94
|
/postgres/datadog_checks/postgres/statements.py
|
a4a9c39af7c5ea2303427bd718ed21d4d9035ec8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
shlomi-viz/integrations-core
|
6e76603dc9685160a72d50b18fb04bff8fb51f28
|
c54944bacf940077efdff4a69351e5fe6e408a80
|
refs/heads/master
| 2023-04-25T12:34:14.281959 | 2021-05-21T20:33:36 | 2021-05-21T20:33:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,445 |
py
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import unicode_literals
import copy
import time
import psycopg2
import psycopg2.extras
from datadog_checks.base.log import get_check_logger
from datadog_checks.base.utils.db.sql import compute_sql_signature
from datadog_checks.base.utils.db.statement_metrics import StatementMetrics
from datadog_checks.base.utils.db.utils import default_json_event_encoding, resolve_db_host
from datadog_checks.base.utils.serialization import json
try:
import datadog_agent
except ImportError:
from ..stubs import datadog_agent
STATEMENTS_QUERY = """
SELECT {cols}
FROM {pg_stat_statements_view} as pg_stat_statements
LEFT JOIN pg_roles
ON pg_stat_statements.userid = pg_roles.oid
LEFT JOIN pg_database
ON pg_stat_statements.dbid = pg_database.oid
WHERE query != '<insufficient privilege>'
AND query NOT LIKE 'EXPLAIN %%'
{filters}
LIMIT {limit}
"""
DEFAULT_STATEMENTS_LIMIT = 10000
# Required columns for the check to run
PG_STAT_STATEMENTS_REQUIRED_COLUMNS = frozenset({'calls', 'query', 'rows'})
PG_STAT_STATEMENTS_METRICS_COLUMNS = frozenset(
{
'calls',
'rows',
'total_time',
'total_exec_time',
'shared_blks_hit',
'shared_blks_read',
'shared_blks_dirtied',
'shared_blks_written',
'local_blks_hit',
'local_blks_read',
'local_blks_dirtied',
'local_blks_written',
'temp_blks_read',
'temp_blks_written',
}
)
PG_STAT_STATEMENTS_TAG_COLUMNS = frozenset(
{
'datname',
'rolname',
'query',
}
)
PG_STAT_STATEMENTS_OPTIONAL_COLUMNS = frozenset({'queryid'})
PG_STAT_ALL_DESIRED_COLUMNS = (
PG_STAT_STATEMENTS_METRICS_COLUMNS | PG_STAT_STATEMENTS_TAG_COLUMNS | PG_STAT_STATEMENTS_OPTIONAL_COLUMNS
)
class PostgresStatementMetrics(object):
"""Collects telemetry for SQL statements"""
def __init__(self, check, config):
self._check = check
self._config = config
self._db_hostname = None
self._log = get_check_logger()
self._state = StatementMetrics()
self._stat_column_cache = []
def _execute_query(self, cursor, query, params=()):
try:
self._log.debug("Running query [%s] %s", query, params)
cursor.execute(query, params)
return cursor.fetchall()
except (psycopg2.ProgrammingError, psycopg2.errors.QueryCanceled) as e:
# A failed query may have been caused by stale columns in the cache. It's a rare edge case,
# but the next time the query runs, it will retrieve the correct columns.
self._stat_column_cache = []
self._log.warning('Statement-level metrics are unavailable: %s', e)
return []
def _get_pg_stat_statements_columns(self, db):
"""
Load the list of the columns available under the `pg_stat_statements` table. This must be queried because
version is not a reliable way to determine the available columns on `pg_stat_statements`. The database can
be upgraded without upgrading extensions, even when the extension is included by default.
"""
if self._stat_column_cache:
return self._stat_column_cache
# Querying over '*' with limit 0 allows fetching only the column names from the cursor without data
query = STATEMENTS_QUERY.format(
cols='*', pg_stat_statements_view=self._config.pg_stat_statements_view, limit=0, filters=""
)
cursor = db.cursor()
self._execute_query(cursor, query, params=(self._config.dbname,))
col_names = [desc[0] for desc in cursor.description] if cursor.description else []
self._stat_column_cache = col_names
return col_names
def _db_hostname_cached(self):
if self._db_hostname:
return self._db_hostname
self._db_hostname = resolve_db_host(self._config.host)
return self._db_hostname
def collect_per_statement_metrics(self, db, db_version, tags):
try:
rows = self._collect_metrics_rows(db)
if not rows:
return
payload = {
'host': self._db_hostname_cached(),
'timestamp': time.time() * 1000,
'min_collection_interval': self._config.min_collection_interval,
'tags': tags,
'postgres_rows': rows,
'postgres_version': 'v{major}.{minor}.{patch}'.format(
major=db_version.major, minor=db_version.minor, patch=db_version.patch
),
}
self._check.database_monitoring_query_metrics(json.dumps(payload, default=default_json_event_encoding))
except Exception:
db.rollback()
self._log.exception('Unable to collect statement metrics due to an error')
return []
def _load_pg_stat_statements(self, db):
available_columns = set(self._get_pg_stat_statements_columns(db))
missing_columns = PG_STAT_STATEMENTS_REQUIRED_COLUMNS - available_columns
if len(missing_columns) > 0:
self._log.warning(
'Unable to collect statement metrics because required fields are unavailable: %s',
', '.join(list(missing_columns)),
)
return []
query_columns = sorted(list(available_columns & PG_STAT_ALL_DESIRED_COLUMNS))
params = ()
filters = ""
if self._config.dbstrict:
filters = "AND pg_database.datname = %s"
params = (self._config.dbname,)
return self._execute_query(
db.cursor(cursor_factory=psycopg2.extras.DictCursor),
STATEMENTS_QUERY.format(
cols=', '.join(query_columns),
pg_stat_statements_view=self._config.pg_stat_statements_view,
filters=filters,
limit=DEFAULT_STATEMENTS_LIMIT,
),
params=params,
)
def _collect_metrics_rows(self, db):
rows = self._load_pg_stat_statements(db)
def row_keyfunc(row):
return (row['query_signature'], row['datname'], row['rolname'])
rows = self._normalize_queries(rows)
if not rows:
return []
available_columns = set(rows[0].keys())
metric_columns = available_columns & PG_STAT_STATEMENTS_METRICS_COLUMNS
rows = self._state.compute_derivative_rows(rows, metric_columns, key=row_keyfunc)
self._check.gauge('dd.postgres.queries.query_rows_raw', len(rows))
return rows
def _normalize_queries(self, rows):
normalized_rows = []
for row in rows:
normalized_row = dict(copy.copy(row))
try:
obfuscated_statement = datadog_agent.obfuscate_sql(row['query'])
except Exception as e:
# obfuscation errors are relatively common so only log them during debugging
self._log.debug("Failed to obfuscate query '%s': %s", row['query'], e)
continue
normalized_row['query'] = obfuscated_statement
normalized_row['query_signature'] = compute_sql_signature(obfuscated_statement)
normalized_rows.append(normalized_row)
return normalized_rows
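# --- Hedged illustration (not part of the original check) ---
# A minimal sketch of the LIMIT-0 column-discovery trick used in
# _get_pg_stat_statements_columns above; `dsn` is a hypothetical connection string.
def _demo_column_discovery(dsn):
    with psycopg2.connect(dsn) as conn:
        with conn.cursor() as cur:
            cur.execute("SELECT * FROM pg_stat_statements LIMIT 0")
            # cursor.description is populated even though no rows were fetched
            return [desc[0] for desc in cur.description]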
|
[
"noreply@github.com"
] |
shlomi-viz.noreply@github.com
|
ec0c49006c20ac7edf04435bcbb47f47b528c888
|
daaf072e12df5c49f46d24304ff7b94d9d86dd44
|
/aa.py
|
9ffe45541341e8478a4ef3d0bafee024a45ace4f
|
[] |
no_license
|
raygolden/pythontest
|
fbdd4ad6030d5105e579bd1f82c33bc7e4072b76
|
2636992c572853f5b5516107b743aa962313661c
|
refs/heads/master
| 2020-05-18T20:40:11.188722 | 2015-03-27T00:50:33 | 2015-03-27T00:50:33 | 32,622,443 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 ray.huang <ray.huang@ray-gii-rmbp>
#
# Distributed under terms of the MIT license.
"""
"""
import MySQLdb
conn = MySQLdb.connect( host = 'localhost', user = 'root', passwd = 'gree')
curs = conn.cursor()
conn.select_db('mysql')
result1 = curs.execute( 'show global status;')
result2 = curs.fetchall()
for i, j in result2:
print ' The status %s is: %s' % (i, j)
|
[
"Ray.huang@gree.net"
] |
Ray.huang@gree.net
|
aca7fa244db7e1aa8433fb9233c50dbc42d554ca
|
a3a3e1298db9555eda37f8da0c74a437d897cb1f
|
/compiled/Python3/Euler_Problem-079.py
|
74c47034017c8d8ec829b7d1935b24844f384663
|
[
"MIT"
] |
permissive
|
LStepanek/Project-Euler_Befunge
|
58f52254ee039ef6a5204fc65e62426c5e9d473a
|
f35fb2adecd737e410dee7b89b456cd61b25ce78
|
refs/heads/master
| 2021-01-01T17:51:52.413415 | 2017-05-03T17:23:01 | 2017-05-03T17:26:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,371 |
py
|
#!/usr/bin/env python3
# transpiled with BefunCompile v1.1.0 (c) 2015
import gzip, base64
_g = ("AR+LCAAAAAAABAC1UcFuwyAM/RV36S5F2WxYTEAR2mfsUKVHrpw49eNnQ5tW6y6rNEfB5snPzzYVnjSzGTgKwDMCyc9BvJW7RfBsBQ/NO5ofeTyDJ8nTXPEsPBKMGDuP"
+ "GLyjB553gkuuo+7br3U4gA1aq/fz2OcMblYNC/bivWC9ltQI+CtvFty1GXQW3HTdjB2zc+v1J+/ZfQ6b/Y33rN7XZv+o96SlaTIp0jHWFCV8J+OwaPT6cWA2xmJBixkF"
+ "zvkFX8Y4YWGuyj3HkcoClTJ6LEadHmxv8dRj7vECbai0B+8PazITZjJFyOJYjlqXW2d3YYG7ZZyPI8VWdWqlRXmh805RZRUIxjBDwQW8fBerJg2wHwAHCO0sGiwp6pA+"
+ "D+sJYsAcyYRV1Mq2H0XH3VBPMbbcIL0eeepBPuyEGWPtyQbwyltLa+MIGa2uqd+yuIllo7nd42VGBvvwLntMF7030PJJn8HenkE2zutN72qf+9M67MaMIRpaWiNepQpy"
+ "HIWYMzpZ2/IN4Pew4JgEAAA=")
g = base64.b64decode(_g)[1:]
for i in range(base64.b64decode(_g)[0]):
g = gzip.decompress(g)
g=list(g)
def gr(x,y):
if(x>=0 and y>=0 and x<56 and y<21):
return g[y*56 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<56 and y<21):
g[y*56 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(10,10,1)
sa(9)
return 1
def _1():
sa(sr());
sa(1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()-1);
sa(sr());
return 2
def _2():
return (1)if(sp()!=0)else(3)
def _3():
gw(3,0,5)
gw(2,0,48)
sp();
sa(49)
return 4
def _4():
sa(0)
sa(gr(gr(2,0),gr(3,0))-48)
gw(5,0,gr(gr(2,0),gr(3,0))-48)
sa(sp()+12);
sa(7)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(0)
sa(gr(gr(2,0)+1,gr(3,0))-48)
gw(6,0,gr(gr(2,0)+1,gr(3,0))-48)
sa(sp()+12);
sa(7)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(0)
sa(gr(gr(2,0)+2,gr(3,0))-48)
gw(7,0,gr(gr(2,0)+2,gr(3,0))-48)
sa(sp()+12);
sa(7)
v0=sp()
v1=sp()
gw(v1,v0,sp())
gw(gr(5,0)+1,gr(6,0)+1,2)
gw(gr(5,0)+1,gr(7,0)+1,2)
gw(gr(6,0)+1,gr(7,0)+1,2)
gw(gr(7,0)+1,gr(5,0)+1,0)
gw(gr(7,0)+1,gr(6,0)+1,0)
gw(gr(6,0)+1,gr(5,0)+1,0)
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return (18)if(sp()!=0)else(5)
def _5():
gw(9,0,0)
sp();
sa(9)
sa(gr(21,7))
return 6
def _6():
return (7)if(sp()!=0)else(17)
def _7():
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa((0)if(sp()!=0)else(1))
return (9)if(sp()!=0)else(8)
def _8():
sa(sr()+12)
sa(7)
v0=sp()
sa(gr(sp(),v0))
return 6
def _9():
sa(sp()+1);
return 10
def _10():
return (13)if(sr()-gr(9,0)==0)else(11)
def _11():
global t0
global t1
sa(sr());
sa(sr());
sa(sr()+12)
sa(9)
v0=sp()
sa(gr(sp(),v0))
sa(sp()+1);
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+11);
sa(9)
v0=sp()
sa(gr(sp(),v0))
sa(sp()+1);
v0=sp()
t0=gr(sp(),v0)
sa(sp()*t0);
t1=sp()
t1=(0)if(t1!=0)else(1)
return (9)if((t1)!=0)else(12)
def _12():
global t0
sa(sr());
sa(sr());
sa(sr()+12)
sa(9)
v0=sp()
t0=gr(sp(),v0)
gw(2,0,t0)
sa(sp()+11);
sa(9)
v0=sp()
sa(gr(sp(),v0))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+12);
sa(9)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()-1);
sa(sr());
sa(gr(2,0))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+12);
sa(9)
v0=sp()
v1=sp()
gw(v1,v0,sp())
return 10
def _13():
print(gr(12,9),end="",flush=True)
sp();
sa(1)
sa((0)if(1-gr(9,0)!=0)else(1))
return 14
def _14():
return (15)if(sp()!=0)else(16)
def _15():
sp();
return 19
def _16():
global t0
sa(sr()+12)
sa(9)
v0=sp()
t0=gr(sp(),v0)
print(t0,end="",flush=True)
sa(sp()+1);
sa((0)if(sr()-gr(9,0)!=0)else(1))
return 14
def _17():
sa(sr());
sa(gr(9,0))
gw(9,0,gr(9,0)+1)
sa(sp()+12);
sa(9)
v0=sp()
v1=sp()
gw(v1,v0,sp())
return 7
def _18():
global t0
t0=(td(sr(),10))+1
gw(3,0,t0)
t0=((tm(sr(),10))*4)+12
gw(2,0,t0)
return 4
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18]
c=0
while c<19:
c=m[c]()
|
[
"mailport@mikescher.de"
] |
mailport@mikescher.de
|
a0841b1a5b949fc825d4ef568892bd91e7b54aee
|
fd5edea96dcdc04a3092794439b605d29e3371f3
|
/python/supervised/make_cubic.py
|
215ad6b19100c3ebac2cff1e37bb2542016d9203
|
[] |
no_license
|
kristtuv/Thesis
|
bb2373d12a9319fadd76c843870705741d3bcfc6
|
2b7beee27aa7f0301d36f498a01532ca62c48742
|
refs/heads/master
| 2021-02-02T01:27:35.535805 | 2020-02-27T13:47:16 | 2020-02-27T13:47:16 | 243,525,211 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 259 |
py
|
from create_crystals_from_poscar import replicate_cell
# pipe = import_file()
# data = pipe.compute()
# export_file(data, 'cubic.data', 'lammps/data')
file_name = '1541503.cif'
dump_name = file_name.replace('.cif', '')
replicate_cell(file_name, dump_name)
|
[
"kristtuv@student.matnat.uio.no"
] |
kristtuv@student.matnat.uio.no
|
4c986677b74476c713e5b8104509068a990ef3d8
|
de072cdcb568fb8dfb487166a2579fbfc3610090
|
/socialenv/bin/misaka
|
8529ad21c79d093177fdda1561558f27308e39ec
|
[] |
no_license
|
corri-golden/BeSocial
|
75379964a32213be8b264ad94f945f39dc2c589d
|
f7b5f6f14ec129dc18c1cb2cd9988197b76fe39d
|
refs/heads/master
| 2023-01-03T10:38:03.791077 | 2020-11-02T15:20:38 | 2020-11-02T15:20:38 | 309,409,948 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,996 |
#!/Users/datduu.../workspace/python/besocial/besocialproject/socialenv/bin/python
import sys
from os import path
from misaka import *
from misaka.utils import extension_map, html_flag_map
help = '''\
Usage: misaka [--ext-<extension>...] [--html-<flag>...] [--smartypants] [<file>...]
Parser extensions:
{}
Render flags:
{}
Other options:
--smartypants
-h | --help
'''.format(
'\n'.join([' --ext-' + a for a in extension_map.keys()]),
'\n'.join([' --html-' + a for a in html_flag_map.keys()]))
if __name__ == '__main__':
args = sys.argv[1:]
files = []
flags = []
extensions = []
pants_enabled = False
for arg in args:
if arg in ('-h', '--help'):
print(help)
sys.exit(0)
elif arg == '--smartypants':
pants_enabled = True
elif arg.startswith('--ext-'):
arg = arg[6:]
if arg not in extension_map:
print('--ext-{0} is not a valid Markdown extension'.format(arg))
sys.exit(1)
extensions.append(arg)
elif arg.startswith('--html-'):
arg = arg[7:]
if arg not in html_flag_map:
print('--html-{0} is not a valid HTML render flag'.format(arg))
sys.exit(1)
flags.append(arg)
else:
# If it's not a extension or HTML flag,
# then it must be a file, right?
files.append(arg)
renderer = HtmlRenderer(flags)
if pants_enabled:
to_html = lambda n: smartypants(Markdown(renderer, extensions)(n))
else:
to_html = Markdown(renderer, extensions)
if files:
for fn in files:
fn = path.abspath(fn)
if not path.exists(fn):
print('Does not exist: %s' % fn)
else:
with open(fn, 'r') as fd:
source = fd.read()
print(to_html(source))
else:
print(to_html(sys.stdin.read()))
|
[
"corri.golden@gmail.com"
] |
corri.golden@gmail.com
|
|
5f27eb80c78136364d163e215ad2d3eb62a1377d
|
d576990621d928a20605705e5cc576800e59cb43
|
/users/models.py
|
1d3a7a149e55e15ba44dfe7088e5c4e616e5d9cc
|
[] |
no_license
|
zaldivards/blogging
|
8f5673886f7da0e87d6a669cac9471828bf978ab
|
3ca2f50c101e00cd6a616d90a9ddfbcc33494058
|
refs/heads/master
| 2022-10-11T05:17:39.958943 | 2020-06-13T05:13:51 | 2020-06-13T05:13:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from profiles.models import Profile
# Create your models here.
def post_save_user(sender, instance, created, *args, **kwargs):
if created:
Profile.objects.create(user=instance)
post_save.connect(post_save_user, sender=get_user_model())
|
[
"zh15002@ues.edu.sv"
] |
zh15002@ues.edu.sv
|
d001fc09b45ff9b5c443917aac2060acdde459e3
|
204ec78fcebcea9e1e1da4905cf3fad0a514b01f
|
/pyocd/target/builtin/target_LPC55S28Jxxxxx.py
|
821279940cd5a3a19581a9781d99ff1dc2693895
|
[
"Apache-2.0"
] |
permissive
|
ARMmbed/pyOCD
|
659340bf8753aa8e15a72890b8bea64dff2c2f42
|
d4cdcf7e532cae17caad866839287bbe1e0d952b
|
refs/heads/master
| 2023-05-31T13:45:15.797588 | 2020-10-12T13:55:47 | 2020-10-12T13:55:47 | 190,203,829 | 3 | 1 |
Apache-2.0
| 2019-07-05T11:05:40 | 2019-06-04T13:09:56 |
Python
|
UTF-8
|
Python
| false | false | 8,596 |
py
|
# pyOCD debugger
# Copyright (c) 2019-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..family.target_lpc5500 import LPC5500Family
from ...core.memory_map import (FlashRegion, RamRegion, RomRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
FLASH_ALGO = {
'load_address' : 0x20000000,
# Flash algorithm as a hex string
'instructions': [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0xf240b580, 0xf2c00004, 0xf6420000, 0xf84961e0, 0xf2401000, 0xf2c52000, 0x21000000, 0x1080f8c0,
0x1084f8c0, 0x1180f8c0, 0x71fbf647, 0xf6406001, 0x21ff6004, 0x0000f2c5, 0x01def2cc, 0xf04f6001,
0x210240a0, 0xf2407001, 0xf2c00010, 0x44480000, 0xf874f000, 0xbf182800, 0xbd802001, 0x47702000,
0xf240b580, 0xf2c00010, 0xf2460000, 0x4448636c, 0xf6c62100, 0xf44f3365, 0xf0002200, 0x2800f87f,
0x2001bf18, 0xbf00bd80, 0xf020b580, 0xf2404170, 0xf2c00010, 0xf2460000, 0x4448636c, 0x3365f6c6,
0x4200f44f, 0xf86af000, 0xbf182800, 0xbd802001, 0x4614b570, 0x0441460d, 0x4670f020, 0xf240d10d,
0xf2c00010, 0xf2460000, 0x4448636c, 0xf6c64631, 0xf44f3365, 0xf0004200, 0xf240f851, 0xf2c00010,
0xf5b50000, 0xbf987f00, 0x7500f44f, 0x46314448, 0x462b4622, 0xf86af000, 0xbf182800, 0xbd702001,
0x460cb5b0, 0xf0204605, 0x46114070, 0xf0004622, 0x2800fa01, 0x4425bf08, 0xbdb04628, 0x460ab580,
0x4170f020, 0x0010f240, 0x0000f2c0, 0xf0004448, 0x2800f875, 0x2001bf18, 0x0000bd80, 0x02f4f241,
0x3200f2c1, 0x290068d1, 0x2360d00a, 0x78926283, 0xf2406849, 0xf2c0030c, 0xf8490300, 0x47082003,
0x40baf240, 0x0000f2c0, 0x41c7f240, 0x0100f2c0, 0x44794478, 0xf0002284, 0xbf00f98d, 0x0c0cf240,
0x0c00f2c0, 0xc00cf859, 0x0f02f1bc, 0xf244d104, 0xf2c11c3b, 0x47603c00, 0x1c00f241, 0x3c00f2c1,
0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c008, 0x406af240, 0x0000f2c0, 0x4177f240, 0x0100f2c0,
0x44794478, 0xf0002295, 0xbf00f965, 0x0c0cf240, 0x0c00f2c0, 0xc00cf859, 0x0f02f1bc, 0xf244d104,
0xf2c11c9d, 0x47603c00, 0x1c00f241, 0x3c00f2c1, 0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c00c,
0x401af240, 0x0000f2c0, 0x4127f240, 0x0100f2c0, 0x44794478, 0xf00022a5, 0xbf00f93d, 0x1300f241,
0x3300f2c1, 0x2b00681b, 0x691bd001, 0xf2404718, 0xf2c030ec, 0xf2400000, 0xf2c031f9, 0x44780100,
0x22ad4479, 0xf926f000, 0x0c0cf240, 0x0c00f2c0, 0xc00cf859, 0x0f02f1bc, 0xf244d104, 0xf2c12c7d,
0x47603c00, 0x1c00f241, 0x3c00f2c1, 0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c014, 0x309ef240,
0x0000f2c0, 0x31abf240, 0x0100f2c0, 0x44794478, 0xf00022c2, 0xbf00f8ff, 0x1300f241, 0x3300f2c1,
0x2b00681b, 0x699bd001, 0xf2404718, 0xf2c03070, 0xf2400000, 0xf2c0317d, 0x44780100, 0x22cb4479,
0xf8e8f000, 0x1100f241, 0x3100f2c1, 0x29006809, 0x6a89d001, 0xf2404708, 0xf2c03044, 0xf2400000,
0xf2c03151, 0x44780100, 0x22d54479, 0xf8d2f000, 0x1100f241, 0x3100f2c1, 0x29006809, 0x6ac9d001,
0xf2404708, 0xf2c03018, 0xf2400000, 0xf2c03125, 0x44780100, 0x22dc4479, 0xf8bcf000, 0x1300f241,
0x3300f2c1, 0x2b00681b, 0x6b1bd001, 0xf2404718, 0xf2c020ec, 0xf2400000, 0xf2c021f9, 0x44780100,
0x22e34479, 0xf8a6f000, 0x1200f241, 0x3200f2c1, 0x2a006812, 0x6b52d001, 0xf2404710, 0xf2c020c0,
0xf2400000, 0xf2c021cd, 0x44780100, 0x22ea4479, 0xf890f000, 0x1c00f241, 0x3c00f2c1, 0xc000f8dc,
0x0f00f1bc, 0xf8dcd002, 0x4760c038, 0x208ef240, 0x0000f2c0, 0x219bf240, 0x0100f2c0, 0x44794478,
0xf00022f1, 0xbf00f877, 0x1200f241, 0x3200f2c1, 0x2a006812, 0x6bd2d001, 0xf2404710, 0xf2c02060,
0xf2400000, 0xf2c0216d, 0x44780100, 0x22f84479, 0xf860f000, 0x1200f241, 0x3200f2c1, 0x2a006812,
0x6c12d001, 0xf2404710, 0xf2c02034, 0xf2400000, 0xf2c02141, 0x44780100, 0x22ff4479, 0xf84af000,
0x1300f241, 0x3300f2c1, 0x2b00681b, 0x6c5bd001, 0xf2404718, 0xf2c02008, 0xf2400000, 0xf2c02115,
0x44780100, 0xf44f4479, 0xf0007283, 0xbf00f833, 0x1300f241, 0x3300f2c1, 0x2b00681b, 0x6c9bd001,
0xf2404718, 0xf2c010d8, 0xf2400000, 0xf2c011e5, 0x44780100, 0xf2404479, 0xf000120d, 0xbf00f81b,
0x1c00f241, 0x3c00f2c1, 0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c04c, 0x10a2f240, 0x0000f2c0,
0x11aff240, 0x0100f2c0, 0x44794478, 0x728af44f, 0xf800f000, 0x4605b50e, 0x460e4614, 0xf000a013,
0x4628f870, 0xf86df000, 0xf000a016, 0x4630f86a, 0xf867f000, 0xf000a015, 0x2100f864, 0x100bf88d,
0xf10d210a, 0xf88d000a, 0xe008100a, 0xf2f1fb94, 0x4212fb01, 0xf4f1fb94, 0xf8003230, 0x2c002d01,
0xf000dcf4, 0xf000f84e, 0x0000f841, 0x202a2a2a, 0x65737361, 0x6f697472, 0x6166206e, 0x64656c69,
0x0000203a, 0x6966202c, 0x0020656c, 0x696c202c, 0x0020656e, 0x0301ea40, 0x079bb510, 0x2a04d10f,
0xc810d30d, 0x1f12c908, 0xd0f8429c, 0xba19ba20, 0xd9014288, 0xbd102001, 0x30fff04f, 0xb11abd10,
0xd00307d3, 0xe0071c52, 0xbd102000, 0x3b01f810, 0x4b01f811, 0xd1071b1b, 0x3b01f810, 0x4b01f811,
0xd1011b1b, 0xd1f11e92, 0xbd104618, 0x2000b510, 0xf81ef000, 0x8000f3af, 0x4010e8bd, 0xf0002001,
0xb510b811, 0xe0024604, 0xf0001c64, 0x7820f804, 0xd1f92800, 0xb508bd10, 0xf88d4669, 0x20030000,
0xbd08beab, 0x20184901, 0xe7febeab, 0x00020026, 0xf000b510, 0xe8bdf80b, 0xf0004010, 0x4770b801,
0xd0012800, 0xbfeef7ff, 0x00004770, 0x2100b510, 0xf000a002, 0x2001f813, 0x0000bd10, 0x41474953,
0x3a545242, 0x6e624120, 0x616d726f, 0x6574206c, 0x6e696d72, 0x6f697461, 0x0000006e, 0x4605b570,
0x200a460c, 0x1c6de000, 0xffc5f7ff, 0x7828b135, 0xd1f82800, 0x1c64e002, 0xffbdf7ff, 0x7820b114,
0xd1f82800, 0x4070e8bd, 0xf7ff200a, 0x4c46bfb4, 0x5f485341, 0x5f495041, 0x45455254, 0x70616900,
0x73662f31, 0x61695f6c, 0x632e3170, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
],
# Relative function addresses
'pc_init': 0x20000021,
'pc_unInit': 0x2000007d,
'pc_program_page': 0x200000d1,
'pc_erase_sector': 0x200000a9,
'pc_eraseAll': 0x20000081,
'static_base' : 0x20000000 + 0x00000020 + 0x00000650,
'begin_stack' : 0x20000900,
'begin_data' : 0x20000000 + 0x1000,
'page_size' : 0x200,
'analyzer_supported' : False,
'analyzer_address' : 0x00000000,
'page_buffers' : [0x20001000, 0x20001200], # Enable double buffering
'min_program_length' : 0x200,
# Flash information
'flash_start': 0x0,
'flash_size': 0x80000,
'sector_sizes': (
(0x0, 0x8000),
)
}
class LPC55S28(LPC5500Family):
MEMORY_MAP = MemoryMap(
FlashRegion(name='nsflash', start=0x00000000, length=0x00080000, access='rx',
blocksize=0x200,
is_boot_memory=True,
are_erased_sectors_readable=False,
algo=FLASH_ALGO),
RomRegion( name='nsrom', start=0x03000000, length=0x00020000, access='rx'),
RamRegion( name='nscoderam', start=0x04000000, length=0x00008000, access='rwx',
default=False),
FlashRegion(name='sflash', start=0x10000000, length=0x00080000, access='rx',
blocksize=0x200,
is_boot_memory=True,
are_erased_sectors_readable=False,
algo=FLASH_ALGO,
alias='nsflash'),
RomRegion( name='srom', start=0x13000000, length=0x00020000, access='srx',
alias='nsrom'),
RamRegion( name='scoderam', start=0x14000000, length=0x00008000, access='srwx',
alias='nscoderam',
default=False),
RamRegion( name='nsram', start=0x20000000, length=0x00030000, access='rwx'),
RamRegion( name='nsram4', start=0x20040000, length=0x00004000, access='rwx'),
RamRegion( name='sram', start=0x30000000, length=0x00030000, access='srwx',
alias='nsram'),
RamRegion( name='sram4', start=0x30040000, length=0x00004000, access='rwx',
alias='nsram4'),
)
def __init__(self, session):
super(LPC55S28, self).__init__(session, self.MEMORY_MAP)
self._svd_location = SVDFile.from_builtin("LPC55S28.xml")
|
[
"flit@me.com"
] |
flit@me.com
|
eccb82a49b4249ba854e0abde733606f08efaba1
|
d635f439116674f6ed5d8c7b3bbc6991026925eb
|
/scheduled_bots/scripts/fix_zfin_names.py
|
faf8890400ad7d41ac1080d8663d68788af0c994
|
[
"MIT"
] |
permissive
|
SuLab/scheduled-bots
|
141a73949e2b2e97738944ecb1a0fcd214af5d77
|
bc83f5e013fd18247805efc4bf1f8f948aef859e
|
refs/heads/main
| 2023-02-24T16:19:24.926230 | 2023-02-17T10:48:03 | 2023-02-17T10:48:03 | 74,090,171 | 7 | 13 |
MIT
| 2022-12-08T05:07:36 | 2016-11-18T03:04:06 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 4,359 |
py
|
"""
One off, hacky script to fix already created zebrafish genes with duplicate names
It finds all zebrafish genes with dupe names (from their mygene/entrezgene gene symbol) that already exist in Wikidata
and changes the label
In the future, zebrafish genes without a ZFIN ID will be skipped
"""
from scheduled_bots.geneprotein import GeneBot, ProteinBot, HelperBot
from scheduled_bots.geneprotein.GeneBot import WDPASS, WDUSER, organisms_info, PROPS
from scheduled_bots.geneprotein.ChromosomeBot import ChromosomeBot
from wikidataintegrator import wdi_helpers, wdi_login
from pymongo import MongoClient
from collections import Counter
from wikidataintegrator.wdi_helpers import id_mapper
def do_nothing(records):
for record in records:
yield record
def genes():
entrez_wd = id_mapper("P351")
login = wdi_login.WDLogin(user=WDUSER, pwd=WDPASS)
coll = MongoClient().wikidata_src.mygene
metadata_coll = MongoClient().wikidata_src.mygene_sources
metadata = metadata_coll.find_one()
organism_info = organisms_info[7955]
doc_filter = {'taxid': 7955, 'entrezgene': {'$exists': True}}
docs = coll.find(doc_filter).batch_size(20)
total = docs.count()
print("total number of records: {}".format(total))
docs = HelperBot.validate_docs(docs, 'eukaryotic', PROPS['Entrez Gene ID'])
records = HelperBot.tag_mygene_docs(docs, metadata)
records = list(records)
# find all names with dupes
dupe_names = {k for k,v in Counter([x['symbol']['@value'] for x in records]).items() if v>1}
# for all records that have one of these names, change the name to "name (entrezgene)"
records = [x for x in records if x['symbol']['@value'] in dupe_names]
for record in records:
record['symbol']['@value'] = record['symbol']['@value'] + " (" + str(record['entrezgene']['@value']) + ")"
# skip items that aren't already in wikidata (DONT CREATE NEW ITEMS!)
records = [x for x in records if str(x['entrezgene']['@value']) in entrez_wd]
print("len records: {}".format(len(records)))
cb = ChromosomeBot()
chr_num_wdid = cb.get_or_create(organism_info, login=login)
bot = GeneBot.ChromosomalGeneBot(organism_info, chr_num_wdid, login)
bot.filter = lambda x: iter(x)
bot.run(records, total=total, fast_run=True, write=True)
def proteins():
uni_wd = id_mapper("P352")
login = wdi_login.WDLogin(user=WDUSER, pwd=WDPASS)
coll = MongoClient().wikidata_src.mygene
metadata_coll = MongoClient().wikidata_src.mygene_sources
metadata = metadata_coll.find_one()
organism_info = organisms_info[7955]
doc_filter = {'taxid': 7955, 'uniprot': {'$exists': True}, 'entrezgene': {'$exists': True}}
docs = coll.find(doc_filter).batch_size(20)
total = docs.count()
print("total number of records: {}".format(total))
docs = HelperBot.validate_docs(docs, 'eukaryotic', PROPS['Entrez Gene ID'])
records = HelperBot.tag_mygene_docs(docs, metadata)
records = list(records)
for record in records:
if 'Swiss-Prot' in record['uniprot']['@value']:
record['uniprot_id'] = record['uniprot']['@value']['Swiss-Prot']
elif 'TrEMBL' in record['uniprot']['@value']:
record['uniprot_id'] = record['uniprot']['@value']['TrEMBL']
records = [x for x in records if 'uniprot_id' in x and isinstance(x['uniprot_id'], str)]
# find all names with dupes
dupe_names = {k for k,v in Counter([x['name']['@value'] for x in records]).items() if v>1}
# for all records that have one of these names, change the name to "name (uniprot)"
records = [x for x in records if x['name']['@value'] in dupe_names]
print("len dupe records: {}".format(len(records)))
for record in records:
record['name']['@value'] = record['name']['@value'] + " (" + record['uniprot_id'] + ")"
# skip items that aren't already in wikidata (DONT CREATE NEW ITEMS!)
records = [x for x in records if x['uniprot_id'] in uni_wd]
print("len records: {}".format(len(records)))
cb = ChromosomeBot()
chr_num_wdid = cb.get_or_create(organism_info, login=login)
bot = ProteinBot.ProteinBot(organism_info, chr_num_wdid, login)
bot.filter = lambda x: iter(x)
bot.run(records, total=total, fast_run=False, write=True)
if __name__ == "__main__":
#genes()
proteins()
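# Hedged mini-example of the dupe-name pattern used above (hypothetical symbols, not real data):
# Counter(['sox9a', 'sox9a', 'pax2a']) yields {'sox9a': 2, 'pax2a': 1}, so the dupe set is
# {'sox9a'} and that record's label would be rewritten as e.g. 'sox9a (100001)'.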
|
[
"gstupp@scripps.edu"
] |
gstupp@scripps.edu
|
93b504520b8dbb07d500b33fddc1aabeec8f54c6
|
08ed089d8e2e17946390ce7184912ec9e266c904
|
/prog22.py
|
2a76409732ae7fb9b5dc4c222413e38e6cdcd3ac
|
[] |
no_license
|
SiddhiPevekar/Python-Programming-MHM
|
16b2676b7e096aa4916a0f82e215ed2e13f57674
|
973da504159f96a9c9bac3232e9c50080b441888
|
refs/heads/master
| 2022-11-30T03:16:03.111083 | 2020-08-18T20:24:44 | 2020-08-18T20:24:44 | 288,554,185 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 166 |
py
|
#program to print table of a given number
n=int(input("enter any number to print the table for:\n"))
i=1
while i<=10:
print(n,"*",i,"=",n*i)
i+=1
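# e.g. n = 3 prints "3 * 1 = 3" through "3 * 10 = 30"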
|
[
"noreply@github.com"
] |
SiddhiPevekar.noreply@github.com
|
ec2556c6ce8254a2f694afbb645a4a477ff4d8c5
|
fbc0d5c081dbfead483a1e1e226a380446bcd56e
|
/TDT4120 - Algoritmer og datastrukturer/exercises/7/solution.py
|
e63b310aa452513d2a21ed49018858926484d730
|
[] |
no_license
|
blauks/ntnu-2
|
58eec17a8f6db7424a6cb44d74e029b68256320a
|
38fa0ddfaa726408f087d1792fd0e00810f9243c
|
refs/heads/master
| 2020-08-16T11:01:10.946232 | 2019-08-08T11:28:48 | 2019-08-08T11:28:48 | 215,494,235 | 0 | 1 | null | 2019-10-16T08:15:48 | 2019-10-16T08:15:48 | null |
UTF-8
|
Python
| false | false | 2,344 |
py
|
#!/usr/bin/python3
from sys import stdin
def maxValue(widths, heights, values, paperWidth, paperHeight):
# Create array; width major
result = [None] * (paperWidth + 1)
for w in range(paperWidth + 1):
result[w] = [-1] * (paperHeight + 1)
# Find the minimal width or height
minSize = 10**9
for w in widths:
if w < minSize:
minSize = w
for h in heights:
if h < minSize:
minSize = h
# Zero out all entries with too small width or height
for a in range(minSize):
for w in range(paperWidth):
result[w][a] = 0
for h in range(paperHeight):
result[a][h] = 0
# Set the values we know (better values may be found, though)
for x in range(len(values)):
if widths[x] <= paperWidth and heights[x] <= paperHeight and result[widths[x]][heights[x]] < values[x]:
result[widths[x]][heights[x]] = values[x]
if heights[x] <= paperWidth and widths[x] <= paperHeight and result[heights[x]][widths[x]] < values[x]:
result[heights[x]][widths[x]] = values[x]
# Calculate the other entries
for w in range(paperWidth + 1):
for h in range(paperHeight + 1):
if result[w][h] == 0:
continue
if result[w][h] == -1:
best = 0
else:
best = result[w][h]
for cutWidth in range(1, w):
if best < result[cutWidth][h] + result[w - cutWidth][h]:
best = result[cutWidth][h] + result[w - cutWidth][h]
for cutHeight in range(1, h):
if best < result[w][cutHeight] + result[w][h - cutHeight]:
best = result[w][cutHeight] + result[w][h - cutHeight]
result[w][h] = best
return result[paperWidth][paperHeight]
widths = []
heights = []
values = []
for triple in stdin.readline().split():
dim_value = triple.split(':', 1)
dim = dim_value[0].split('x', 1)
width = int(dim[0][1:])
height = int(dim[1][:-1])
value = int(dim_value[1])
widths.append(int(width))
heights.append(int(height))
values.append(int(value))
for line in stdin:
paperWidth, paperHeight = line.split('x', 1)
print(maxValue(widths, heights, values, int(paperWidth), int(paperHeight)))
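# Hedged worked example (hypothetical input, not part of the submission):
# stdin of
#   [4x2]:10
#   4x2
# parses to widths=[4], heights=[2], values=[10]; the DP sets result[4][2] = 10,
# so maxValue([4], [2], [10], 4, 2) prints 10 for that paper size.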
|
[
"nikasmh@hotmail.com"
] |
nikasmh@hotmail.com
|
2e1c0c7bce599f5fe1ed94db622874e338896168
|
d956b85ffe0b733a3ef4b26cd88e3ff204a9ccde
|
/jogos/sistema.py
|
8a236cda144741a25639bc89370a570fed83cd3d
|
[] |
no_license
|
joselsantospqt/Python
|
5e871014c003c06cb3f7d63f83491677e858ca8b
|
2473dce6ba3b8f4784bc655d36095dfe235bc261
|
refs/heads/main
| 2023-06-21T07:32:24.488452 | 2021-08-04T00:32:03 | 2021-08-04T00:32:03 | 303,821,152 | 0 | 0 | null | 2021-04-06T00:14:02 | 2020-10-13T20:28:02 |
Python
|
UTF-8
|
Python
| false | false | 290 |
py
|
usuario = input("Informe o usuário do sistema!")
if(usuario == "Flávio"):
print("Seja bem-vindo Flávio!")
elif(usuario == "Douglas"):
print("Seja bem-vindo Douglas!")
elif(usuario == "Ricardo"):
print("Seja bem-vindo Ricardo")
else:
print("Usuário não identificado!")
|
[
"jricardo@trustprev.com.br"
] |
jricardo@trustprev.com.br
|
215bda69bdf2236f417186baa73d8bd555c235e5
|
a6f5be55760ccc4c1dbdf5ddd9698087687cfb52
|
/main.py
|
73a939cc3c9ff34b233d6234c53e06b827ef1754
|
[] |
no_license
|
asoa/twitter_trend_summary
|
882446a8a051b49f4f38c991a7899c250df101de
|
02e6e70d3ca60240d5b1ca92dd5473c5f2fab54e
|
refs/heads/master
| 2020-04-21T14:03:50.167716 | 2019-02-08T18:05:18 | 2019-02-08T18:05:18 | 169,622,012 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,723 |
py
|
#!/usr/bin/env python3
import authenticate
import query
import statistics
TREND_LENGTH = 0
def prompt(top_trends):
"""Prompt user to enter integer selection for hashtag to get statistics for
Args:
top_trends: (list) sorted list on tweet volume
Returns: None
"""
print("The top trends sorted by count are: \n")
for n in range(0, TREND_LENGTH):
print("\t{}: {} {}".format(n + 1, top_trends[n][0], top_trends[n][1]))
def main():
global TREND_LENGTH
api = authenticate.Authenticate()
q = query.TwitterQuery(twitter_api=api.twitter_api, query_type='trends')
top_trends = sorted([(trend['name'], trend['tweet_volume']) for trend in q.query_result[0]['trends']
if trend['tweet_volume'] is not None],
key=lambda x: x[1], reverse=True)
TREND_LENGTH = len(top_trends)
hashtag = 0
while hashtag not in range(1, TREND_LENGTH): # loop while hashtag isn't in range
try:
prompt(top_trends)
hashtag = int(input("\nWhat hashtag do you want to get statistics for? "))
assert hashtag in range(1, TREND_LENGTH + 1) # if hashtag isn't in range, throw exception
tweets = query.TwitterQuery(api.twitter_api, query_type='search', q=top_trends[hashtag-1][0], out_file=True)
stats = statistics.Statistics(tweets.json_list)
stats.print_prettytable()
hashtag = 0 # resets hashtag back to 0 to continue to prompt user for hashtag selection
except:
print(" ***** Choose hashtag between 1 and {} ***** \n".format(TREND_LENGTH))
# prompt(top_trends)
if __name__ == "__main__":
main()
|
[
"abailey82@gmail.com"
] |
abailey82@gmail.com
|
c485360b827f31c8c734fea935c1397580298209
|
e1fc93bea161969e2e262d5cc4bcb02128dbdd39
|
/mymodule/Mypickle.py
|
39040ebd1d6601530574ea816816f276e1fb3176
|
[] |
no_license
|
Sojiro323/twitterAPI
|
079991b221de5cd963e7615e579982e5fbce68ad
|
0c83371c52adc945abf0571791ae40391cd580fa
|
refs/heads/master
| 2021-09-06T18:33:07.126042 | 2018-02-09T19:15:53 | 2018-02-09T19:15:53 | 106,917,864 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,603 |
py
|
import pickle
from inspect import currentframe
import os
import sys
def load(path, files):
if path[-1] != "/": path = path + "/"
#names = {id(v):k for k,v in currentframe().f_back.f_locals.items()}
#name = path + names.get(id(file)) + ".pickle"
if isinstance(files, str):
name = path + files + ".pickle"
print("\nload {0}.pickle".format(files))
if not os.path.exists(name):
print('not found directry or file:{0}'.format(name))
sys.exit()
with open(name, mode='rb') as f:
load_file = pickle.load(f)
return load_file
return_files = []
print("load pickles {0}".format(files))
for f in files:
name = path + f + ".pickle"
if not os.path.exists(name):
print('not found directry or file:{0}'.format(name))
sys.exit()
with open(name, mode='rb') as f:
load_file = pickle.load(f)
return_files.append(load_file)
return return_files
def save_name(path,*files):
print("\nsave pickle...")
if path[-1] != "/": path = path + "/"
names = {id(v):k for k,v in currentframe().f_back.f_locals.items()}
for f in files:
name = path + names.get(id(f)) + ".pickle"
with open(name, mode='wb') as p:
pickle.dump(f, p)
print('{0}:{1}'.format(name, len(f)))
def save(path, f, name):
print("\nsave pickle...")
if path[-1] != "/": path = path + "/"
name = path + name + ".pickle"
with open(name, mode='wb') as p:
pickle.dump(f, p)
print('{0}:{1}'.format(name, len(f)))
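if __name__ == "__main__":
    # Hedged usage sketch (hypothetical path and object, not part of the module)
    sample = {"a": 1}
    save("/tmp", sample, "sample")      # writes /tmp/sample.pickle
    restored = load("/tmp", "sample")   # reads it back
    print(restored)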
|
[
"m23622059@gmail.com"
] |
m23622059@gmail.com
|
321b950e4d0ce2addfd5f32ac86913881c735794
|
73aca8a8c9c0a197e99af31bd124681b1b68e2bf
|
/path-interals/opening-door/opening_door.py
|
3846f84ba484facd57bd8bb4869b30fa5731240d
|
[] |
no_license
|
i-abr/EnsembleMPPI
|
a4f7013fa990f997c6c0ce94647aa733bf78da86
|
b3fd5bccf720fd218cdb71880b6661306dbf7a14
|
refs/heads/master
| 2023-06-24T18:17:40.798344 | 2020-08-20T03:52:01 | 2020-08-20T03:52:01 | 274,959,810 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,081 |
py
|
#!/usr/bin/env python3
import numpy as np
from scipy.stats import multivariate_normal
from mujoco_py import load_model_from_path, MjSim, MjViewer, MjSimPool
import os
from mult_model_mppi import MultModelMPPIMujoco
from numpy.random import choice
import pickle
from mujoco_py.generated import const
frame_skip = 2
sim_model_path = 'assets/DAPG_door.xml'
model_path = 'assets/DAPG_door.xml'
model = load_model_from_path(model_path)
sim = MjSim(model)
viewer = MjViewer(sim)
### data idx that are necessary in this example
door_hinge_did = model.jnt_dofadr[model.joint_name2id('door_hinge')]
grasp_sid = model.site_name2id('S_grasp')
handle_sid = model.site_name2id('S_handle')
door_bid = model.body_name2id('frame')
#### TODO: TESTING CODE REMOVE LATER
sim.model.jnt_axis[door_hinge_did] = np.array([-1.,0., 0])
sim.model.jnt_pos[door_hinge_did][2] = 0.2
np.random.seed(451)
def update_actuator_gains(_sim):
_sim.model.actuator_gainprm[_sim.model.actuator_name2id('A_WRJ1'):_sim.model.actuator_name2id('A_WRJ0')+1,:] = np.array([10,0,0])
_sim.model.actuator_gainprm[_sim.model.actuator_name2id('A_FFJ3'):_sim.model.actuator_name2id('A_THJ0')+1,:] = np.array([1,0,0])
_sim.model.actuator_biasprm[_sim.model.actuator_name2id('A_WRJ1'):_sim.model.actuator_name2id('A_WRJ0')+1,:] = np.array([0,-10,0])
_sim.model.actuator_biasprm[_sim.model.actuator_name2id('A_FFJ3'):_sim.model.actuator_name2id('A_THJ0')+1,:] = np.array([0,-1,0])
def task(data, action):### this function assumes that the input data is a numpy array
touch_sensor = data.sensordata[:]
palm_pos = data.site_xpos[grasp_sid].ravel()
handle_pos = data.site_xpos[handle_sid].ravel()
door_pos = data.qpos[door_hinge_did]
loss = 0.0
#if door_pos < 1.57:
loss += 400.0*np.linalg.norm(palm_pos - handle_pos) + 0.001*np.linalg.norm(data.qvel[:])
return loss - 100.0 * door_pos
def terminal_cost(data):
touch_sensor = data.sensordata[:]
palm_pos = data.site_xpos[grasp_sid].ravel()
handle_pos = data.site_xpos[handle_sid].ravel()
door_pos = data.qpos[door_hinge_did]
loss = 0.0
# if door_pos < 1.4:
loss += 10.0 * np.linalg.norm(palm_pos - handle_pos) #- 1000* np.sum(touch_sensor)
return 0*np.linalg.norm(data.qvel[:])
def update_param(_model):
axis_choices = [np.array([-1., 0., 0.]), np.array([0., 0., 1.])]
idx = np.random.choice(len(axis_choices))
chosen_axis = axis_choices[idx]
_model.jnt_axis[door_hinge_did] = chosen_axis
if idx == 0:
_model.jnt_pos[door_hinge_did][:] = 0.
_model.jnt_pos[door_hinge_did][2] = np.random.uniform(0.1, 0.5) #* chosen_axis
else:
_model.jnt_pos[door_hinge_did][:] = 0.
_model.jnt_pos[door_hinge_did][0] = np.random.uniform(0.1, 0.5)
def update_distribution(sims, probs):
var_joint_pos = 0.0
for _sim, m_prob in zip(mppi.pool.sims, mppi.model_probs):
_diff = _sim.model.jnt_pos[door_hinge_did].ravel() - mean_joint_pos
var_joint_pos += m_prob * np.outer(_diff, _diff)
#print('Mean estimated Door hinge pos: {}, axis : {}, var : {}'.format(mean_joint_pos, mean_joint_axis, var_joint_pos))
for sim in sims:
sampled_sim = choice(sims, p=probs)
sim.model.jnt_pos[door_hinge_did][:] = sampled_sim.model.jnt_pos[door_hinge_did][:].copy()
jnt_ax = sampled_sim.model.jnt_axis[door_hinge_did][:].copy()
jnt_pos = sampled_sim.model.jnt_pos[door_hinge_did][:].copy()
if np.argmax(np.abs(jnt_ax)) == 0:
sim.model.jnt_pos[door_hinge_did][2] = np.random.normal(jnt_pos[2], 0.01) #* chosen_axis
else:
sim.model.jnt_pos[door_hinge_did][0] = np.random.normal(jnt_pos[0], 0.01) #* chosen_axis
sim.model.jnt_axis[door_hinge_did][:] = jnt_ax
no_trajectories = 40
sim_model_pool = []
for i in range(no_trajectories):
sim_model = load_model_from_path(sim_model_path)
update_param(sim_model) ### this updates the distribution of door hinges? that sounds dumb
sim_model_pool.append(sim_model)
mppi = MultModelMPPIMujoco(sim_model_pool, task, terminal_cost,
frame_skip=frame_skip,
horizon=30, no_trajectories=no_trajectories , noise=0.2, lam=.8)
print(mppi.num_states, mppi.num_actions)
input()
### update actuator
for m_sim in mppi.pool.sims:
update_actuator_gains(m_sim)
update_actuator_gains(sim)
### I need a filter
door_hinge_distr = []
hinge_poses = []
hinge_axis = []
hinge_probs = []
counter = 0
while True:
counter += 1
state = sim.get_state()
ctrl, pred_meas = mppi(state, predict_measurements=True)
sim.data.ctrl[:] = ctrl
for _ in range(frame_skip):
sim.step()
real_meas = sim.data.sensordata[:].copy()
real_meas += np.random.normal(0., 0.01, size=real_meas.shape)
### Use the measurements to update the probability of the models
logl = np.array([multivariate_normal.logpdf(real_meas-s_meas, 0., 0.01).sum()
for s_meas in pred_meas])
logl -= np.max(logl)
mppi.model_probs *= np.exp(logl)
mppi.model_probs += 1e-5
mppi.model_probs /= np.sum(mppi.model_probs)
norm_prob = np.linalg.norm(mppi.model_probs)
mean_joint_pos = 0.0
mean_joint_axis = 0.0
_hinge_poses = []
_hinge_axis = []
_hinge_probs = []
for _sim, m_prob in zip(mppi.pool.sims, mppi.model_probs):
mean_joint_pos += _sim.model.jnt_pos[door_hinge_did] * m_prob
mean_joint_axis += _sim.model.jnt_axis[door_hinge_did] * m_prob
_hinge_poses.append(_sim.model.jnt_pos[door_hinge_did].ravel().copy())
_hinge_axis.append(_sim.model.jnt_axis[door_hinge_did].ravel().copy())
_hinge_probs.append(m_prob.copy())
if abs(_hinge_axis[-1])[0] > 0:
rot = np.array([
[0., 0., -1],
[0., 1., 0.],
[1., 0., 0.]
]).flatten()
else:
rot = np.eye(3).flatten()
viewer.add_marker(pos=_sim.data.xanchor[door_hinge_did].flatten() + np.array([0., -0.1, 0.]),
size=np.array([0.01,0.01,0.4]), type=const.GEOM_ARROW, label='',
rgba=np.array([1.,1.,1.,m_prob/norm_prob]),
mat=rot)
#hinge_poses.append(_hinge_poses)
#hinge_axis.append(_hinge_axis)
#hinge_probs.append(_hinge_probs)
viewer.render()
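    # resample when the effective sample size 1/sum(p_i^2) drops below half the pool
    # (the standard particle-filter resampling criterion)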
if 1/np.sum(np.square(mppi.model_probs)) < no_trajectories/2:
print('RESAMPLING POOL')
update_distribution(mppi.pool.sims, mppi.model_probs)
mppi.model_probs = np.ones(mppi.model_probs.shape)
mppi.model_probs /= np.sum(mppi.model_probs)
#if counter % 200 == 0:
# file_pi = open('door-hinge-data.pickle', 'wb')
# pickle.dump({ 'hinge_poses': hinge_poses,
# 'hinge_axis' : hinge_axis,
# 'hinge_probs' : hinge_probs
# }, file_pi)
if sim.data.qpos[door_hinge_did] >= 1.2:#1.2:
sim.reset()
mppi.reset()
if os.getenv('TESTING') is not None:
break
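# Hedged note on the measurement update above (illustrative numbers, not from a run):
# with logl = [-0.5, -3.0], subtracting the max gives [0.0, -2.5], so
# model_probs *= exp(logl) keeps most of the mass on the best-matching model, while
# the 1e-5 floor prevents any model's probability from collapsing to exactly zero.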
|
[
"iabr4073@gmail.com"
] |
iabr4073@gmail.com
|
afa76986caca9ddf448586f9ea23e5d755f39919
|
b3a282c34768b325bf22e40c6eb0affefbcf4071
|
/config.py
|
1fd3e929b5acf685987c8382a92ca369ef9ac8e5
|
[] |
no_license
|
geoleonsh/DevopsPlatform
|
b6d3b60cb6e252c6c29e2596b1a5498ee5e81982
|
8efafc73e4943ce3829bcea66030a148cd293173
|
refs/heads/master
| 2020-03-30T07:12:57.023360 | 2018-11-07T11:30:22 | 2018-11-07T11:30:22 | 150,923,384 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 117 |
py
|
class MainConfig(object):
CSRF_ENABLED = True
# the front-end form key can be set to any value
SECRET_KEY = 'fhsfuiahsdfv8'
|
[
"liusaizhong@58yicun.com"
] |
liusaizhong@58yicun.com
|
fd527e343f2d02b25498d71c07d67c135ee94b18
|
613ef547c2c61611904019e36a3c8647cd371d95
|
/__init__.py
|
ef5eb1c7aa162857f50713ce590146564f6a1b84
|
[] |
no_license
|
brendanarnold/py-wien2k
|
75a7d1b51ce3c157b77492a287e9054273633558
|
51b441ac26dc5434b5d71f870b51c8eebf6178b9
|
refs/heads/master
| 2022-12-26T10:39:31.900738 | 2011-03-03T15:59:01 | 2011-03-03T15:59:01 | 301,226,846 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 597 |
py
|
__all__ = ['EnergyReader', 'Scf2Reader', 'StructReader', 'OutputkgenReader', 'KlistReader', 'KlistWriter', 'Output2Reader', 'Band', 'Kpoint', 'Kmesh', 'SymMat']
from readers.EnergyReader import EnergyReader
from readers.Scf2Reader import Scf2Reader
from readers.StructReader import StructReader
from readers.OutputkgenReader import OutputkgenReader
from readers.KlistReader import KlistReader
from readers.Output2Reader import Output2Reader
from writers.KlistWriter import KlistWriter
from Band import Band
from Kpoint import Kpoint
from Kmesh import Kmesh
from SymMat import SymMat
|
[
"none@none"
] |
none@none
|
a80e6f6f4b88c83d6c9c2ce635b2d77d0cb3381c
|
fb41cb1bd46144afbcb750ff3798e4ab5d7b59b8
|
/eas/boundary.py
|
b5a224f02a3c582ac19123f70039ea9fc457c12f
|
[] |
no_license
|
YanChunyanLA/OptimizationAlgorithm
|
42d8456a9b306c7741d1854aafd784dcb736986f
|
27f2eec73dd7287fe941c42def30a10ff95881fe
|
refs/heads/master
| 2021-05-22T23:10:31.945384 | 2020-03-31T10:01:29 | 2020-03-31T10:01:29 | 253,136,126 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,548 |
py
|
import random
def _boundary_strategy(vector, us, ls):
for i, _ in enumerate(vector):
if vector[i] > us[i]:
vector[i] = us[i]
if vector[i] < ls[i]:
vector[i] = ls[i]
return vector
def _c_boundary_strategy(c, u, l):
if c > u:
return u
if c < l:
return l
return c
def _middle_strategy(vector, us, ls):
for i, _ in enumerate(vector):
if vector[i] > us[i] or vector[i] < ls[i]:
vector[i] = (us[i] + ls[i]) / 2.0
return vector
def _c_middle_strategy(c, u, l):
if c > u or c < l:
c = (u + l) / 2.0
return c
def _random_strategy(vector, us, ls):
for i, _ in enumerate(vector):
if vector[i] > us[i] or vector[i] < ls[i]:
vector[i] = ls[i] + random.random() * (us[i] - ls[i])
return vector
def _c_random_strategy(c, u, l):
if c > u or c < l:
c = l + random.random() * (u - l)
return c
class Boundary(object):
BOUNDARY = 'use boundary'
MIDDLE = 'use middle'
RANDOM = 'use random'
@staticmethod
def make_strategy(s):
if s == Boundary.BOUNDARY:
return _boundary_strategy
elif s == Boundary.MIDDLE:
return _middle_strategy
else:
return _random_strategy
@staticmethod
def make_c_strategy(s):
if s == Boundary.BOUNDARY:
return _c_boundary_strategy
elif s == Boundary.MIDDLE:
return _c_middle_strategy
else:
return _c_random_strategy
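if __name__ == "__main__":
    # Hedged usage sketch (hypothetical bounds, not part of the module)
    clamp = Boundary.make_strategy(Boundary.BOUNDARY)
    print(clamp([1.5, -0.2, 0.7], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]))
    # -> [1.0, 0.0, 0.7]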
|
[
"a2htray.yuen@gmail.com"
] |
a2htray.yuen@gmail.com
|
b6128f23171b76118df0786aed60172fd230694a
|
6f0b42dfb44cdb0f83bfd756404b7299d1e2aced
|
/Pricing/data-preprocessing/sk-learn.py
|
8d5e8b8ab294d029fa2b97c8026bd6c2618288e6
|
[] |
no_license
|
playing-kaggle/House-Pricing
|
7e81300910a097520a6b013569fc822a277f3ce5
|
b4c30aa3e04d108fbfa613c10a389cf41aebfe02
|
refs/heads/master
| 2020-06-15T21:15:12.353971 | 2016-12-27T08:39:25 | 2016-12-27T08:39:25 | 75,264,664 | 3 | 1 | null | 2016-12-18T15:20:40 | 2016-12-01T06:56:18 |
Python
|
UTF-8
|
Python
| false | false | 1,621 |
py
|
import pandas as pd
from sklearn import linear_model
import numpy as np
import matplotlib.pyplot as plt
train_data = pd.read_csv('../../train.csv', index_col='Id')
# print(unique_value)
'''
vectorize using some column MSZoning as an example
'''
column_list = ['MSSubClass', 'MSZoning', 'LotShape', 'LandContour']
new_column_list = []
for column in column_list:
unique_value = train_data[column].unique()
for value in unique_value:
new_column_name = str(column) + '_' + str(value)
train_data.loc[train_data[column] == value, new_column_name] = 1
train_data.loc[train_data[column] != value, new_column_name] = 0
new_column_list.append(new_column_name)
train_data.drop(column, axis=1, inplace=True)
#print(train_data.columns)
#print(train_data[new_column_list])
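# (Hedged aside, not in the original script: pd.get_dummies(train_data[column_list].astype(str))
# would be an equivalent alternative to the loop above, building the same one-hot
# columns in a single call with the same 'column_value' naming scheme.)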
new_column_list.extend(['BsmtFinSF1','SalePrice'])
train_data = train_data[new_column_list]
data_X = train_data.drop('SalePrice', axis=1)
data_Y = train_data['SalePrice']
#print(data_X['BsmtFinSF1'])
#data_X = np.asarray(data_X)
#data_Y = np.asarray(data_Y)
#print(data_X);print(data_Y)
regr = linear_model.LinearRegression()
regr.fit(data_X, data_Y)
print('Coefficients: \n', regr.coef_)
print(regr.predict(data_X))
print("Mean squared error: %.2f"
% np.mean((regr.predict(data_X) - data_Y) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(data_X, data_Y))
#print(data)
#plt.plot(data_Y, regr.predict(data_X), color='blue',
# linewidth=3)
plt.scatter(data_Y,regr.predict(data_X))
plt.xticks(())
plt.yticks(())
plt.show()
# print(train_data.columns)
|
[
"shao Liu"
] |
shao Liu
|
4fd1987687c69928873f02007ae48e452ae6e846
|
a6326ad61856ba7b9610bc1ad5dcd54aa6351eeb
|
/multi_user_blog_final_original_copy/template.py
|
768001582053073f9788df87123b60fe032d1f3f
|
[] |
no_license
|
jp853/udacity-multi-user-blog-project
|
8f14eca838bca63eb9c030216ac4c24de60c8f6f
|
4821224d4b7582df2def3fe103a44427015e75ae
|
refs/heads/master
| 2021-01-17T20:57:02.333648 | 2017-04-05T17:02:28 | 2017-04-05T17:02:28 | 84,153,651 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,859 |
py
|
import hashlib
import hmac
import os
import random
import re
from string import letters
from google.appengine.ext import db
import jinja2
# Jinja configuration
# template loading code
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
# Parents
def users_key(group='default'):
return db.Key.from_path('users', group)
def blog_key(name='default'):
return db.Key.from_path('blogs', name)
secret = 'udacity'
# validate username, password, email
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
# make a key to save in a cookie with salt
def make_secure_key(key):
return '%s|%s' % (key, hmac.new(secret, key).hexdigest())
# checks value of first and second key
# if not, return none
def check_secure_key(secure_key):
key = secure_key.split('|')[0]
if secure_key == make_secure_key(key):
return key
# make a salt with 5 random letters
def make_salt(size=5):
return ''.join(random.choice(letters) for x in xrange(size))
# make a hashed password
def make_pw_hash(username, pw, salt=None):
if not salt:
salt = make_salt()
h = hashlib.sha256(username + pw + salt).hexdigest()
return '%s,%s' % (salt, h)
# check hashed password
def valid_pw(username, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(username, password, salt)
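if __name__ == '__main__':
    # Hedged self-test sketch (hypothetical credentials; Python 2 only because of
    # `letters`/`xrange` above, and it assumes the App Engine import succeeds)
    h = make_pw_hash('alice', 'hunter2')
    assert valid_pw('alice', 'hunter2', h)
    assert not valid_pw('alice', 'wrong', h)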
|
[
"noreply@github.com"
] |
jp853.noreply@github.com
|
28cf08b36b2ab86735c6b5367a92b76f64f18ec8
|
18d8d28b113e6252753b359332c94d282abe1e4e
|
/xiangshui/xiangshui/middlewares.py
|
bb7cd2a2cfda680dd48212fab59e6b91668de497
|
[
"MIT"
] |
permissive
|
ayueaa/Some-Spiders
|
cf916c9af8ed690f67a7b9df87b0c6755f1f590b
|
4cf085e55eab822c08d06b62099d1c235d1840ae
|
refs/heads/master
| 2023-01-06T05:27:03.308055 | 2019-05-13T12:34:48 | 2019-05-13T12:34:48 | 186,411,205 | 7 | 4 |
MIT
| 2022-12-30T09:30:34 | 2019-05-13T12:01:18 |
Julia
|
UTF-8
|
Python
| false | false | 3,603 |
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class XiangshuiSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class XiangshuiDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"woyue02@gmail.com"
] |
woyue02@gmail.com
|
69971bcc3664619ff5722d80b52691c48e509479
|
efb2c5b16e5725a02858c6e756698a48ea4e6bd3
|
/main/migrations/0005_auto_20210428_1731.py
|
b808d785f49d38a360b95e0edb0aefba769c74dd
|
[] |
no_license
|
nart-veb/abhrealty
|
3ef1bdf1712c7980f43057884f55970a4627644e
|
088514f487abedb285e9853cfa019bb3f1e4c829
|
refs/heads/master
| 2023-04-28T06:04:55.569629 | 2021-05-14T09:53:28 | 2021-05-14T09:53:28 | 366,337,909 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 425 |
py
|
# Generated by Django 3.1.4 on 2021-04-28 17:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_reviews'),
]
operations = [
migrations.AlterField(
model_name='reviews',
name='description',
field=models.TextField(blank=True, max_length=1000, verbose_name='Описание'),
),
]
|
[
"55654935+nart-veb@users.noreply.github.com"
] |
55654935+nart-veb@users.noreply.github.com
|
20b774b3b0cf82f552605cbfa94025bc851aa1ca
|
9bd913a239222b39a5fe61a29bbf94cde3e5972e
|
/Python网络爬虫从入门到实践/PythonScraping-master/Cha 11 -服务器采集/tor3.py
|
45a38c9a5e116e6d5bf71ba772443bfe169e0be7
|
[] |
no_license
|
MarryYou/python-
|
923ab1f53261c862a0b62d0d4cc92fcb5b39127f
|
0c7bd2e2f84bced4840475e12590b948929963bb
|
refs/heads/master
| 2020-03-22T02:34:34.972298 | 2018-07-02T03:51:22 | 2018-07-02T03:51:22 | 139,379,382 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,046 |
py
|
from stem import Signal
from stem.control import Controller
import socket
import socks
import requests
import time
#controller = Controller.from_port(port = 9151)
#controller.authenticate()
#socks.set_default_proxy(socks.SOCKS5, "127.0.0.1", 9150)
#socket.socket = socks.socksocket
total_scrappy_time = 0
total_changeIP_time = 0
for x in range(0,10):
#a = requests.get("http://checkip.amazonaws.com").text
#print ("第", x+1, "次IP:", a)
time1 = time.time()
a = requests.get("http://www.santostang.com/").text
#print (a)
time2 = time.time()
total_scrappy_time = total_scrappy_time + time2-time1
print ("第", x+1, "次抓取花费时间:", time2-time1)
time3 = time.time()
#controller.signal(Signal.NEWNYM)
time.sleep(5)
time4 = time.time()
total_changeIP_time = total_changeIP_time + time4-time3-5
print ("第", x+1, "次更换IP花费时间:", time4-time3-5)
print ("平均抓取花费时间:", total_scrappy_time/10)
print ("平均更换IP花费时间:", total_changeIP_time/10)
|
[
"61434508@qq.com"
] |
61434508@qq.com
|
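The commented-out `controller.signal(Signal.NEWNYM)` lines above are the actual IP-rotation step that the timing loop stands in for with `time.sleep(5)`. A minimal sketch of that step, assuming Tor Browser's control port 9151 (as in the commented code) and cookie or password-less authentication:

from stem import Signal
from stem.control import Controller

with Controller.from_port(port=9151) as controller:
    controller.authenticate()
    controller.signal(Signal.NEWNYM)  # ask Tor for a new circuit / exit IP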
9ec25b5cabc09a7928d973a2efe01f81f343ec69
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/edgelm/tests/test_constraints.py
|
d14e0980b697d887887217338827bdc3c1bc7357
|
[
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 |
MIT
| 2023-08-19T11:33:20 | 2019-07-23T04:15:28 |
Python
|
UTF-8
|
Python
| false | false | 10,757 |
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import unittest
import torch
from fairseq.token_generation_constraints import *
def tensorize(constraints: List[List[int]]) -> torch.Tensor:
return [torch.tensor(x) for x in constraints]
class TestHelperRoutines(unittest.TestCase):
def setUp(self):
self.examples = [
([[]], torch.tensor([[0]])),
([[], []], torch.tensor([[0], [0]])),
([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])),
(
[
[
torch.tensor([3, 1, 2]),
torch.tensor([3]),
torch.tensor([4, 5, 6, 7]),
],
[],
[torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
],
torch.tensor(
[
[3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0],
]
),
),
]
def test_packing(self):
"""Ensures the list of lists of tensors gets packed correctly."""
for batch_constraints, expected_tensor in self.examples:
packed = pack_constraints(batch_constraints)
assert torch.equal(packed, expected_tensor)
class TestUnorderedConstraintState(unittest.TestCase):
def setUp(self):
        # Tuples of (constraint set, expected printed graph, token counts per node)
self.examples = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
"([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))",
{1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1},
),
([], "[None].False#0", {}),
(tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}),
(
tensorize([[100000, 1, 2, 3, 4, 5]]),
"([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))",
{100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
),
(
tensorize([[1, 2], [1, 2]]),
"([None].False#2 ([1].False#2 [2].True#2))",
{1: 2, 2: 2},
),
(
tensorize([[1, 2], [3, 4]]),
"([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))",
{1: 1, 2: 1, 3: 1, 4: 1},
),
]
self.sequences = [
(
self.examples[0][0],
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 94],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4, 999],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[4, 5, 6, 8],
{"bank": 2, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
# Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5]
# [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]],
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": True},
),
(
tensorize([[1], [2, 3]]),
# Should not be able to get credit for entering 1 a second time
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[4][0],
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
self.examples[4][0],
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
(
self.examples[5][0],
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
]
def test_graphs(self):
"""
Test whether unordered graph systems are created correctly.
"""
for example in self.examples:
constraints, expected, gold_counts = example
c = ConstraintNode.create(constraints)
assert (
ConstraintNode.print_graph(c) == expected
), f"got {ConstraintNode.print_graph(c)}, expected {expected}"
assert (
c.token_counts() == gold_counts
), f"{c} got {c.token_counts()} wanted {gold_counts}"
def test_next_tokens(self):
"""
Tests that the set of next tokens is correct.
"""
for example in self.examples:
constraints, expected, gold_counts = example
root = ConstraintNode.create(constraints)
root_tokens = set(root.children.keys())
for sequence in constraints:
state = UnorderedConstraintState(root)
for token in sequence:
all_tokens = root_tokens.union(state.node.children.keys())
assert (
all_tokens == state.next_tokens()
), f"ALL {all_tokens} NEXT {state.next_tokens()}"
state = state.advance(token)
def test_sequences(self):
for constraints, tokens, expected in self.sequences:
state = UnorderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
class TestOrderedConstraintState(unittest.TestCase):
def setUp(self):
self.sequences = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 94],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 3, 999, 1, 4],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 999, 999],
{"bank": 3, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 77, 1, 3, 1],
{"bank": 6, "num_completed": 2, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1], [2, 3]]),
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [3, 4]]),
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
]
def test_sequences(self):
for i, (constraints, tokens, expected) in enumerate(self.sequences):
state = OrderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
if __name__ == "__main__":
unittest.main()
|
[
"tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net"
] |
tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net
|
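For reference, the packed tensor layout that the TestHelperRoutines fixtures assert can be read off directly: each batch row starts with the number of constraints, followed by each constraint's tokens with a 0 terminator, zero-padded to the longest row. A small usage sketch consistent with those fixtures:

import torch
from fairseq.token_generation_constraints import pack_constraints

# row layout: [num_constraints, c1..., 0, c2..., 0, ...] plus zero padding
batch = [[torch.tensor([3, 1, 2]), torch.tensor([3])], []]
print(pack_constraints(batch))
# tensor([[2, 3, 1, 2, 0, 3, 0],
#         [0, 0, 0, 0, 0, 0, 0]])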
191bcd0c40adf152e4120b55d3dc1ac26b1b414e
|
5c185fb955a3e9de3ce0da6de8e7596abd7ffaee
|
/proyecto.py
|
2897854ee41cf4a16fe0d63c630084dd0d58b759
|
[] |
no_license
|
Steven0414/Codigo-proyecto-webservice
|
a407ffe0fd9ac0babfcdf24f98fe234b551d6a22
|
945bd0c2a72f7114c62832142e96a84b7003691e
|
refs/heads/master
| 2021-01-20T18:29:00.890359 | 2016-07-23T02:53:54 | 2016-07-23T02:53:54 | 63,995,339 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,613 |
py
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# Libraries required to run Flask-based applications
from flask import Flask, jsonify, make_response, request
import subprocess
app = Flask(__name__)
# Web service invoked when running the command
# curl http://localhost:5000
@app.route('/',methods = ['GET'])
def index():
    return "Hola Univalle"
# This method returns the list of operating systems supported by VirtualBox.
# The supported OS types are shown by running the command
# curl http://localhost:5000/vms/ostypes
# This is the code for item 1
@app.route('/vms/ostypes',methods = ['GET'])
def ostypes():
    output = subprocess.check_output(['VBoxManage','list','ostypes'])
    return output
# This method returns the list of machines associated with a user when running
# curl http://localhost:5000/vms
# This is the code for item 2a
@app.route('/vms',methods = ['GET'])
def listvms():
    output = subprocess.check_output(['VBoxManage','list','vms'])
    return output
# This method returns the machines that are currently running when executing
# curl http://localhost:5000/vms/running
# This is the code for item 2b
@app.route('/vms/running',methods = ['GET'])
def runninglistvms():
    output = subprocess.check_output(['VBoxManage','list','runningvms'])
    return output
# This method returns the characteristics of the virtual machine named
# vmname (item 3).
@app.route('/vms/info/<vmname>', methods = ['GET'])
def vminfo(vmname):
    info = subprocess.Popen(['VBoxManage','showvminfo',vmname],stdout = subprocess.PIPE)
    grep = subprocess.check_output(['grep','-e','NIC','-e','Number','-e','Memory'],stdin = info.stdout)
    return grep
@app.errorhandler(404)
def not_found(error):
    return make_response("No machine with the given name was found\n")
# You must also implement items 4 and 5 of the project statement,
# considering that:
# - Item 4 must use the HTTP POST method
@app.route('/vms/create', methods=['POST'])
def createvms():
    nombre = request.form['Nombre']
    nucleos = request.form['Nucleos']
    ram = request.form['Ram']
    mensaje = subprocess.check_output(['./scriptCrear',nombre,nucleos,ram])
    return mensaje
# - Item 5 must use the HTTP DELETE method
@app.route('/vms/delete', methods=['DELETE'])
def deletevms():
    nombre = request.form['Nombre']
    mensaje = subprocess.check_output(['VBoxManage','unregistervm',nombre,'--delete'])
    return mensaje
if __name__ == '__main__':
    app.run(debug = True, host='0.0.0.0')
|
[
"erick.garcia@correounivalle.edu.co"
] |
erick.garcia@correounivalle.edu.co
|
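A hypothetical client-side session against the endpoints above, using the requests library; the form field names match the handlers, while the VM name and sizes are placeholders:

import requests

base = 'http://localhost:5000'
print(requests.get(base + '/vms').text)  # item 2a: list registered VMs
r = requests.post(base + '/vms/create',
                  data={'Nombre': 'test-vm', 'Nucleos': '2', 'Ram': '1024'})
print(r.text)
print(requests.delete(base + '/vms/delete', data={'Nombre': 'test-vm'}).text)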
b2fba4c7ec71c02907f48cd6640fe79838e48fc2
|
b162d299e38aca44f01ad667e18a9d3962effff1
|
/wifimap/spots/tests/test_detail.py
|
3f0fba64587092ed8393d43aed83626846a9aa31
|
[] |
no_license
|
andrewsmedina/saojoaomap
|
87958e95410f0eea2659c6d2c28018893e92ed24
|
12b35650538a94b9bca201db1f66a59e8a6e78ee
|
refs/heads/master
| 2021-01-17T07:40:49.645775 | 2014-05-29T05:41:42 | 2014-05-29T05:41:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 976 |
py
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from spots.models import AccessPoint
class DetailViewTest(TestCase):
def setUp(self):
self.access_point = AccessPoint.objects.create(name='point 1', description='Point 1 description', address='Rio de Janeiro, Brazil', lat=-22.9963233069, lng=-43.3637237549)
self.url = reverse('spot_detail', args=[self.access_point.id])
self.response = self.client.get(self.url)
def tearDown(self):
AccessPoint.objects.all().delete()
def test_view_exists(self):
assert 200 == self.response.status_code
def test_name_in_spot_detail(self):
assert 'point 1' in self.response.content
def test_address_in_spot_detail(self):
assert 'Rio de Janeiro, Brazil' in self.response.content
def test_description_in_spot_detail(self):
assert 'Point 1 description' in self.response.content
|
[
"andrewsmedina@gmail.com"
] |
andrewsmedina@gmail.com
|
64fe7fb5bcacbb566dc2ac641311ea5929a50dd6
|
e3f94fd8a7e62b98c739c4ebaffc3c8a32b3c041
|
/sklearn/decsionTree.py
|
56ade8f72708018ebafaf11b552b94c65c73f28a
|
[] |
no_license
|
TheOneAC/ML
|
94f8fcc1b8fd3b8a032e7af288c11689c395f72f
|
d0d02deb0dd656a8074ac4ef26f53904e8088ead
|
refs/heads/master
| 2020-05-21T21:38:06.026611 | 2017-09-16T17:05:49 | 2017-09-16T17:05:49 | 84,650,560 | 0 | 0 | null | 2017-09-16T17:02:24 | 2017-03-11T13:07:11 |
Python
|
UTF-8
|
Python
| false | false | 3,667 |
py
|
import math
from collections import Counter, defaultdict
from functools import partial  # needed by build_tree_id3 below

def entropy(class_probabilities):
    return sum(-p * math.log(p, 2)
               for p in class_probabilities
               if p)

def class_probabilities(labels):
    total_count = len(labels)
    # float() avoids Python 2 integer division
    return [count / float(total_count)
            for count in Counter(labels).values()]

def data_entropy(labeled_data):
    # each element of labeled_data is an (input, label) pair
    labels = [label for _, label in labeled_data]
    probabilities = class_probabilities(labels)
    return entropy(probabilities)

def partition_entropy(subsets):
    total_count = sum(len(subset) for subset in subsets)
    return sum(data_entropy(subset) * len(subset) / total_count
               for subset in subsets)
inputs = [
({'level':'Senior', 'lang':'Java', 'tweets':'no', 'phd':'no'},
False),
({'level':'Senior', 'lang':'Java', 'tweets':'no', 'phd':'yes'},
False),
({'level':'Mid', 'lang':'Python', 'tweets':'no', 'phd':'no'},
True),
({'level':'Junior', 'lang':'Python', 'tweets':'no', 'phd':'no'},
True),
({'level':'Junior', 'lang':'R', 'tweets':'yes', 'phd':'no'},
True),
({'level':'Junior', 'lang':'R', 'tweets':'yes', 'phd':'yes'},
False),
({'level':'Mid', 'lang':'R', 'tweets':'yes', 'phd':'yes'},
True),
({'level':'Senior', 'lang':'Python', 'tweets':'no', 'phd':'no'}, False),
({'level':'Senior', 'lang':'R', 'tweets':'yes', 'phd':'no'},
True),
({'level':'Junior', 'lang':'Python', 'tweets':'yes', 'phd':'no'}, True),
({'level':'Senior', 'lang':'Python', 'tweets':'yes', 'phd':'yes'}, True),
({'level':'Mid', 'lang':'Python', 'tweets':'no', 'phd':'yes'},
True),
({'level':'Mid', 'lang':'Java', 'tweets':'yes', 'phd':'no'},
True),
({'level':'Junior', 'lang':'Python', 'tweets':'no', 'phd':'yes'}, False)
]
def partition_by(inputs, attribute):
groups = defaultdict(list)
for input in inputs:
key = input[0][attribute]
groups[key].append(input)
return groups
def partition_entropy_by(inputs, attribute):
    partitions = partition_by(inputs, attribute)
    # partition_by returns a dict; the entropy is computed over its value lists
    return partition_entropy(partitions.values())
for key in ['level','lang','tweets','phd']:
print key, partition_entropy_by(inputs, key)
senior_inputs = [(input, label)
for input, label in inputs if input["level"] == "Senior"]
for key in ['lang', 'tweets', 'phd']:
print key, partition_entropy_by(senior_inputs, key)
def classify(tree, input):
if tree in [True, False]:
return tree
attribute, subtree_dict = tree
subtree_key = input.get(attribute)
if subtree_key not in subtree_dict:
subtree_key = None
subtree = subtree_dict[subtree_key]
return classify(subtree, input)
def build_tree_id3(inputs, split_candidates = None):
if split_candidates is None:
split_candidates = inputs[0][0].keys()
num_inputs = len(inputs)
num_trues = len([label for item, label in inputs if label])
num_falses = num_inputs - num_trues
if num_trues == 0: return False
if num_falses == 0: return True
if not split_candidates:
return num_trues >= num_falses # if no split candidates left
best_attribute = min(split_candidates,
key=partial(partition_entropy_by, inputs))
partitions = partition_by(inputs, best_attribute)
new_candidates = [a for a in split_candidates
if a != best_attribute]
subtrees = {attribute_value: build_tree_id3(subset, new_candidates)
for attribute_value, subset in partitions.iteritems()}
subtrees[None] = num_trues > num_falses
return (best_attribute, subtrees)
tree = build_tree_id3(inputs)
classify(tree, { "level" : "Junior",
"lang" : "Java",
"tweets" : "yes",
"phd" : "no"} )
|
[
"scuhss@gmail.com"
] |
scuhss@gmail.com
|
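Two quick checks of the entropy helper above (the values are exact): a fair binary split carries one bit of entropy, while a pure set carries none — the `if p` guard also skips zero probabilities safely. In the script's Python 2 style:

print entropy([0.5, 0.5])  # 1.0
print entropy([1.0])       # 0.0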
c8c83c640bd7b2590a7214c8184fb4fabe5cac4d
|
dd4d027d98a2f8d5f3564dc09c0dfd04b356cbdd
|
/01.py
|
a3fccbb5a9d017348da61c8a0087d7c7a7b385fa
|
[] |
no_license
|
Seabra14/pythonProject
|
0f3478dc40b534fdb68bbba4325ab7f58274b36b
|
5a540ee509b0d564e9e3eb6dc3e8f8b8a2ac4c50
|
refs/heads/main
| 2023-08-10T08:56:00.169476 | 2021-09-13T14:30:58 | 2021-09-13T14:30:58 | 395,688,398 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 25 |
py
|
print("Olá, mundo!")
|
[
"88586353+Seabra14@users.noreply.github.com"
] |
88586353+Seabra14@users.noreply.github.com
|
ea451e351ef36fd009b9d26eb7ccc6a442d8a995
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_aws_iam/tests/test_fn_aws_iam_list_ssh_keys.py
|
bf56370026bc46ca10f9197f62fc42c7bae8403b
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 |
MIT
| 2023-03-29T20:40:31 | 2017-08-25T14:07:33 |
Python
|
UTF-8
|
Python
| false | false | 3,190 |
py
|
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Tests using pytest_resilient_circuits"""
import pytest
from mock import patch
from resilient_circuits.util import get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from .mock_artifacts import mocked_aws_iam_client, get_mock_config, get_func_responses
PACKAGE_NAME = "fn_aws_iam"
FUNCTION_NAME = "fn_aws_iam_list_ssh_keys"
# Read the mock configuration-data section from the package
config_data = get_mock_config()
def assert_keys_in(json_obj, *keys):
for key in keys:
assert key in json_obj
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_fn_aws_iam_list_ssh_keys_function(circuits, function_params, timeout=5):
# Create the submitTestFunction event
evt = SubmitTestFunction("fn_aws_iam_list_ssh_keys", function_params)
# Fire a message to the function
circuits.manager.fire(evt)
# circuits will fire an "exception" event if an exception is raised in the FunctionComponent
# return this exception if it is raised
exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
if exception_event is not False:
exception = exception_event.args[1].args[1]
raise exception
# else return the FunctionComponent's results
else:
event = circuits.watcher.wait("fn_aws_iam_list_ssh_keys_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestFnAwsIamListSshKeys:
""" Tests for the fn_aws_iam_list_ssh_keys function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@patch("fn_aws_iam.components.fn_aws_iam_list_ssh_keys.AwsIamClient", side_effect=mocked_aws_iam_client)
@pytest.mark.parametrize("aws_iam_user_name, expected_results", [
("iam_test_user", [{'SSHPublicKeyId': 'APKA4EQBBG2YCGOGDY5G',
'Status': 'Active',
'UploadDate': '2020-02-25 11:05:17',
'UserName': 'iam_test_user'}
]),
("iam_test_user_not_exists", {'Status': 'NoSuchEntity'}),
])
def test_success(self, mock_get, circuits_app, aws_iam_user_name, expected_results):
""" Test calling with sample values for the parameters """
keys = ["content", "inputs", "metrics", "raw", "reason", "success", "version"]
function_params = {
"aws_iam_user_name": aws_iam_user_name
}
results = call_fn_aws_iam_list_ssh_keys_function(circuits_app, function_params)
assert_keys_in(results, *keys)
content = results["content"]
assert expected_results == content
|
[
"travis@example.org"
] |
travis@example.org
|
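The `assert_keys_in` helper above works on any mapping; a standalone illustration using the same key set the test asserts (the values are placeholders):

results = {"content": [], "inputs": {}, "metrics": {}, "raw": {},
           "reason": None, "success": True, "version": 1.0}
assert_keys_in(results, "content", "success", "version")  # passes silently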
7c4652d580fe4ac2284cebb9b10d6671517da94a
|
2347a00aa41c023924de6bc4ffe0e8bc244a0f3f
|
/mariners_profile/migrations/0092_auto_20151007_0238.py
|
e90aecfbfc240f507a0698adad9d45fdf24e9f36
|
[] |
no_license
|
Dean-Christian-Armada/prod-people
|
2ac20d16aecb0cf1ae50a08e456060eee270b518
|
fb8d99394d78bbf4d1831223fce2d7ac4a04f34d
|
refs/heads/master
| 2021-01-01T16:19:36.904967 | 2016-01-26T09:20:36 | 2016-01-26T09:20:36 | 42,503,579 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 474 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mariners_profile', '0091_auto_20151007_0202'),
]
operations = [
migrations.RemoveField(
model_name='coc',
name='coc_number',
),
migrations.RemoveField(
model_name='license',
name='license_number',
),
]
|
[
"deanarmada@gmail.com"
] |
deanarmada@gmail.com
|
1bcfe78216a83e71d2f26e4c99a53448c285f3b1
|
f49a278fd34f125b596074786b8e9c52f8540ec8
|
/Persistant_Hierarchichal_FileStorage/NoSQL_FS.py.py
|
c3dfd6817f3714b41e6ca948354590952031360f
|
[] |
no_license
|
JaivardhanM/PrinciplesOfCompSysDesign
|
371877c26a9854f72d3b009b07f5afcb179f4e1b
|
c3bb7177db08dd0f7d3361e81ab4cd8403498f88
|
refs/heads/master
| 2021-01-10T16:30:52.972190 | 2016-02-18T16:22:49 | 2016-02-18T16:22:49 | 52,019,046 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,015 |
py
|
#!/usr/bin/env python
import logging
from collections import defaultdict
from errno import ENOENT
from stat import S_IFDIR, S_IFLNK, S_IFREG
from sys import argv, exit
from time import time
import datetime
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
from xmlrpclib import Binary
import sys, pickle, xmlrpclib
import pymongo
from pymongo import MongoClient
# Drop any filesystem state left over in MongoDB from a previous run
# (remove() returns a result document, not the collection, so it is not assigned)
MongoClient('mongodb://localhost:27017/').filesys_database.filenodes.remove()
count = 0
if not hasattr(__builtins__, 'bytes'):
    bytes = str
class FileNode:
def __init__(self,name,isFile,path,url):
self.name = name
self.path = path
self.url = url # 'mongodb://localhost:27017/'
self.isFile = isFile # true if node is a file, false if is a directory.
self.put("data","") # used if it is a file
self.put("meta",{})
self.put("list_nodes",{})# contains a tuple of <name:FileNode> used only if it is a dir.
def put(self,key,value):
client = MongoClient(self.url)
fs_db = client.filesys_database
fnodes = fs_db.filenodes
#dict_temp = {str(self.path) : key , str(key): pickle.dumps(value)}
Node_id = fnodes.update({str(self.path) : key},{'$set': {str(key): pickle.dumps(value)}},upsert = True)
print Node_id , "dbug node id"
def get(self,key):
client = MongoClient(self.url)
fs_db = client.filesys_database
fnodes = fs_db.filenodes
res = fnodes.find_one({str(self.path):key})
#print "get key", {str(self.path):key}
#print "PRINTING RES", res
if key in res.keys():
#print "rv: = 1", pickle.loads(res[key])
return pickle.loads(res[key])
else:
return None
def set_data(self,data_blob):
self.put("data",data_blob)
def set_meta(self,meta):
self.put("meta",meta)
def get_data(self):
return self.get("data")
def get_meta(self):
return self.get("meta")
def list_nodes(self):
return self.get("list_nodes").values()
def add_node(self,newnode):
list_nodes = self.get("list_nodes")
list_nodes[newnode.name]=newnode
self.put("list_nodes",list_nodes)
def contains_node(self,name): # returns node object if it exists
if (self.isFile==True):
return None
else:
if name in self.get("list_nodes").keys():
return self.get("list_nodes")[name]
else:
return None
class FS:
def __init__(self,url):
self.url = url
self.root = FileNode('/',False,'/',url)
now = time()
self.fd = 0
self.root.set_meta(dict(st_mode=(S_IFDIR | 0755), st_ctime=now,st_mtime=now,\
st_atime=now, st_nlink=2))
# returns the desired FileNode object
def get_node_wrapper(self,path): # pathname of the file being probed.
# Handle special case for root node
if path == '/':
return self.root
PATH = path.split('/') # break pathname into a list of components
name = PATH[-1]
PATH[0]='/' # splitting of a '/' leading string yields "" in first slot.
return self.get_node(self.root,PATH,name)
def get_node(self,parent,PATH,name):
next_node = parent.contains_node(PATH[1])
if (next_node == None or next_node.name == name):
return next_node
else:
return self.get_node(next_node,PATH[1:],name)
def get_parent_node(self,path):
parent_path = "/"+("/".join(path.split('/')[1:-1]))
parent_node = self.get_node_wrapper(parent_path)
return parent_node
def add_node(self,node,path):
parent_path = "/"+("/".join(path.split('/')[1:-1]))
parent_node = self.get_node_wrapper(parent_path)
parent_node.add_node(node)
if (not node.isFile):
meta = parent_node.get("meta")
meta['st_nlink']+=1
parent_node.put("meta",meta)
else:
self.fd+=1
return self.fd
def add_dir(self,path,mode):
# create a file node
temp_node = FileNode(path.split('/')[-1],False,path,self.url)
temp_node.set_meta(dict(st_mode=(S_IFDIR | mode), st_nlink=2,
st_size=0, st_ctime=time(), st_mtime=time(),
st_atime=time()))
# Add node to the FS
self.add_node(temp_node,path)
def add_file(self,path,mode):
# create a file node
temp_node = FileNode(path.split('/')[-1],True,path,self.url)
temp_node.set_meta(dict(st_mode=(S_IFREG | mode), st_nlink=1,
st_size=0, st_ctime=time(), st_mtime=time(),
st_atime=time()))
# Add node to the FS
# before we do that, we have to manipulate the path string to point
self.add_node(temp_node,path)
self.fd+=1
return self.fd
def write_file(self,path,data=None, offset=0, fh=None):
# file will already have been created before this call
# get the corresponding file node
filenode = self.get_node_wrapper(path)
# if data == None, this is just a truncate request,using offset as
# truncation parameter equivalent to length
node_data = filenode.get("data")
node_meta = filenode.get("meta")
if (data==None):
node_data = node_data[:offset]
node_meta['st_size'] = offset
else:
node_data = node_data[:offset]+data
node_meta['st_size'] = len(node_data)
filenode.put("data",node_data)
filenode.put("meta",node_meta)
def read_file(self,path,offset=0,size=None):
# get file node
filenode = self.get_node_wrapper(path)
# if size==None, this is a readLink request
if (size==None):
return filenode.get_data()
else:
# return requested portion data
return filenode.get("data")[offset:offset + size]
def rename_node(self,old,new):
# first check if parent exists i.e. destination path is valid
future_parent_node = self.get_parent_node(new)
if (future_parent_node == None):
raise FuseOSError(ENOENT)
return
# get old filenodeobject and its parent filenode object
filenode = self.get_node_wrapper(old)
parent_filenode = self.get_parent_node(old)
# remove node from parent
list_nodes = parent_filenode.get("list_nodes")
del list_nodes[filenode.name]
parent_filenode.put("list_nodes",list_nodes)
# if filenode is a directory decrement 'st_link' of parent
if (not filenode.isFile):
parent_meta = parent_filenode.get("meta")
parent_meta["st_nlink"]-=1
parent_filenode.put("meta",parent_meta)
# add filenode to new parent, also change the name
filenode.name = new.split('/')[-1]
future_parent_node.add_node(filenode)
def utimens(self,path,times):
filenode = self.get_node_wrapper(path)
now = time()
atime, mtime = times if times else (now, now)
meta = filenode.get("meta")
meta['st_atime'] = atime
meta['st_mtime'] = mtime
filenode.put("meta",meta)
def delete_node(self,path):
# get parent node
parent_filenode = self.get_parent_node(path)
# get node to be deleted
filenode = self.get_node_wrapper(path)
# remove node from parents list
list_nodes = parent_filenode.get("list_nodes")
del list_nodes[filenode.name]
parent_filenode.put("list_nodes",list_nodes)
# if its a dir reduce 'st_nlink' in parent
if (not filenode.isFile):
parents_meta = parent_filenode.get("meta")
parents_meta["st_nlink"]-=1
parent_filenode.put("meta",parents_meta)
def link_nodes(self,target,source):
# create a new target node.
temp_node = FileNode(target.split('/')[-1],True,target,self.url)
temp_node.set_meta(dict(st_mode=(S_IFLNK | 0777), st_nlink=1,
st_size=len(source)))
temp_node.set_data(source)
# add the new node to FS
self.add_node(temp_node,target)
def update_meta(self,path,mode=None,uid=None,gid=None):
# get the desired filenode.
filenode = self.get_node_wrapper(path)
# if chmod request
meta = filenode.get("meta")
if (uid==None):
meta["st_mode"] &= 0770000
meta["st_mode"] |= mode
else: # a chown request
meta['st_uid'] = uid
meta['st_gid'] = gid
filenode.put("meta",meta)
class Memory(LoggingMixIn, Operations):
'Example memory filesystem. Supports only one level of files.'
def __init__(self,url):
global count # count is a global variable, can be used inside any function.
count +=1 # increment count for very method call, to track count of calls made.
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time())) # print the parameters passed to the method as input.(used for debugging)
print('In function __init__()') #print name of the method called
self.FS = FS(url)
def getattr(self, path, fh=None):
global count
count +=1
print ("CallCount {} " " Time {} arguments:{} {} {}".format(count,datetime.datetime.now().time(),type(self),path,type(fh)))
print('In function getattr()')
file_node = self.FS.get_node_wrapper(path)
if (file_node == None):
raise FuseOSError(ENOENT)
else:
return file_node.get_meta()
def readdir(self, path, fh):
global count
count +=1
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time()))
print('In function readdir()')
        file_node = self.FS.get_node_wrapper(path)
        # directory listing: '.' and '..' plus the names of all child nodes
        m = ['.', '..'] + [x.name for x in file_node.list_nodes()]
        print m
        return m
def mkdir(self, path, mode):
global count
count +=1
print ("CallCount {} " " Time {}" "," "argumnets:" " " "path;{}" "," "mode:{}".format(count,datetime.datetime.now().time(),path,mode))
print('In function mkdir()')
# create a file node
self.FS.add_dir(path,mode)
def create(self, path, mode):
global count
count +=1
print ("CallCount {} " " Time {} path {} mode {}".format(count,datetime.datetime.now().time(),path,mode))
print('In function create()')
return self.FS.add_file(path,mode) # returns incremented fd.
def write(self, path, data, offset, fh):
global count
count +=1
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time()))
print ("Path:{}" " " "data:{}" " " "offset:{}" " " "filehandle{}".format(path,data,offset,fh))
print('In function write()')
self.FS.write_file(path, data, offset, fh)
return len(data)
def open(self, path, flags):
global count
count +=1
print ("CallCount {} " " Time {}" " " "argumnets:" " " "path:{}" "," "flags:{}".format(count,datetime.datetime.now().time(),path,flags))
print('In function open()')
self.FS.fd += 1
return self.FS.fd
def read(self, path, size, offset, fh):
global count
count +=1
print ("CallCount {} " " Time {}" " " "arguments:" " " "path:{}" "," "size:{}" "," "offset:{}" "," "fh:{}".format(count,datetime.datetime.now().time(),path,size,offset,fh))
print('In function read()')
return self.FS.read_file(path,offset,size)
def rename(self, old, new):
global count
count +=1
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time()))
print('In function rename()')
self.FS.rename_node(old,new)
def utimens(self, path, times=None):
global count
count +=1
print ("CallCount {} " " Time {} Path {}".format(count,datetime.datetime.now().time(),path))
print('In function utimens()')
self.FS.utimens(path,times)
def rmdir(self, path):
global count
count +=1
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time()))
print('In function rmdir()')
self.FS.delete_node(path)
def unlink(self, path):
global count
count +=1
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time()))
print('In function unlink()')
self.FS.delete_node(path)
def symlink(self, target, source):
global count
count +=1
print ("CallCount {} " " Time {}" "," "Target:{}" "," "Source:{}".format(count,datetime.datetime.now().time(),target,source))
print('In function symlink()')
self.FS.link_nodes(target,source)
def readlink(self, path):
global count
count +=1
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time()))
print('In function readlink()')
return self.FS.read_file(path)
def truncate(self, path, length, fh=None):
        global count
        count +=1
        print ("CallCount {} " " Time {}" "," "arguments:" " " "path:{}" "," "length:{}" "," "fh:{}".format(count,datetime.datetime.now().time(),path,length,fh))
print('In function truncate()')
self.FS.write_file(path,offset=length)
def chmod(self, path, mode):
global count
count +=1
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time()))
print('In function chmod()')
self.FS.update_meta(path,mode=mode)
return 0
def chown(self, path, uid, gid):
global count
count +=1
print ("CallCount {} " " Time {}".format(count,datetime.datetime.now().time()))
print('In function chown()')
self.FS.update_meta(path,uid=uid,gid=gid)
if __name__ == "__main__":
if len(argv) != 3:
print 'usage: %s <mountpoint> <remote hashtable>' % argv[0]
exit(1)
url = argv[2]
# Create a new HtProxy object using the URL specified at the command-line
fuse = FUSE(Memory(url), argv[1], foreground=True, debug=True)
|
[
"jaivardhan.mattapalli@gmail.com"
] |
jaivardhan.mattapalli@gmail.com
|
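The `FileNode.put`/`get` pair above stores one pickled MongoDB document per (path, key) pair. A standalone sketch of that exact pattern, runnable against a local mongod; it deliberately uses the same legacy `update()`/`remove()` pymongo API the file relies on:

import pickle
from pymongo import MongoClient

col = MongoClient('mongodb://localhost:27017/').filesys_database.filenodes
# upsert mirrors FileNode.put(): one document keyed by {path: key}
col.update({'/foo': 'data'}, {'$set': {'data': pickle.dumps('hello')}}, upsert=True)
doc = col.find_one({'/foo': 'data'})  # mirrors FileNode.get()
print(pickle.loads(doc['data']))      # -> 'hello'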
bd1cd6ee5d5a4e9716e857d0f5422a79d63509b8
|
b381ae027aff7ac7d3c32993072e7a029b036150
|
/samples/Linux/getCameraFeature.py
|
98f642eb0b9e6a14271d5a7641e2c80bd0b27ce3
|
[
"MIT"
] |
permissive
|
pixelink-support/pixelinkPythonWrapper
|
f0040fb4c89aff33efba03da7a95253a276c30d8
|
3f5d4f28c8730debe4e774ceedbbd31c75f85c1d
|
refs/heads/master
| 2022-10-11T10:19:02.191089 | 2022-09-09T18:51:46 | 2022-09-09T18:51:46 | 254,446,479 | 13 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,949 |
py
|
"""
getCameraFeature.py
Demonstrates how to get some information about a camera feature.
Note that there are two places to get information about a feature:
1) getCameraFeatures
2) getFeature
getCameraFeatures can be used to query (generally) static information about
a feature. e.g. number of parameters, if it's supported, param max min. etc.
getFeature is used to get the feature's current settings/value.
(setFeature is used to set the feature's current settings/value.)
"""
from pixelinkWrapper import *
def decode_feature_flags(flags):
if(flags & PxLApi.FeatureFlags.PRESENCE):
print("Flag PRESENCE - feature is supported")
if(flags & PxLApi.FeatureFlags.READ_ONLY):
print("Flag READ_ONLY - feature can only be read")
if(flags & PxLApi.FeatureFlags.DESC_SUPPORTED):
print("Flag DESC_SUPPORTED - feature can be saved to different descriptors")
if(flags & PxLApi.FeatureFlags.MANUAL):
print("Flag MANUAL - feature controlled by external app")
if(flags & PxLApi.FeatureFlags.AUTO):
print("Flag AUTO - feature automatically controlled by camera")
if(flags & PxLApi.FeatureFlags.ONEPUSH):
print("Flag ONEPUSH - camera sets feature only once, then returns to manual operation")
if(flags & PxLApi.FeatureFlags.OFF):
print("Flag OFF - feature is set to last known state and cannot be controlled by app")
"""
Print information about an individual camera feature
"""
def print_camera_feature(feature):
# Is the feature supported?
isSupported = feature.uFlags & PxLApi.FeatureFlags.PRESENCE
if(not(isSupported)):
print("Feature {0} is not supported".format(feature.uFeatureId))
else:
print("Number of parameters: {0}".format(feature.uNumberOfParameters))
print("Flags: {0}".format(feature.uFlags))
decode_feature_flags(feature.uFlags)
params = feature.Params
for i in range(feature.uNumberOfParameters):
print("Parameter {0}".format(i))
print("Min value: {0}".format(params[i].fMaxValue))
print("Max value: {0}".format(params[i].fMinValue))
"""
Print information about a feature.
This is one way to determine how many parameters are used by a feature.
The second way is demonstrated in print_feature_trigger.
The advantage of this method is that you can also see the max and min values
parameters supports.
Note that the max and min are exactly that: max and min.
It should not be assumed that all values between are supported.
For example, an ROI width parameter may have a min/max of 0/1600, but
widths of 7, 13, 59 etc. are not supported.
Note too that a feature's min and max values may change as other
features change.
For example, exposure and frame rate are interlinked, and changing
one may change the min/max for the other.
The feature flags reported by getCameraFeatures indicate which
flags are supported (e.g. FeatureFlags.AUTO). They do not indicate
the current settings; these are available through getFeature.
"""
def print_feature_parameter_info(hCamera, featureId):
assert 0 != hCamera, "No initialized camera"
print("\n----------Feature {0}----------\n".format(featureId))
# Read information about a feature
ret = PxLApi.getCameraFeatures(hCamera, featureId)
if(PxLApi.apiSuccess(ret[0])):
if(None != ret[1]):
cameraFeatures = ret[1]
assert 1 == cameraFeatures.uNumberOfFeatures, "Unexpected number of features"
assert cameraFeatures.Features[0].uFeatureId == featureId, "Unexpected returned featureId"
print_camera_feature(cameraFeatures.Features[0])
"""
In this case, what we'll do is demonstrate the use of FeatureId.ALL to read information
about all features at once.
However, we have to be careful because the order of the features is not
such that we can just index into the array using the feature id value.
Rather, we have to explicitly search the array for the specific feature.
"""
def print_feature_parameter_info2(hCamera, featureId):
assert 0 != hCamera, "No initialized camera"
featureIndex = -1
print("\n----------Feature {0}----------\n".format(featureId))
# Read information about all features
ret = PxLApi.getCameraFeatures(hCamera, PxLApi.FeatureId.ALL)
if(PxLApi.apiSuccess(ret[0])):
cameraFeatures = ret[1]
assert 1 < cameraFeatures.uNumberOfFeatures, "Unexpected number of features"
# Where in the structure of cameraFeatures is the feature we're interested in?
for i in range(cameraFeatures.uNumberOfFeatures):
if(featureId == cameraFeatures.Features[i].uFeatureId):
                featureIndex = i  # store the array index, not the feature id
break
# Did we find it?
if(-1 == featureIndex):
print("ERROR: Unable to find the information for feature {0}".format(featureId))
return
print_camera_feature(cameraFeatures.Features[featureIndex])
"""
Feature Shutter
FeatureId.SHUTTER is the exposure time.
"""
def print_feature_shutter(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature Shutter:\n")
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.SHUTTER)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
print("Exposure time: {0} seconds\n".format(params[0]))
decode_feature_flags(flags)
"""
Feature White Balance
FeatureId.WHITE_BALANCE is not the RGB white balance, but rather the Color Temperature.
For the RGB white balance, see feature FeatureId.WHITE_SHADING.
Here we assume a colour camera.
If you're running this with a mono camera, getFeature will return an error.
"""
def print_feature_white_balance(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature White Balance:\n")
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.WHITE_BALANCE)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
print("Colour Temperature: {0} degrees Kelvin\n".format(params[0]))
decode_feature_flags(flags)
"""
Feature Trigger
At this point in time FeatureId.TRIGGER has 5 parameters.
"""
def print_feature_trigger(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature Trigger:\n")
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.TRIGGER)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
assert PxLApi.TriggerParams.NUM_PARAMS == len(params), "Returned Trigger params number is different"
print("Mode = {0}".format(params[PxLApi.TriggerParams.MODE]))
print("Type = {0} {1}".format(params[PxLApi.TriggerParams.TYPE],
decode_trigger_type(params[PxLApi.TriggerParams.TYPE])))
print("Polarity = {0} {1}".format(params[PxLApi.TriggerParams.POLARITY],
decode_polarity(params[PxLApi.TriggerParams.POLARITY])))
print("Delay = {0}".format(params[PxLApi.TriggerParams.DELAY]))
print("Parameter = {0}\n".format(params[PxLApi.TriggerParams.PARAMETER]))
decode_feature_flags(flags)
def decode_trigger_type(triggerType):
switcher = {
PxLApi.TriggerTypes.FREE_RUNNING: "trigger type FREE_RUNNING",
PxLApi.TriggerTypes.SOFTWARE: "trigger type SOFTWARE",
PxLApi.TriggerTypes.HARDWARE: "trigger type HARDWARE"
}
return switcher.get(triggerType, "Unknown trigger type")
def decode_polarity(polarity):
switcher = {
0: "negative polarity",
1: "positive polarity"
}
return switcher.get(polarity, "Unknown polarity")
"""
Feature GPIO
At this point in time we assume that GPIO has 6 parameters.
An error will be reported if you're using a microscopy camera
because they don't support GPIO.
"""
def print_feature_gpio(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature GPIO:\n")
# Get information about GPO1 by setting params[0] == 1
params = [1]
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.GPIO, params)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
assert PxLApi.GpioParams.NUM_PARAMS == len(params), "Returned GPIO params number is different"
print("GpioNumber = {0}".format(params[PxLApi.GpioParams.INDEX]))
print("Mode = {0}".format(params[PxLApi.GpioParams.MODE]))
print("Polarity = {0} {1}".format(params[PxLApi.GpioParams.POLARITY],
decode_polarity(params[PxLApi.GpioParams.POLARITY])))
decode_feature_flags(flags)
"""
Feature Saturation
Again we assume that this is a color camera.
getFeature will return an error if the camera is a mono camera.
"""
def print_feature_saturation(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature Saturation:\n")
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.SATURATION)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
assert 1 == len(params), "Returned params number is different"
print("Saturation = {0}".format(params[0]))
decode_feature_flags(flags)
def main():
# We assume there's only one camera connected
ret = PxLApi.initialize(0)
if(PxLApi.apiSuccess(ret[0])):
hCamera = ret[1]
# Print some information about the camera
print_feature_parameter_info(hCamera, PxLApi.FeatureId.SHUTTER)
print_feature_shutter(hCamera)
print_feature_parameter_info(hCamera, PxLApi.FeatureId.WHITE_BALANCE)
print_feature_white_balance(hCamera)
print_feature_parameter_info(hCamera, PxLApi.FeatureId.TRIGGER)
print_feature_trigger(hCamera)
print_feature_parameter_info(hCamera, PxLApi.FeatureId.GPIO)
print_feature_gpio(hCamera)
print_feature_parameter_info(hCamera, PxLApi.FeatureId.SATURATION)
print_feature_saturation(hCamera)
# Demonstrate two ways to get the same information
print_feature_parameter_info(hCamera, PxLApi.FeatureId.ROI)
print_feature_parameter_info2(hCamera, PxLApi.FeatureId.ROI)
# Uninitialize the camera now that we're done with it.
PxLApi.uninitialize(hCamera)
return 0
else:
print("ERROR: {0}\n".format(ret[0]))
return 1
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
pixelink-support.noreply@github.com
|
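Every reader in the sample repeats the same pattern: call getFeature, check apiSuccess, then unpack the (returnCode, flags, params) tuple. A small convenience wrapper over that pattern — purely illustrative, using only calls already demonstrated above:

def get_first_param(hCamera, featureId):
    # ret is (returnCode, flags, params); params[0] is the primary value
    ret = PxLApi.getFeature(hCamera, featureId)
    if PxLApi.apiSuccess(ret[0]):
        return ret[2][0]
    return None

# e.g. exposure in seconds:
# exposure = get_first_param(hCamera, PxLApi.FeatureId.SHUTTER)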
f8ac1d12a3df4b9ed4b473f9423f2f82d7fc386a
|
734df5f20c434750dd5fe6fc9b91b2c08e7359be
|
/colleges/urls.py
|
86370d442cfb4c556c74883fe29dc93b76aa1586
|
[] |
no_license
|
andamuthu/college_system
|
125483f1b54b48b1d80b7303bb22596d4eea4dbe
|
d718d4d579f076ec0916e477ff3e5189419272e4
|
refs/heads/master
| 2022-06-03T14:59:47.526368 | 2020-05-02T14:22:15 | 2020-05-02T14:22:15 | 260,703,685 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 184 |
py
|
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('marks_system/',include('marks_system.urls')),
path('admin/', admin.site.urls),
]
|
[
"andamuthu.a@gmail.com"
] |
andamuthu.a@gmail.com
|
34baa6ee2ae01bf7a7a2a5672393d079c838bb83
|
645a92f8a56687b56fc63b2942f16f6e0401ed82
|
/workforce/hw/group/py/01_center_summaries.py
|
79617525deb23e885b7b93caf3129bb5625e55c0
|
[] |
no_license
|
Busyclover/icl
|
4c8ef6240003f03881527d3f4b6c74fd4ce50560
|
d13e8545415117ab1759dd0d75a15c9f199d6c63
|
refs/heads/master
| 2021-06-20T00:30:42.655511 | 2017-07-21T09:30:23 | 2017-07-21T09:30:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,770 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 07:46:43 2016
@author: jim
"""
# %% Setup - load packages
import pandas as pd
import numpy as np
import codecs
import os
import re
import nltk
from collections import Counter
# %% define untility functions
def extract_nouns(text):
nouns = []
tokens = nltk.word_tokenize(text)
tags = nltk.pos_tag(tokens)
for item in tags:
if item[1] == 'NN' or item[1] == 'NNP' or item[1] == 'NNS' or item[1] == 'NNPS':
nouns.append(item)
return nouns
def stop_words(word_list):
    # Counter is already imported at module level
    counts = Counter(word_list)
stop_list =[];
new_word_list=[];
for key in counts:
new_word_list.append(key)
freq=counts[key]/float(len(counts))
if freq >0.1:
stop_list.append(key)
return (new_word_list, stop_list)
# %% step 1 - get the data for the centers
# list all files in directory
files = os.listdir("../data/center_summaries")
# convenience subsetter for testing
to_get = len(files)
# generate firm symbols and years
centres = [f.split('.')[0] for f in files]
# set up lists to store variables
global_list = []
centre_list = []
# loop over files to extract nouns
for i in range(to_get):
f = files[i] # get next file
path = "../data/center_summaries/" + f # set up the path
content = codecs.open(path, 'r', # read the file
encoding = 'utf-8',
errors = 'ignore').read()
# filtered = re.sub('\n', '', content) # do a bit of cleaning
nouns = extract_nouns(content) # extract the nouns
global_list = global_list + nouns # update the global list of nouns
centre_list.append(nouns) # add centre nouns to list
# %% step 2 - remove duplicates and stopwords from global list
# this remove duplicates and generates a list of stop words
(global_list, stop_list) = stop_words(global_list)
# this removes stop words
global_list = [word for word in global_list if word not in stop_list]
# %% step 3 - remove stop words and duplicates from individual firm dictionaries
# remove duplicates and stopwords from firm-level dictionaries
centre_list_deduped = []
for dic in centre_list:
dic = Counter(dic).keys()
dic = [word for word in dic if word not in stop_list]
centre_list_deduped.append(dic)
## %% step 4 - create and write data frame
## generate data frame of centre and deduped nouns
#centers_to_nouns = {'centre': centres[:to_get],
# 'nouns': centre_list_deduped}
#
#centers_df = pd.DataFrame(centers_to_nouns)
#centers_df.to_csv("../data/cleaned/01_centres_to_nouns.csv", index = False)
|
[
"jimmy22theave@gmail.com"
] |
jimmy22theave@gmail.com
|
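What `extract_nouns()` actually filters on is the output of nltk's tokenizer and POS tagger; a minimal demo, assuming the 'punkt' and 'averaged_perceptron_tagger' data packages have been downloaded:

import nltk

tokens = nltk.word_tokenize("The centre studies robust control systems.")
print(nltk.pos_tag(tokens))
# pairs tagged NN/NNS/NNP/NNPS (e.g. ('centre', 'NN')) are what the filter keeps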
4b423356352383a53ed31702fe7a21d92bb9f396
|
18ed561f3025b3212a49a017bbe314867862a164
|
/0x04-python-more_data_structures/1-search_replace.py
|
ed4a090894c0c9382de8aedffad44471267b88a6
|
[] |
no_license
|
ChristianCampos-55/holbertonschool-higher_level_programming
|
0720a47c63cbdd80631e7b5f918079f7f0f1bcb4
|
441f3349f2150420cde2c56a451d404863e873ad
|
refs/heads/master
| 2023-03-02T10:54:09.895190 | 2021-02-10T18:20:35 | 2021-02-10T18:20:35 | 259,389,034 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 264 |
py
|
#!/usr/bin/python3
def search_replace(my_list, search, replace):
    new_list = []
    for i in my_list:
        if i != search:
            new_list.append(i)
        else:
            new_list.append(replace)
    return new_list
|
[
"1566@holbertonschool.com"
] |
1566@holbertonschool.com
|
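A quick usage example of `search_replace`; the original list is left untouched and a new one is returned:

print(search_replace([1, 2, 3, 2], 2, 9))  # [1, 9, 3, 9]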
6fc788e173ac857ab0db7d6f3e2dbb3adbbf8333
|
cb12b301f06e26a404197b99dcf1abb4329b2c22
|
/catalog.py
|
6ee5282895195e57a820ba3d1c22244343f7e86b
|
[
"MIT"
] |
permissive
|
garaujo23/Catalog-Web-App
|
255c54e1a88c48e59b9fb670877eb439d9f510a2
|
4b96fc209adc48f129c2496224c38962d68b07af
|
refs/heads/master
| 2020-05-05T09:15:59.229118 | 2019-05-19T05:15:05 | 2019-05-19T05:15:05 | 179,896,789 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,724 |
py
|
from flask import Flask, render_template, request, redirect
from flask import session as login_session, url_for, flash, jsonify
import random
import string
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item
import datetime
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
# create Flask app
app = Flask(__name__)
# Google sign in ID
id = '198020618168-a4ieutk5gt8bhc59l2jorugjd62jijvn.apps.googleusercontent.com'
# bind to database with SQLalchemy
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
# To check if an item is already in that category
def checkItem(item_title, category_title):
DBSession = sessionmaker(bind=engine)
session = DBSession()
category = session.query(Category).filter_by(title=category_title).one()
check = session.query(Item.id).filter(
Item.title == item_title).filter(Item.category == category)
return session.query(check.exists()).scalar()
# Login page
@app.route('/login')
def showLogin():
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
# Oauth method for Google sign in
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('../client_secrets.json',
scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != id:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is\
already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius:150px;\
-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s"
% login_session['username'])
print "done!"
return output
# Revoke auth/sign out
@app.route('/gdisconnect')
def gdisconnect():
if 'access_token' not in login_session:
return redirect('/login')
access_token = login_session['access_token']
    print 'In gdisconnect access token is %s' % access_token
print 'User name is: '
print login_session['username']
if access_token is None:
response = 'Current user not connected.'
return render_template('logout.html', response=response)
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s'\
% login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print 'result is '
print result
if result['status'] == '200':
del login_session['access_token']
del login_session['username']
del login_session['email']
del login_session['picture']
response = 'Successfully logged out'
return render_template('logout.html', response=response)
else:
response = 'Failed to revoke\
token for given user'
return render_template('logout.html', response=response)
# Home page showing categories and items
@app.route('/')
def showCatalog():
DBSession = sessionmaker(bind=engine)
session = DBSession()
catalog = session.query(Category).all()
items = session.query(Item).order_by(Item.date_time.desc()).limit(5).all()
return render_template('catalog.html', category=catalog, items=items)
# Show all items in category
@app.route('/category/<string:category_title>/items')
def showCategories(category_title):
DBSession = sessionmaker(bind=engine)
session = DBSession()
category = session.query(Category).filter_by(title=category_title).one()
items = session.query(Item).filter_by(category_id=category.id).all()
session.close()
return render_template('category.html', category=category, items=items)
# Show a specific item
@app.route('/category/<string:category_title>/<string:item_title>')
def showItem(category_title, item_title):
DBSession = sessionmaker(bind=engine)
session = DBSession()
catalog = session.query(Category).filter_by(title=category_title).one()
item = session.query(Item).filter_by(title=item_title).first()
session.close()
return render_template('item.html', category=catalog, item=item)
# JSON endpoint to get item information
@app.route('/category/<string:category_title>/<string:item_title>.json')
def itemJSON(item_title, category_title):
DBSession = sessionmaker(bind=engine)
session = DBSession()
category = session.query(Category).filter_by(title=category_title).one()
item = session.query(Item).filter_by(
title=item_title).filter_by(category_id=category.id).one()
session.close()
return jsonify(item=item.serialize)
# Add a new item if logged in
@app.route('/item/new', methods=['GET', 'POST'])
def newItem():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
now = datetime.datetime.now()
DBSession = sessionmaker(bind=engine)
session = DBSession()
if checkItem(request.form['title'], request.form['category']):
flash("Item already exists!")
catalog = session.query(Category).all()
return render_template('newitem.html', category=catalog)
else:
category = session.query(Category).filter_by(
title=request.form['category']).one()
newItem = Item(title=request.form['title'],
description=request.form['description'],
category=category,
date_time=now.strftime("%Y-%m-%d %H:%M"))
session.add(newItem)
session.commit()
return redirect(url_for('showCatalog'))
else:
DBSession = sessionmaker(bind=engine)
session = DBSession()
catalog = session.query(Category).all()
return render_template('newitem.html', category=catalog)
# Edit item if logged in
@app.route('/category/<string:category_title>/<string:item_title>/edit',
methods=['GET', 'POST'])
def editItem(category_title, item_title):
if 'username' not in login_session:
return redirect('/login')
DBSession = sessionmaker(bind=engine)
session = DBSession()
catalog = session.query(Category).all()
category = session.query(Category).filter_by(title=category_title).one()
editItem = session.query(Item).filter_by(
title=item_title).filter_by(category_id=category.id).one()
if request.method == 'POST':
if checkItem(request.form['title'], request.form['category']):
flash("Item already exists!")
return render_template('edititem.html', category=catalog,
category_title=category_title,
item=editItem)
else:
if request.form['title']:
editItem.title = request.form['title']
if request.form['description']:
editItem.description = request.form['description']
if request.form['category']:
editCategory = session.query(Category).filter_by(
title=request.form['category']).first()
editItem.category_id = editCategory.id
session.add(editItem)
session.commit()
return redirect(url_for('showItem',
category_title=editItem.category.title,
item_title=editItem.title))
else:
return render_template('edititem.html',
category=catalog,
category_title=category_title,
item=editItem)
# Delete item if logged in
@app.route('/category/<string:category_title>/<string:item_title>/delete',
methods=['GET', 'POST'])
def deleteItem(item_title, category_title):
if 'username' not in login_session:
return redirect('/login')
DBSession = sessionmaker(bind=engine)
session = DBSession()
category = session.query(Category).filter_by(title=category_title).one()
itemToDelete = session.query(Item).filter_by(
title=item_title).filter_by(category_id=category.id).one()
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
return redirect(url_for('showCatalog'))
else:
return render_template('deleteitem.html',
item=itemToDelete,
category=itemToDelete.category)
if __name__ == "__main__":
app.debug = True
app.secret_key = 'super_secret_key'
app.run(host='0.0.0.0', port=5000)
|
[
"gabearaujo25@gmail.com"
] |
gabearaujo25@gmail.com
|
a50e9935b9d2a3aae1119e351cf00d229b172e8e
|
18f4332ccaccf7e0a3a36d701f84358516ee6562
|
/test/classes.py
|
f2a42849e9f4e1794eceda42846528a2b8c0d09e
|
[] |
no_license
|
anishLearnsToCode/python-workshop-1
|
d4a4f04d635aae15ff7411e817863a50e877e51e
|
524a329233ce1ae20d926fff53f46925cd9cba3e
|
refs/heads/master
| 2022-11-22T09:10:42.674296 | 2020-07-15T18:38:26 | 2020-07-15T18:38:26 | 275,112,553 | 3 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 356 |
py
|
import sympy
class Person:
def __init__(self, firstName, lastname):
self.firstName = firstName
self.lastName = lastname
def fullName(self):
return self.firstName + ' ' + self.lastName
anish = Person('anish', 'sachdeva')
john = Person('john', 'doe')
print(anish.fullName())
print(john.fullName())
print(sympy.isprime(2))
|
[
"anish_bt2k16@dtu.ac.in"
] |
anish_bt2k16@dtu.ac.in
|
1ba82e68bbd6b265e9d6a7edad5a9811060f2e50
|
97ee5c0f2320aab2ca1b6ad0f18a4020dbd83d1c
|
/venv/Lib/site-packages/ibm_watson_machine_learning/libs/repo/swagger_client/models/experiment_input.py
|
43cf064a9983c91487e04e2cf38808fc5fae6743
|
[] |
no_license
|
yusufcet/healty-hearts
|
4d80471e82a98ea1902b00c8998faed43f99616c
|
a4cd429484e857b849df08d93688d35e632b3e29
|
refs/heads/main
| 2023-05-28T13:57:09.323953 | 2021-05-06T04:15:27 | 2021-05-06T04:15:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,751 |
py
|
# coding: utf-8
"""
    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# (C) Copyright IBM Corp. 2020.
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pprint import pformat
from six import iteritems
import re
class ExperimentInput(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, tags=None, settings=None, training_references=None, training_data_reference=None, training_results_reference=None):
"""
ExperimentInputExperiments - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'tags': 'list[TagRepository]',
'settings': 'ExperimentInputSettings',
'training_references': 'list[TrainingReferenceExperiments]',
'training_data_reference': 'ConnectionObjectSourceExperiments',
'training_results_reference': 'ConnectionObjectTargetExperiments'
}
self.attribute_map = {
'tags': 'tags',
'settings': 'settings',
'training_references': 'training_references',
'training_data_reference': 'training_data_reference',
'training_results_reference': 'training_results_reference'
}
self._tags = tags
self._settings = settings
self._training_references = training_references
self._training_data_reference = training_data_reference
self._training_results_reference = training_results_reference
@property
def tags(self):
"""
Gets the tags of this ExperimentInputExperiments.
:return: The tags of this ExperimentInputExperiments.
:rtype: list[TagRepository]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this ExperimentInputExperiments.
:param tags: The tags of this ExperimentInputExperiments.
:type: list[TagRepository]
"""
self._tags = tags
@property
def settings(self):
"""
Gets the settings of this ExperimentInputExperiments.
:return: The settings of this ExperimentInputExperiments.
:rtype: ExperimentInputSettings
"""
return self._settings
@settings.setter
def settings(self, settings):
"""
Sets the settings of this ExperimentInputExperiments.
:param settings: The settings of this ExperimentInputExperiments.
:type: ExperimentInputSettings
"""
self._settings = settings
@property
def training_references(self):
"""
Gets the training_references of this ExperimentInputExperiments.
:return: The training_references of this ExperimentInputExperiments.
:rtype: list[TrainingReferenceExperiments]
"""
return self._training_references
@training_references.setter
def training_references(self, training_references):
"""
Sets the training_references of this ExperimentInputExperiments.
:param training_references: The training_references of this ExperimentInputExperiments.
:type: list[TrainingReferenceExperiments]
"""
self._training_references = training_references
@property
def training_data_reference(self):
"""
Gets the training_data_reference of this ExperimentInputExperiments.
:return: The training_data_reference of this ExperimentInputExperiments.
:rtype: ConnectionObjectSourceExperiments
"""
return self._training_data_reference
@training_data_reference.setter
def training_data_reference(self, training_data_reference):
"""
Sets the training_data_reference of this ExperimentInputExperiments.
:param training_data_reference: The training_data_reference of this ExperimentInputExperiments.
:type: ConnectionObjectSourceExperiments
"""
self._training_data_reference = training_data_reference
@property
def training_results_reference(self):
"""
Gets the training_results_reference of this ExperimentInputExperiments.
This is required for DL
:return: The training_results_reference of this ExperimentInputExperiments.
:rtype: ConnectionObjectTargetExperiments
"""
return self._training_results_reference
@training_results_reference.setter
def training_results_reference(self, training_results_reference):
"""
Sets the training_results_reference of this ExperimentInputExperiments.
This is required for DL
:param training_results_reference: The training_results_reference of this ExperimentInputExperiments.
:type: ConnectionObjectTargetExperiments
"""
self._training_results_reference = training_results_reference
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
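# Editor's usage sketch (an addition, not part of the generated file; the empty
# field values below are placeholders):
if __name__ == '__main__':
    experiment = ExperimentInput(tags=[], settings=None)
    print(experiment.to_str())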
|
[
"yusufcetin9999@gmail.com"
] |
yusufcetin9999@gmail.com
|
7622ac86304511de6539e45378b787ae4cb659a9
|
dd660ea2bee9fdf568da47f9760568c0d5c91049
|
/test_prepare_data_from_rgb_detection.py
|
1fa84b7d384c3f3b8a7cc21adfce2cd6d28e9fb5
|
[] |
no_license
|
kanhua/lyft-3d-main
|
4cad265b9c92a62ab79ebab4e01e2ffe2ad8f025
|
1c74500f0bf5d52317af29cb350368832eb42aba
|
refs/heads/master
| 2021-08-07T18:48:05.983138 | 2020-09-17T00:49:19 | 2020-09-17T00:49:19 | 219,532,364 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 524 |
py
|
import os
from prepare_lyft_data import prepare_frustum_data_from_scenes, level5data
#from test_data_loader import level5testdata
output_file = os.path.join("/dltraining/artifacts/lyft_val_from_rgb.pickle")
token_file = os.path.join("/dltraining/artifacts/lyft_val_token_from_rgb.pickle")
# prepare_frustum_data_from_traincsv(64, output_file)
prepare_frustum_data_from_scenes(100000, output_file, token_filename=token_file, scenes=range(151,160),
from_rgb_detection=True, lyftdf=level5data)
|
[
"kanhua.lee@gmail.com"
] |
kanhua.lee@gmail.com
|
f5b76c17592dde22bcb05c9f18c865d4d373eaf1
|
361cdb847271a30e5cb4a12c3b3da94082b360a8
|
/z2kit/elf/__init__.py
|
697a0b3beb2134fe77bc6ea9162f568527605eaa
|
[] |
no_license
|
morivden/seccamp_z2
|
9af3985e77013fd1d7693b5226f0e39b3e53ba95
|
e275cf0dbff0a14dfc7dccc4c2cfc1fd2026866e
|
refs/heads/master
| 2021-01-22T06:54:58.320474 | 2017-09-04T02:23:55 | 2017-09-04T02:23:55 | 102,302,836 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
# z2kit.elf
from z2kit.elf.ELFFile import *
from z2kit.elf.ELFFileIdent import *
from z2kit.elf.ELFFileClasses import *
from z2kit.elf.ELFFileHeader import *
from z2kit.elf.ELFProgramHeader import *
from z2kit.elf.ELFSectionHeader import *
from z2kit.elf.ELFDynamicData import *
from z2kit.elf.ELFDynamicInfo import *
|
[
"fuziyamavolcano@yahoo.co.jp"
] |
fuziyamavolcano@yahoo.co.jp
|
257dc81f65a789c4900940a2f544bb7806e3a00c
|
d8b11ccad6dc9719e2292f21f7bd6a89b4dffe98
|
/HospitalMgmt/manage.py
|
07aa977e87db93dd1453304d922afd4dbab3cedb
|
[] |
no_license
|
meena5/Django-Hospital-Management-System
|
8f0e89bb58e3f5de93e6be1ee4f3a62cf5bf3036
|
e890f4ed0e1ac60a50cf69524011673a4064af65
|
refs/heads/main
| 2023-08-07T21:29:21.850648 | 2021-09-16T06:56:26 | 2021-09-16T06:56:26 | 407,054,574 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 690 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HospitalMgmt.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
meena5.noreply@github.com
|
c241ccb910ec13422d9d7b4a1f7920ed1136b1a9
|
05a2c342c3e3a79ab95416ba073ad51ebefe4ff9
|
/demo89_sqlite_insert.py
|
dc61f87a58ee844248a3b33e4032e701aa0f1285
|
[] |
no_license
|
mike-taiwan/ucom_python
|
a2dbe1de17e7be561efb0dcfd00bca0eb1eb8b3f
|
f65cbd26310c964594a1f664f6b62af7f999f57d
|
refs/heads/master
| 2020-06-05T14:35:51.000865 | 2019-06-21T09:07:01 | 2019-06-21T09:09:21 | 192,460,610 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 531 |
py
|
import sqlite3
connection1 = sqlite3.connect('db\\sqlite3_lab1.sqlite')
employees = [{'NAME': 'Mark', "AGE": 38, 'DEPT': 1, 'ADDR': 'Taipei'},
{'NAME': 'John', "AGE": 43, 'DEPT': 2, 'ADDR': 'Hsinchu'},
{'NAME': 'James', "AGE": 47, 'DEPT': 1, 'ADDR': 'Taipei'}]
INSERT_DML = "INSERT INTO EMPLOYEE(NAME, AGE, DEPT, ADDRESS) VALUES(?,?,?,?)"
for e in employees:
    connection1.execute(INSERT_DML, (e['NAME'], e['AGE'], e['DEPT'], e['ADDR']))
print ".",
print
connection1.commit()
connection1.close()
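# Editor's sketch: read the rows back with a parameterized SELECT
# (table and column names taken from the INSERT above).
connection2 = sqlite3.connect('db\\sqlite3_lab1.sqlite')
for row in connection2.execute("SELECT NAME, AGE, DEPT, ADDRESS FROM EMPLOYEE WHERE DEPT = ?", (1,)):
    print row
connection2.close()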
|
[
"mike@taifex.com.tw"
] |
mike@taifex.com.tw
|
20f0a8e8b32f52012d2a703d08458770c9ce272b
|
ccd86a24147214349065db2261e7ec7535af43b0
|
/text/datasets/stackoverflow_test.py
|
b6a0c7984ab0a1dbf761c3f7f17d5430cfb052ba
|
[] |
no_license
|
dallascard/DWAC
|
28cfdf926d7b89f4be729ea6c09aa9f9f9aadcd4
|
1869c9f61d38dae6fbb06dca659422e12d352fbb
|
refs/heads/master
| 2020-04-04T08:31:13.123379 | 2019-02-04T17:36:53 | 2019-02-04T17:36:53 | 155,784,426 | 20 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 433 |
py
|
import torch
from text.datasets.stackoverflow_dataset import StackOverflowDataset
from text.datasets.text_dataset import collate_fn
train_dataset = StackOverflowDataset('./data/stackoverflow', partition='train', download=True)
cuda = False
kwargs = {'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=256,
shuffle=True,
collate_fn=collate_fn,
**kwargs)
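# Editor's sketch: materialize a single batch to sanity-check the loader; the
# exact structure of `batch` is whatever collate_fn returns (assumed here).
batch = next(iter(train_loader))
print(type(batch), len(batch))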
|
[
"dcard@andrew.cmu.edu"
] |
dcard@andrew.cmu.edu
|
bedc172d8d1fbcb004ad5ffceff067c69cb9c48d
|
31c125db294f926c77c81584492f2ab92d2085ab
|
/file_hash.py
|
144903e41feb8bc390f65995eee2d4f2bf24b03f
|
[] |
no_license
|
TechNight-Dev/DupliSniffer
|
b10d19358bf1dc8d2bc96a898a271c9703354827
|
a6e27f0011c24b547fd42c879f8ea47c7eec3306
|
refs/heads/master
| 2023-04-23T01:50:26.430026 | 2021-05-04T11:14:09 | 2021-05-04T11:14:09 | 358,055,720 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,015 |
py
|
#! /usr/bin/python
# https://www.programcreek.com/python/example/102910/hashlib.algorithms_guaranteed
import os
import hashlib
# TODO: Eventually add changing the type of hash used.
# Need to figure out what hashes are available
# Most likely use a dictionary to store types and the functions used.
# {'md5': hashlib.md5(), }
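# Editor's sketch of the dictionary idea above (an assumption, not part of the
# original file): map names to constructors, not instances -- a single shared
# hashlib.md5() object would accumulate state across files. calculate_hash()
# below reaches the same goal with getattr(hashlib, ...).
HASH_CONSTRUCTORS = {'md5': hashlib.md5, 'sha1': hashlib.sha1, 'sha256': hashlib.sha256}
# Usage: hasher = HASH_CONSTRUCTORS[hash_type]()  # fresh hasher per file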
class HashedFile():
def __init__(self, path, hash_type=None):
self.path = path
if not os.path.exists(self.path):
raise Exception("File path does not exist")
        if hash_type is None:
self.hash_type = 'md5'
else:
self.hash_type = hash_type.lower()
if self.hash_type not in hashlib.algorithms_guaranteed:
raise Exception(f"Algorithm '{hash_type}' is not supported")
self.duplicate = False
self.hash = self.calculate_hash()
self.file_name = os.path.basename(self.path)
def __eq__(self, other_hash):
if isinstance(other_hash, HashedFile):
return self.hash == other_hash.hash
else:
return False
def calculate_hash(self, blocksize=65536):
hasher = getattr(hashlib, self.hash_type)()
with open(self.path, 'rb') as a_file:
buf = a_file.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = a_file.read(blocksize)
return hasher.hexdigest()
def is_duplicate(self):
self.duplicate = True
def del_file(self):
os.remove(self.path)
def get_file_type(self):
pass
if __name__ == '__main__':
a_file = HashedFile('/home/derpy/Coding/Python/Practice/os_walk.py')
print(a_file.hash)
second_file = HashedFile('/home/derpy/Coding/Python/Practice/os_walk.py')
print(second_file.hash)
if a_file == second_file:
a_file.is_duplicate()
second_file.is_duplicate()
print("Duplicate")
else:
print('Not Duplicate')
|
[
"Tech-Night_Dev@protonmail.com"
] |
Tech-Night_Dev@protonmail.com
|
1b33b628e77b71abfc262b292bdf0d5c0b755753
|
5b4a662efb40a30493a9eb70644e38a623a326e1
|
/account/migrations/0001_initial.py
|
45bbecba5e981ee912ab23c21b7c02adbc9c7e21
|
[
"Apache-2.0"
] |
permissive
|
gemechis-elias/CodeTopia
|
1e60a402ef8963ef31178950ad4e07ac7cfcd691
|
cf57a1180112071efd7708d9c3ed7310b20bea2a
|
refs/heads/master
| 2021-03-12T05:26:31.911170 | 2020-03-10T13:53:19 | 2020-03-10T13:53:19 | 246,593,143 | 2 | 0 | null | 2020-03-11T14:27:17 | 2020-03-11T14:27:17 | null |
UTF-8
|
Python
| false | false | 2,416 |
py
|
# Generated by Django 3.0.2 on 2020-01-29 12:31
import account.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_picture', models.ImageField(height_field=300, upload_to=account.models.get_profile_pic_path, verbose_name='Profile Picture', width_field=300)),
('education_background', models.CharField(choices=[('PostGraduate', 'PostGraduate'), ('UnderGraduate', 'UnderGraduate'), ('HighSchool', 'HighSchool')], max_length=30, verbose_name='Educational Background')),
('phone_number_1', models.CharField(max_length=13, verbose_name='First Phone Number')),
('phone_number_2', models.CharField(blank=True, max_length=13, null=True, verbose_name='Second Phone Number')),
('github_url', models.URLField(blank=True, max_length=150, null=True, verbose_name='Github homepage URL.')),
('personal_url', models.URLField(blank=True, max_length=150, null=True, verbose_name='Personal website URL.')),
('facebook_account', models.URLField(blank=True, max_length=255, null=True, verbose_name='Facebook profile page.')),
('twitter_account', models.URLField(blank=True, max_length=255, null=True, verbose_name='Twitter profile page.')),
('linkedin_account', models.URLField(blank=True, max_length=255, null=True, verbose_name='LinkedIn profile page.')),
('short_bio', models.CharField(blank=True, max_length=60, null=True, verbose_name='Describe yourself')),
('bio', models.CharField(blank=True, max_length=400, null=True, verbose_name='Short bio')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='related_user')),
],
managers=[
('highschool', django.db.models.manager.Manager()),
],
),
]
|
[
"root@DESKTOP-STIPQ3E.localdomain"
] |
root@DESKTOP-STIPQ3E.localdomain
|
b95f8ba488584066b302a6a08ed1db4c3a32464f
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_047/ch120_2020_10_05_23_47_15_174844.py
|
a558472d357ff5bd6fe65878fca7f5e3315483c1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,043 |
py
|
import random
dinheiro = 100
print('Seu dinheiro é ',dinheiro )
while dinheiro > 0:
z = input('voce quer jogar?s/n')
if z == 's':
a = random.randint(0,36)
aposta = int(input('Digite o valor de sua aposta'))
aposta_1 = input('Escolha entre as seguintes opções n,i,p')
dinheiro -=aposta
print('Seu dinheiro é depois da aposta é',dinheiro)
if aposta_1 == 'n':
numero136 = int(input('Digite um numero de 1 a 36'))
if numero136 ==a:
dinheiro = dinheiro+(35*aposta)
print(dinheiro)
else:
print(dinheiro)
if aposta_1 == 'i':
if a % 2 != 0:
dinheiro +=aposta*2
print(dinheiro)
else:
print(dinheiro)
if aposta_1 == 'p':
if a%2 == 0:
dinheiro +=aposta*2
print(dinheiro)
else:
print(dinheiro)
else:
        break
|
[
"you@example.com"
] |
you@example.com
|
c5e24d6c07b96e188c1a829df99df94748b07fd0
|
caf9b3b8164f8c3d10de9076950eb91208982066
|
/hw2/hw2.py
|
eb4b43290da6c712075f137e059341205fe7680d
|
[] |
no_license
|
malviyanaman/startercode
|
e6574053b0cb87fcbbd0b21e676dfa83686d1507
|
fdfc725c3434c0af5a626170c80b5a1ab20ad2d1
|
refs/heads/master
| 2023-01-08T06:09:26.987629 | 2020-11-04T18:06:46 | 2020-11-04T18:06:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,380 |
py
|
# I collaborated with:
#
# 1)
# 2)
# ...
#
from dask import delayed
from dask.distributed import Client
from typing import List, Dict, Tuple, Any
import re
def tokenize(line: str) -> List[str]:
""" Splits a line into words """
trimmed = line.strip()
    return re.split(r"\W+", trimmed) if trimmed else []
def count_them(word_list: List[str], file_list: List[str]) -> Dict[str, int]:
""" Returns a dictionary of {word: count}
Input:
word_list: a python list of words we are interested in
file_list: a list of file names
Output:
a python dictionary where the key is a word (from word_list) and the value
is the number of times that word appears in all of the files.
"""
pass
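# Editor's sketch (an assumption, not the assignment solution): a plain serial
# version showing the semantics count_them should implement; the exercise is to
# parallelize this with dask.delayed.
def count_them_serial(word_list: List[str], file_list: List[str]) -> Dict[str, int]:
    counts = {word: 0 for word in word_list}
    wanted = set(word_list)
    for fname in file_list:
        with open(fname, "r") as infile:
            for line in infile:
                for word in tokenize(line):
                    if word in wanted:
                        counts[word] += 1
    return counts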
def sortfile(f: str) -> List[str]:
""" Returns an array consisting of the sorted words in f"""
with open(f, "r") as infile:
words = [word for line in infile.readlines() for word in tokenize(line)]
words.sort()
return words
def mergesort(file_list: List[str]) -> Tuple[Any, List[str]]:
""" Performas a parallelized merge sort with branching factor 2 over the files in file_list
Input:
file_list: list of file names
Output:
a tuple. The first part of the tuple is the delayed object for the computation, the second part is a list
of the sorted words
"""
pass
|
[
"dkifer@cse.psu.edu"
] |
dkifer@cse.psu.edu
|
e8f23a7336cd50d2264bb098f7518212daa11cac
|
900a7285b2fc4d203717e09c88e8afe5bba9144f
|
/axonius_api_client/cli/grp_tools/cmd_signup.py
|
66b2ef2ed8634543da42f66131719152e429d498
|
[
"MIT"
] |
permissive
|
geransmith/axonius_api_client
|
5694eb60964141b3473d57e9a97929d4bff28110
|
09fd564d62f0ddf7aa44db14a509eaafaf0c930f
|
refs/heads/master
| 2022-11-23T01:43:52.205716 | 2020-06-12T14:15:38 | 2020-06-12T14:15:38 | 280,499,094 | 0 | 0 |
MIT
| 2020-07-17T18:35:48 | 2020-07-17T18:35:47 | null |
UTF-8
|
Python
| false | false | 1,870 |
py
|
# -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
import click
from ...api.entry import Entry
from ...tools import json_dump
from ..options import URL, add_options
EXPORT = click.option(
"--export-format",
"-xf",
"export_format",
type=click.Choice(["json", "str"]),
help="Format of to export data in",
default="str",
show_envvar=True,
show_default=True,
)
PASSWORD = click.option(
"--password",
"-p",
"password",
required=True,
help="Password to assign to admin user",
prompt="Password to assign to admin user",
hide_input=True,
show_envvar=True,
show_default=True,
)
COMPANY = click.option(
"--company-name",
"-cn",
"company_name",
required=True,
help="Company Name",
prompt="Company Name",
show_envvar=True,
show_default=True,
)
CONTACT = click.option(
"--contact-email",
"-ce",
"contact_email",
required=True,
help="Contact Email",
prompt="Contact Email",
show_envvar=True,
show_default=True,
)
OPTIONS = [URL, PASSWORD, COMPANY, CONTACT, EXPORT]
@click.command(name="signup")
@add_options(OPTIONS)
@click.pass_context
def cmd(ctx, url, password, company_name, contact_email, export_format):
"""Perform the initial signup to an instance."""
entry = Entry(url=url)
with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
data = entry.signup(
password=password, company_name=company_name, contact_email=contact_email
)
if export_format == "str":
lines = [
f"AX_URL={url}",
f"AX_KEY={data['api_key']}",
f"AX_SECRET={data['api_secret']}",
]
click.secho("\n".join(lines))
ctx.exit(0)
if export_format == "json":
data["url"] = url
click.secho(json_dump(data))
        ctx.exit(0)
|
[
"jimbosan@gmail.com"
] |
jimbosan@gmail.com
|
628f30ffe75a06c4baceaaac598d49f3f4f6a13b
|
650ed42e30e2fd2794f034601b41c19d198a323c
|
/venv/bin/django-admin.py
|
868a6c33d20d2f7221d5c6e241ad3b78e03577c7
|
[] |
no_license
|
Jaanger123/TaskDjango
|
a4b2ee6e8feef97c74512948aefa1a43fbfcb5da
|
5ab1a7aef3fcde04a5d718fc4c0b0ac7bd79fe84
|
refs/heads/master
| 2023-06-23T16:10:54.970106 | 2021-07-24T02:58:31 | 2021-07-24T02:58:31 | 388,983,949 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 691 |
py
|
#!/home/hello/PycharmProjects/TaskDjango/venv/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"jbarakanov@inbox.ru"
] |
jbarakanov@inbox.ru
|
8cbda765bda1b28cea26eaa2510f79462cfd1360
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/third_party/blink/web_tests/external/wpt/webdriver/tests/execute_script/promise.py
|
1eab7822c5c2f3c8d3fa51ac0713ebdc2c19269c
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 |
BSD-3-Clause
| 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null |
UTF-8
|
Python
| false | false | 2,965 |
py
|
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
def execute_script(session, script, args=None):
if args is None:
args = []
body = {"script": script, "args": args}
return session.transport.send(
"POST", "/session/{session_id}/execute/sync".format(
session_id=session.session_id),
body)
def test_promise_resolve(session):
response = execute_script(session, """
return Promise.resolve('foobar');
""")
assert_success(response, "foobar")
def test_promise_resolve_delayed(session):
response = execute_script(session, """
return new Promise(
(resolve) => setTimeout(
() => resolve('foobar'),
50
)
);
""")
assert_success(response, "foobar")
def test_promise_all_resolve(session):
response = execute_script(session, """
return Promise.all([
Promise.resolve(1),
Promise.resolve(2)
]);
""")
assert_success(response, [1, 2])
def test_await_promise_resolve(session):
response = execute_script(session, """
const res = await Promise.resolve('foobar');
return res;
""")
assert_success(response, "foobar")
def test_promise_reject(session):
response = execute_script(session, """
return Promise.reject(new Error('my error'));
""")
assert_error(response, "javascript error")
def test_promise_reject_delayed(session):
response = execute_script(session, """
return new Promise(
(resolve, reject) => setTimeout(
() => reject(new Error('my error')),
50
)
);
""")
assert_error(response, "javascript error")
def test_promise_all_reject(session):
response = execute_script(session, """
return Promise.all([
Promise.resolve(1),
Promise.reject(new Error('error'))
]);
""")
assert_error(response, "javascript error")
def test_await_promise_reject(session):
response = execute_script(session, """
await Promise.reject(new Error('my error'));
return 'foo';
""")
assert_error(response, "javascript error")
def test_promise_resolve_timeout(session):
session.timeouts.script = .1
response = execute_script(session, """
return new Promise(
(resolve) => setTimeout(
() => resolve(),
1000
)
);
""")
assert_error(response, "timeout error")
def test_promise_reject_timeout(session):
session.timeouts.script = .1
response = execute_script(session, """
return new Promise(
(resolve, reject) => setTimeout(
() => reject(new Error('my error')),
1000
)
);
""")
assert_error(response, "timeout error")
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
27f8d38ef71fc17960dd91b21b536958713450a9
|
06038042424c83fcf1c5f8626e6cc3597b8a3696
|
/load.py
|
1921d562d03c4e2907962ba8dc03f2f8c3d51ce5
|
[] |
no_license
|
cellinlab/Shot_Detection
|
f6141969220aa7c056a40d598453fe9631a52dd1
|
1e8b6430504182f766d3613074ad53818bdfb93d
|
refs/heads/master
| 2022-02-20T22:57:41.306612 | 2018-12-07T07:16:53 | 2018-12-07T07:16:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,625 |
py
|
# coding: utf-8
"""
データをロードするクラス
データ整形を行うクラス
"""
import numpy as np
import os, sys, csv
from keras.preprocessing.image import load_img, img_to_array
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from keras.utils import Sequence
class ImageDataGenerator(object):
def __init__(self, args):
self.path = args.datasetpath
self.seq_length = args.seqlength
self.strides = args.strides
self.batch_size = args.batchsize
self.imgsize = args.imgsize
self.reset()
def reset(self):
""" reset data load list """
#print("reset data load list")
self.X = []
self.Y = []
self.X_data = []
self.Y_data = []
def flow_from_directory(self):
while True:
with open(self.path, 'r')as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
self.Y_data.append(int(row[1]))
img = load_img(row[0], target_size=(self.imgsize, self.imgsize))
img_array = img_to_array(img)
x = (img_array/255.).astype(np.float32)
#print("x.shape", x.shape)
self.X_data.append(x)
                    # Once batch_size * seq_length samples are stored, reshape them and yield
if len(self.Y_data) == self.batch_size * self.seq_length:
""" data format """
length_of_sequence = len(self.Y_data)
for i in range(0, length_of_sequence-self.seq_length+1, self.strides):
self.X.append(self.X_data[i: i+self.seq_length])
                            # Reshape Y_data
if self.Y_data[i] == 1:
self.Y_data[i]= 0
                            # Label 1 if the window contains a shot point, otherwise 0
#print("Y_data list: ", self.Y_data[i:i+self.seq_length])
if 1 in self.Y_data[i: i+self.seq_length]:
#print("this data include shot")
self.Y.append(1)
else:
#print("no include shot")
self.Y.append(0)
X_train = np.array(self.X).reshape(len(self.X), self.seq_length, self.imgsize, self.imgsize, 3)
Y_train = np.array(self.Y).reshape(len(self.Y), 1)
self.reset()
yield X_train, Y_train
def load_csv_data(args):
X_data = []
Y_data = []
X = []
Y = []
seq_length = args.seqlength
strides = args.strides
# load csv file
with open(args.datasetpath, 'r') as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
Y_data.append(int(row[1]))
img = load_img(row[0], target_size=(args.imgsize, args.imgsize))
img_array = img_to_array(img)
x = (img_array/255.).astype(np.float32)
#print("x.shape", x.shape)
X_data.append(x)
""" data format """
length_of_sequence = len(Y_data)
for i in range(0, length_of_sequence-seq_length+1, strides):
X.append(X_data[i: i+seq_length])
        # Reshape Y_data
if Y_data[i] == 1:
Y_data[i]= 0
        # Label 1 if the window contains a shot point, otherwise 0
#print("Y_data list: ", Y_data[i:i+seq_length])
if 1 in Y_data[i: i+seq_length]:
#print("this data include shot")
Y.append(1)
else:
#print("no include shot")
Y.append(0)
# convert np.array
X = np.array(X).reshape(len(X), seq_length, args.imgsize, args.imgsize, 3)
Y = np.array(Y).reshape(len(Y), 1)
print("convert!!!!!!!!!!!!")
print (X.shape)
print (Y.shape)
""" split train data/ validation data """
train_len = int(len(Y)* 0.8)
validation_len = len(Y) - train_len
X_train, X_valid, Y_train, Y_valid =\
train_test_split(X, Y, test_size=validation_len)
return X_train, X_valid, Y_train, Y_valid
# Load and reshape the data for per-frame classification
def load_csv_data_classes(args):
X_data = []
Y_data = []
X = []
Y = []
seq_length = args.seqlength
strides = args.strides
# load csv file
with open(args.datasetpath, 'r') as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
Y_data.append(int(row[1]))
img = load_img(row[0], target_size=(args.imgsize, args.imgsize))
img_array = img_to_array(img)
x = (img_array/255.).astype(np.float32)
#print("x.shape", x.shape)
X_data.append(x)
""" data format """
length_of_sequence = len(Y_data)
for i in range(0, length_of_sequence-seq_length+1, strides):
        # Reshape Y_data
if Y_data[i] == 1:
Y_data[i]= 0
        # Label 1 if the window contains a shot point, otherwise 0
print("Y_data list: ", Y_data[i:i+seq_length])
        # Keep the window only if it contains at most one shot point
        if np.sum(np.array(Y_data[i:i+seq_length]) == 1) < 2:
X.append(X_data[i: i+seq_length])
Y.append(Y_data[i:i+seq_length])
# convert np.array
X = np.array(X).reshape(len(X), seq_length, args.imgsize, args.imgsize, 3)
Y = np.array(Y).reshape(len(Y), seq_length)
print("convert!!!!!!!!!!!!")
print (X.shape)
print (Y.shape)
""" split train data/ validation data """
train_len = int(len(Y)* 0.8)
validation_len = len(Y) - train_len
X_train, X_valid, Y_train, Y_valid =\
train_test_split(X, Y, test_size=validation_len)
return X_train, X_valid, Y_train, Y_valid
class Load_Feature_Data():
def __init__(self, args):
self.X_data = []
self.Y_data = []
self.X_ = []
self.Y_ = []
self.feature_length = args.featurelength
self.seq_length = args.seqlength
self.stride = args.stride
self.datasetpath = args.datasetpath
def load(self):
with open(self.datasetpath, 'r') as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
self.Y_data.append(int(row[1]))
                feature = list(map(float, row[2:]))
                self.X_data.append(feature)
#self.X_data.astype('float32')
#self.Y_data.astype('float32')
# data normalize
#scaler = MinMaxScaler(feature_range=(0, 1))
#self.X_data = scaler.fit_transform(self.X_data)
#return (self.X_data, self.Y_data)
""" データ整形, 正規化など """
ms = MinMaxScaler()
self.X_data = ms.fit_transform(self.X_data)
        # Augment the samples by sliding a window over the time series
        length_of_sequence = len(self.X_data)  # length of the full time series
for i in range(0, length_of_sequence - self.seq_length+1, self.stride):
self.X_.append(self.X_data[i: i + self.seq_length])
self.Y_.append(self.Y_data[i: i + self.seq_length])
""" 丁寧に書いた場合
X_data = np.zeros((len(X), seq_length, features_length), dtype=float)
Y_data = np.zeros((len(Y), features_length), dtype=float)
for i, seq in enumerate(X_):
for t, value in enumerate(seq):
                for u, feature in enumerate(value):
X_data[i, t, u] = feature
Y_data[i, 0] = Y_[i]
"""
self.X_ = np.array(self.X_).reshape(len(self.X_), self.seq_length, self.feature_length)
self.Y_ = np.array(self.Y_).reshape(len(self.Y_), self.seq_length, 1)
print (self.X_.shape)
print (self.Y_.shape)
""" 訓練データと検証データに分割 """
train_len = int(len(self.X_) * 0.9)
validation_len = len(self.X_) - train_len
X_train, X_valid, Y_train, Y_valid =\
train_test_split(self.X_, self.Y_, test_size=validation_len)
return X_train, X_valid, Y_train, Y_valid
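# Editor's illustrative sketch (toy sizes assumed): the sliding window used by
# the loaders above, shown on a plain list.
if __name__ == '__main__':
    seq_length, stride = 4, 2
    series = list(range(10))
    windows = [series[i: i + seq_length]
               for i in range(0, len(series) - seq_length + 1, stride)]
    print(windows)  # [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]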
|
[
"famichiki.yuuki@gmail.com"
] |
famichiki.yuuki@gmail.com
|
af013237e41c052d47cb81786e488a2d266b59e6
|
7c18960ab331885a9011a371f25a75842267e874
|
/mi-collectors/event_collectors/linkedin.py
|
0c7f36d6b507b175623e91bbc00b6b78df1a4eaf
|
[] |
no_license
|
hburrows/thisis.me-MVP
|
3ffa1d31d16cbcfb1a25d0bbbafff9fcba082a82
|
1ca86e418cb9fdd6971005291e306c0bc85907b2
|
refs/heads/master
| 2020-05-08T22:04:42.141604 | 2012-04-26T18:37:37 | 2012-04-26T18:37:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,417 |
py
|
'''
Created on Feb 8, 2012
@author: howard
'''
import json, urllib
import oauth2 as oauth
from datetime import timedelta
from time import mktime
from mi_utils.oauth import make_request
from mi_schema.models import Author
from full_collector import FullCollector
from mi_model.Event import LinkedInEvent
DATETIME_STRING_FORMAT = '%a %b %d %H:%M:%S +0000 %Y'
USER_INFO = 'account/verify_credentials.json'
USER_TIMELINE = 'statuses/user_timeline.json'
FULL_LOOKBACK_WINDOW = timedelta (days = 365)
PAGE_SIZE = 200
class LinkedInFullCollector(FullCollector):
def getServiceName(self):
return 'linkedin'
# update_author
def build_one(self,afm,dbSession,oauthConfig,incremental):
super(LinkedInFullCollector, self).build_one(afm,dbSession,oauthConfig,incremental)
# get the name of the author
authorName = dbSession.query(Author.author_name).filter_by(id=afm.author_id).one()
auxData = json.loads(afm.auxillary_data)
userId = auxData['id']
# setup what we need for oauth
consumer = oauth.Consumer(oauthConfig['key'], oauthConfig['secret'])
token = oauth.Token(key=afm.access_token,secret=afm.access_token_secret)
client = oauth.Client(consumer, token)
try:
# request the user's profile
response = make_request(client,'http://api.linkedin.com/v1/people/~:(picture-url)',{'x-li-format':'json'})
respJSON = json.loads(response)
profileImageURL = respJSON['pictureUrl'] if respJSON.has_key('pictureUrl') else None
traversal = self.beginTraversal(dbSession,afm,profileImageURL)
# optimization to request only those since we've last updated
args = {'scope':'self',
'type':['APPS','CMPY','CONN','JOBS','JGRP','PICT','PRFX','RECU','PRFU','QSTN','SHAR','VIRL'],
'count':PAGE_SIZE}
# incremental
if traversal.baselineLastUpdateTime:
# since a little before the last update time
args['after'] = '%s000' % int(mktime(traversal.baselineLastUpdateTime.timetuple()))
# full
else:
# limit to only one year of data
args['after'] = '%s000' % int(mktime((traversal.now - FULL_LOOKBACK_WINDOW).timetuple()))
      offset = 0
# args['start'] = offset
url = '%s?%s' % ('http://api.linkedin.com/v1/people/~/network/updates',urllib.urlencode(args, True))
while url and traversal.totalAccepted < 200:
# request the user's updates
content = make_request(client,url,{'x-li-format':'json'})
try:
rawJSON = json.loads(content)
except:
self.log.error('***ERROR*** parse error')
self.log.error(content)
continue
# print json.dumps(rawJSON, sort_keys=True, indent=2)
if rawJSON.get('_total',0) == 0:
url = None
continue
LinkedInEvent.eventsFromJSON(self,rawJSON,traversal,afm.author_id,userId,client)
# setup for the next page (if any)
if rawJSON['_total'] < PAGE_SIZE:
url = None
else:
offset = offset + PAGE_SIZE
# args['start'] = offset
url = '%s?%s' % ('http://api.linkedin.com/v1/people/~/network/updates',urllib.urlencode(args, True))
self.endTraversal(traversal,authorName)
except Exception, e:
self.log.error('****ERROR****')
self.log.error(e)
dbSession.rollback()
raise #continue
|
[
"howard@mobileidentity.me"
] |
howard@mobileidentity.me
|
c09a4f65adcc93679812ddebbc0009b229c0a622
|
31cbced08ee989dd5832cbd65cd182fc8b12ac32
|
/Drills/Drill05/Drill05.py
|
5822fbd3e18b59a12b095c04286a7231fc4888b3
|
[] |
no_license
|
yessol9096/2DGP
|
c274679d0b6949621b59a4a25411dbf47778c21f
|
b599cdaf6faa356b368ef44d7597b170ed07d70e
|
refs/heads/master
| 2020-03-28T12:25:32.899391 | 2018-11-22T09:32:49 | 2018-11-22T09:32:49 | 148,296,262 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,581 |
py
|
from pico2d import *
open_canvas()
grass = load_image('grass.png')
character = load_image('animation_sheet.png')
def go_to(sx, sy, ex, ey):
if (sx < ex and sy < ey):
move_right_up(sx, sy, ex, ey)
if (sx < ex and sy > ey):
move_right_down(sx, sy, ex, ey)
if (sx > ex and sy < ey):
move_left_up(sx, sy, ex, ey)
if (sx > ex and sy > ey):
move_left_down(sx, sy, ex, ey)
def move_right_up(sx, sy, ex, ey):
frame = 0
while True:
clear_canvas()
grass.draw(400, 30)
character.clip_draw(frame * 100, 100, 100, 100, sx, sy)
update_canvas()
frame = (frame + 1) % 8
if(sx < ex):
sx += 2
if(sy < ey):
sy += 2
delay(0.01)
if((sx > ex or sx == ex) and (sy > ey or sy == ey)):
break
delay(0.05)
get_events()
def move_right_down(sx, sy, ex, ey):
frame = 0
while True:
clear_canvas()
grass.draw(400, 30)
character.clip_draw(frame * 100, 100, 100, 100, sx, sy)
update_canvas()
frame = (frame + 1) % 8
if (sx < ex):
sx += 2
if (sy > ey):
sy -= 2
delay(0.01)
if ((sx > ex or sx == ex)and (sy < ey or sy == ey)):
break
delay(0.05)
get_events()
def move_left_up(sx, sy, ex, ey):
frame = 0
while True:
clear_canvas()
grass.draw(400, 30)
character.clip_draw(frame * 100, 0, 100, 100, sx, sy)
update_canvas()
frame = (frame + 1) % 8
if (sx > ex):
sx -= 2
if (sy < ey):
sy += 2
delay(0.01)
if ((sx < ex or sx == ex)and (sy > ey or sy == ey)):
break
delay(0.05)
get_events()
def move_left_down(sx, sy, ex, ey):
frame = 0
while True:
clear_canvas()
grass.draw(400, 30)
character.clip_draw(frame * 100, 0, 100, 100, sx, sy)
update_canvas()
frame = (frame + 1) % 8
if (sx > ex):
sx -= 2
if (sy > ey):
sy -= 2
delay(0.01)
if ((sx < ex or sx == ex)and (sy < ey or sy == ey)):
break
delay(0.05)
get_events()
# Done
# Added YouTube video
while True:
go_to(203, 535, 132, 243)
go_to(132, 243, 535, 470)
go_to(535, 470, 477, 203)
go_to(477, 203, 715, 136)
go_to(715, 136, 316, 225)
go_to(316, 255, 510, 92)
go_to(510, 92, 692, 518)
go_to(692, 518, 682, 336)
go_to(682, 336, 712, 349)
go_to(712, 349, 203, 535)
close_canvas()
|
[
"yessol9096@naver.com"
] |
yessol9096@naver.com
|
99a308971b21faefc0432f1b6dce573594d67ded
|
8e364aff5c70a165ba4f3f78e466a26a871400e4
|
/questao_15.py
|
b6fff181a2d84352b689b735a149ec8ce23f391f
|
[] |
no_license
|
ifpb-cz-ads/pw1-2020-2-ac04-team-sergiolucas
|
90a637a17a9fbaa5fa2e347cb346c11e181e76f9
|
a15deeb7b97827b0c2c63602d7e6ba89438e793d
|
refs/heads/main
| 2023-03-14T00:48:57.395205 | 2021-03-03T18:27:02 | 2021-03-03T18:27:02 | 343,500,608 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,456 |
py
|
# Write a program that displays a menu of options: addition, subtraction,
# division, multiplication, and quit. Print the times table of the chosen
# operation. Repeat until the "quit" option is chosen.
print('1- Adição')
print('2- Subtração')
print('3- Multiplicação')
print('4- Divisão')
print('5- Sair')
opcao = input('Escolha uma opção, ou digite 5 para sair: ')
while opcao != '5':
tabuada = int(input('Digite um número para exibir a tabuada: '))
if opcao == '1':
i = 1
while i <= 10:
n = tabuada + i
print('{0:2d} + {1:2d} = {2:3d}'.format(tabuada, i, n))
i = i + 1
elif opcao == '2':
i = 1
while i <= 10:
n = tabuada - i
print('{0:2d} - {1:2d} = {2:3d}'.format(tabuada, i, n))
i = i + 1
elif opcao == '3':
i = 1
while i <= 10:
n = tabuada * i
print('{0:2d} x {1:2d} = {2:3d}'.format(tabuada, i, n))
i = i + 1
elif opcao == '4':
i = 1
while i <= 10:
n = tabuada / i
print('{0:2d} / {1:2d} = {2:3f}'.format(tabuada, i, n))
i = i + 1
else:
print('Por favor, escolha uma opção válida!')
print('1- Adição')
print('2- Subtração')
print('3- Multiplicação')
print('4- Divisão')
print('5- Sair')
opcao = input('Escolha uma opção, ou digite 5 para sair:')
|
[
"sergiotutorshd@gmail.com"
] |
sergiotutorshd@gmail.com
|
8c9b89e76f032bfe58bb45cc453d944a7e5a9f3a
|
9b50a71960b610df9d3b6ac9394c92881be7e85e
|
/web_scraping/main.py
|
1475549e72b95c8a997f0482942bf07fbaa70c3c
|
[] |
no_license
|
mayures/web-scraping
|
67e4e7f6d2b7abc446aa07fb177c84f3724c67a6
|
2264b65eff1412fbf78566ce680c4f369a848956
|
refs/heads/main
| 2023-06-29T02:36:23.337249 | 2021-07-22T13:59:59 | 2021-07-22T13:59:59 | 388,481,913 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
# From an already present HTML file
from bs4 import BeautifulSoup
with open('home.html', 'r') as html_file:
    content = html_file.read()
soup = BeautifulSoup(content, 'lxml')
course_html_tags = soup.find_all('h1')
for course in course_html_tags:
    print(course.text)
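# Editor's sketch (assumes the `requests` package and network access): the same
# parsing applied to a fetched page instead of a local file.
import requests
response = requests.get('https://example.com')
live_soup = BeautifulSoup(response.text, 'lxml')
for heading in live_soup.find_all('h1'):
    print(heading.text)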
|
[
"kumarmayuresh906@gmail.com"
] |
kumarmayuresh906@gmail.com
|
619d0815b20fa7056c05a7995a8e02504ee79582
|
a2b1e3de2b5730c9c9174b583d84a6a17ca4c81d
|
/linear_classification/linear_classification.py
|
3e90626fdede0c9924bb18b490f0adc6c1397f8c
|
[] |
no_license
|
suryanshkumar/pyTorchPractice
|
bcbf7a4144865e21d0caae127d14eeaa451cf7df
|
dc6fbc06643d4de2b7e27c3e019bdaa58dabf6b2
|
refs/heads/master
| 2020-03-16T20:36:57.205796 | 2018-05-19T22:30:44 | 2018-05-19T22:30:44 | 132,965,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,064 |
py
|
#Author: Suryansh Kumar
#Step0: import the essential library
import torch
import numpy as np
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
#Step1: Load data using dataloader
class Diabetes_Dataset(Dataset):
def __init__(self):
data = np.loadtxt('diabetes.csv.gz', delimiter=',', dtype=np.float32)
self.len = data.shape[0]
self.train_data = torch.from_numpy(data[:, 0:-1])
self.train_label = torch.from_numpy(data[:, [-1]])
def __getitem__(self, index):
return self.train_data[index], self.train_label[index]
def __len__(self):
return self.len
dataset = Diabetes_Dataset()
train_loader = DataLoader(dataset=dataset, batch_size=40, shuffle=True)
#Step2 Define model class (In this example 3 layers)
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.layer1 = torch.nn.Linear(8, 6)
self.layer2 = torch.nn.Linear(6, 4)
self.layer3 = torch.nn.Linear(4, 1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
output1 = self.sigmoid(self.layer1(x))
output2 = self.sigmoid(self.layer2(output1))
y_pred = self.sigmoid(self.layer3(output2))
return y_pred
model = Model()
#Step3: Select the inbuilt loss and optimizer
criterion = torch.nn.BCELoss(size_average=True)
optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)
#Step4: train the network
# Follow the rhythm: 1. predict with current parameters (forward), 2. compute the loss, 3. backpropagate gradients and update the parameters
for epoch in range(2):
for i, data in enumerate(train_loader, 0):
input_data, input_label = data
input_data, input_label = Variable(input_data), Variable(input_label)
#1 .Forward pass
y_pred = model(input_data)
#2. estimate loss
loss = criterion(y_pred, input_label)
print(epoch, i, loss.data[0])
#3. backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
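# Editor's evaluation sketch (an addition; the 0.5 decision threshold is an
# assumption): accuracy of the trained model over the same loader.
correct, total = 0, 0
for input_data, input_label in train_loader:
    prediction = (model(Variable(input_data)).data > 0.5).float()
    correct += (prediction == input_label).sum()
    total += input_label.size(0)
print('accuracy:', correct / float(total))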
|
[
"k.sur46@gmail.com"
] |
k.sur46@gmail.com
|
0dfef170f1ed689a3af6295f05acabf4d8348904
|
79ad961f3c7a171bbff98fafc443f881e02bd8e3
|
/blog_project/celery.py
|
8936c3009f24009107b2d18ff2a468f2fd073886
|
[] |
no_license
|
St-D/Django_Blog
|
c06447f3db73699bea6f45f40f8a1efadb8ae287
|
5540e4b0994d38511f49a4e128c9a5423a381788
|
refs/heads/master
| 2020-03-26T15:21:56.178125 | 2018-08-27T20:56:31 | 2018-08-27T20:58:57 | 145,038,689 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 280 |
py
|
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_project.settings')
app = Celery('blog_project')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
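# Editor's sketch (hypothetical task; real tasks normally live in each Django
# app's tasks.py so autodiscover_tasks() can register them):
@app.task
def add(x, y):
    return x + y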
|
[
"stastastas@mail.ru"
] |
stastastas@mail.ru
|
c7bb8147a12a9f7442ea75ff49519f5296a6368b
|
0b480b28455d4ea133eaeec5625e2ce62660dbb1
|
/rango/migrations/0003_auto_20160718_1304.py
|
0020a44fea459a28b21e409be01887e4bd7732f7
|
[] |
no_license
|
jtr109/tango_with_django_exercise
|
8ff6c05321be8ca614a550abc6c66aef55886136
|
ce2aa7c5a12eae0352b435dc726bef4e378ef3c5
|
refs/heads/master
| 2020-09-22T09:28:34.217081 | 2016-08-30T02:49:35 | 2016-08-30T02:49:35 | 66,900,401 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('rango', '0002_auto_20160718_1300'),
]
operations = [
migrations.AddField(
model_name='category',
name='likes',
field=models.IntegerField(default=0),
preserve_default=True,
),
migrations.AddField(
model_name='category',
name='views',
field=models.IntegerField(default=0),
preserve_default=True,
),
]
|
[
"lyp_login@outlook.com"
] |
lyp_login@outlook.com
|
990d8e4ed4a0ad60d18e3facf7da99334b0f7bb9
|
287b2ad6d654331bc04a83ad34a84ef718a34d99
|
/src/pbmgx/nucmer/snp.py
|
b8e047638fa50a6ee6b4d057068ce94b2e53cdae
|
[] |
no_license
|
bnbowman/MetagenomicTools
|
05fb90b05b47ab9e1b78a676ca3506d2e1015d5c
|
b140bfd79e189bb4b6a57be36ca753ab94f03348
|
refs/heads/master
| 2016-08-05T16:45:56.470452 | 2014-01-14T23:23:59 | 2014-01-14T23:23:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,060 |
py
|
#! /usr/bin/env python
__author__ = 'bbowman@pacificbiosciences.com'
from pbcore.io.base import ReaderBase
class NucmerSnp( object ):
"""
A Class for representing a Nucmer SNP
"""
def __init__(self, line):
parts = line.strip().split()
parts = [p for p in parts if p != '|']
if len(parts) == 10:
self.P1 = int(parts[0])
self.S1 = parts[1]
self.S2 = parts[2]
self.P2 = int(parts[3])
self.BUFF = int(parts[4])
self.DIST = int(parts[5])
self.reference_length = None
self.query_length = None
self.reference = parts[8]
self.query = parts[9]
elif len(parts) == 12:
self.P1 = int(parts[0])
self.S1 = parts[1]
self.S2 = parts[2]
self.P2 = int(parts[3])
self.BUFF = int(parts[4])
self.DIST = int(parts[5])
self.reference_length = int(parts[6])
self.query_length = int(parts[7])
self.reference = parts[10]
self.query = parts[11]
else:
raise ValueError("Invalid Nucmer SNP record")
def has_lengths(self):
return (self.reference_length is not None and
self.query_length is not None)
class NucmerSnpReader( ReaderBase ):
"""
    A Class for reading Nucmer SNP files
"""
def __iter__(self):
try:
parts = split_nucmer_snp_file(self.file)
for line in parts:
yield NucmerSnp( line )
except AssertionError:
raise ValueError("Invalid Nucmer Coordinate file")
def split_nucmer_snp_file( handle ):
"""
    Split a nucmer snp file line-by-line, skipping any header rows
"""
for line in handle:
line = line.strip()
if not line:
continue
if line.startswith('/') or line.startswith('NUCMER') or \
line.startswith('[P1]') or line.startswith('==='):
continue
yield line
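# Editor's usage sketch ('out.snps' is a hypothetical show-snps output file,
# and pbcore's ReaderBase is assumed to open the path and support `with`):
if __name__ == '__main__':
    from collections import Counter
    with NucmerSnpReader('out.snps') as reader:
        base_counts = Counter(snp.reference for snp in reader)
    print(base_counts)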
|
[
"bbowman@pacificbiosciences.com"
] |
bbowman@pacificbiosciences.com
|