| column | type | range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7863d8927d006aaf6bb1f2450af7fe6550ab070
|
e34d4bf879910b8f41068c1efb90915897e53d53
|
/system_design_ladder/GeoHashII.py
|
b15bec1dd5ca21a631b684b5a96092a0772cec5f
|
[] |
no_license
|
ZhouningMan/LeetCodePython
|
6cfc30f0b76f6162502410fef5639fde4801bd74
|
cad9585c440efb329c9321648f94c58ded198438
|
refs/heads/master
| 2020-12-10T03:53:48.824344 | 2020-01-13T02:29:02 | 2020-01-13T02:29:02 | 233,494,907 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,179 |
py
|
class GeoHash:
    BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz"

    """
    @param: geohash: geohash a base32 string
    @return: latitude and longitude a location coordinate pair
    """
    def decode(self, geohash):
        binary = self._to_bin(geohash)
        lon_bin = [binary[i] for i in range(0, len(binary), 2)]
        lat_bin = [binary[i] for i in range(1, len(binary), 2)]
        longitude = self._bin_to_val(-180, 180, lon_bin)
        latitude = self._bin_to_val(-90, 90, lat_bin)
        return latitude, longitude

    def _to_bin(self, geohash):
        binary = ''
        for c in geohash:
            idx = GeoHash.BASE32.index(c)
            b = ''
            for i in range(5):
                b = str(idx % 2) + b
                idx = idx // 2
            binary += b
        return binary

    def _bin_to_val(self, low, high, binary):
        for b in binary:
            mid = (high + low) / 2
            if b == '1':  # our value is higher
                low = mid
            else:  # our value is lower
                high = mid
        return (low + high) / 2


if __name__ == '__main__':
    geoHash = GeoHash()
    geoHash.decode("wx4g0s")
|
[
"linfenglee321@gmail.com"
] |
linfenglee321@gmail.com
|
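The decoder above de-interleaves bits (even positions refine longitude, odd positions latitude) and binary-searches each coordinate range. For reference, a minimal sketch of the matching encoder; the `GeoHashEncoder` class and its `precision` parameter are invented here, not part of the original file:

class GeoHashEncoder:
    BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz"

    def encode(self, latitude, longitude, precision=6):
        # Interleave bits: even positions refine longitude, odd refine latitude,
        # mirroring the decode convention above.
        lat_lo, lat_hi = -90.0, 90.0
        lon_lo, lon_hi = -180.0, 180.0
        bits = []
        while len(bits) < precision * 5:
            if len(bits) % 2 == 0:  # longitude bit
                mid = (lon_lo + lon_hi) / 2
                if longitude >= mid:
                    bits.append('1')
                    lon_lo = mid
                else:
                    bits.append('0')
                    lon_hi = mid
            else:  # latitude bit
                mid = (lat_lo + lat_hi) / 2
                if latitude >= mid:
                    bits.append('1')
                    lat_lo = mid
                else:
                    bits.append('0')
                    lat_hi = mid
        # Pack each group of 5 bits into one base32 character.
        return ''.join(GeoHashEncoder.BASE32[int(''.join(bits[i:i + 5]), 2)]
                       for i in range(0, len(bits), 5))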
f6b693f1370e3d80c736a6b08d507d671d4a8bc5
|
008c065391d766fec2f2af252dd8a5e9bf5cb815
|
/Even Matrix.py
|
7e545a6a78adeb1c5ec75a406ef4644cbe57e481
|
[] |
no_license
|
22Rahul22/Codechef
|
b261ab43ff5ff64648a75ad1195e33cac2cfec52
|
1f645c779a250a71d75598e1eabad7e52dd6b031
|
refs/heads/master
| 2022-11-29T21:51:09.578798 | 2020-08-19T06:20:23 | 2020-08-19T06:20:23 | 288,650,009 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 890 |
py
|
t = int(input())
for _ in range(t):
    n = int(input())
    arr = [[0 for i in range(n)] for j in range(n)]
    sr = 0
    er = n
    sc = 0
    ec = n
    z = 0
    num = 1
    if n % 2 == 0:
        x = n // 2
    else:
        x = 1 + (n // 2)
    while z != x:
        j = sc
        while j < ec:
            arr[sr][j] = num
            num += 1
            j += 1
        sr += 1
        i = sr
        while i < er:
            arr[i][ec - 1] = num
            num += 1
            i += 1
        ec -= 1
        j = ec - 1
        while j >= sc:
            arr[er - 1][j] = num
            num += 1
            j -= 1
        er -= 1
        i = er - 1
        while i >= sr:
            arr[i][sc] = num
            num += 1
            i -= 1
        sc += 1
        z += 1
    for i in range(n):
        for j in range(n):
            print(arr[i][j], end=" ")
        print()
|
[
"rahulbhl22@gmail.com"
] |
rahulbhl22@gmail.com
|
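As a quick sanity check, tracing the four inner loops above for n = 3 yields the clockwise spiral fill (derived by hand from the code, not taken from the original row):

1 2 3
8 9 4
7 6 5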
30afeecf7a442f626392bcc9b54728254bb8a8be
|
60d5ea4f007d49768d250ef394003f554003e4d0
|
/python/Linked List/142.Linked List Cycle II.py
|
dec51f534aabccb931d8e8932d39d11aac643c6f
|
[] |
no_license
|
EvanJamesMG/Leetcode
|
dd7771beb119ea1250dbb3b147a09053298cd63b
|
fa638c7fda3802e9f4e0751a2c4c084edf09a441
|
refs/heads/master
| 2021-01-10T17:11:10.896393 | 2017-12-01T16:04:44 | 2017-12-01T16:04:44 | 46,968,756 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,911 |
py
|
# coding=utf-8
'''
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
Note: Do not modify the linked list.
'''

# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None

'''
Use fast and slow pointers. If the list has a cycle, the two pointers must meet inside the
cycle; then move the slow pointer back to the head and advance both at the same speed, and
they meet again exactly at the node where the cycle begins.

In the figure (http://www.cnblogs.com/zuoyuan/p/3701877.html), the distance from head to the
cycle start is K, the distance from the cycle start to the meeting point of fast and slow is M,
and the cycle length is L. Suppose fast has travelled Lfast and slow has travelled Lslow when
they meet. By construction:
    Lslow = K + M;  Lfast = K + M + n*L (n a positive integer);  Lfast = 2 * Lslow
It follows that Lslow = n*L and K = n*L - M.
So move slow back to head while fast stays at the meeting point, and step both forward one
node at a time: slow reaches the cycle start after K steps. Where is fast after those K steps?
Since K = (n-1)*L + (L-M), fast completes n-1 full laps plus L-M more steps, which also lands
on the cycle start. That is how the start is found.
'''
class Solution(object):
    def detectCycle(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        if head is None or head.next is None:
            return None
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if fast == slow:
                break
        if slow == fast:
            slow = head
            while slow != fast:
                slow = slow.next
                fast = fast.next
            return slow
        return None

# if __name__ == "__main__":
#
#     result = Solution().numTrees(3)
#     print result
|
[
"Evan123mg@gmail.com"
] |
Evan123mg@gmail.com
|
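A minimal check of the two-pointer logic above, assuming the ListNode and Solution classes from this file (node values and cycle layout are invented for illustration):

# Build 1 -> 2 -> 3 -> 4 -> back to 2 (cycle starts at the node with value 2).
nodes = [ListNode(v) for v in (1, 2, 3, 4)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
nodes[-1].next = nodes[1]  # close the cycle

entry = Solution().detectCycle(nodes[0])
print(entry.val)  # expected: 2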
6f54f5939a8fda03c24dfa9d9dbe33c08f498424
|
096ccaca86872b03a137edf58221413073d770cb
|
/helpers/24_web_apis_sources.py
|
0a219f85661a944bd17fb1db67075e5cf05ea372
|
[] |
no_license
|
DH-heima/webscrapping
|
f142962b50deed2628052dd7a48098a4afbcbada
|
1dc8f81f45db0d4366391c3052c5ab36f4d4bc5d
|
refs/heads/master
| 2022-02-02T23:26:22.520064 | 2019-06-13T13:38:10 | 2019-06-13T13:38:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,587 |
py
|
import os
# from ers import shops, COLLECTION_DATE, web_apis_traffic_sources_csv, web_apis_traffic_sources_aggregates_csv
import os.path as op

import numpy as np
import pandas as pd

BASE_DIR = "/code/mhers"
WAVE_NUMBER = 8
shops = pd.read_excel(op.join(BASE_DIR, "ressources/ERS-referential-shops.xlsx"), index_col=None)
COLLECTION_DATE = "2018-06-10"
web_apis_traffic_sources_csv = os.path.join(BASE_DIR, 'data/w_{}/final_csvs'.format(WAVE_NUMBER), 'shopgrid_details - web_apis_traffic_sources_w{}.csv'.format(WAVE_NUMBER))
web_apis_traffic_sources_aggregates_csv = os.path.join(BASE_DIR, 'data/w_{}/final_csvs'.format(WAVE_NUMBER), 'shopgrid_summary - web_apis_traffic_sources_w{}.csv'.format(WAVE_NUMBER))

# #####################################################################################################################
# web_apis_demographics_csv
# #####################################################################################################################
# This generates the dummy data and shouldn't be in production
mask = pd.DataFrame({'to_delete': [1]})
df = pd.DataFrame()
for c, row in shops.iterrows():
    tmp = pd.DataFrame(mask.copy())
    for k in ['shop_id', 'continent', 'country', 'region', 'segment']:
        tmp[k] = row[k]
    df = df.append(tmp)
df.drop(columns=['to_delete'], inplace=True)

# TODO : delete the random data creation and fetch the data in the proper dataset
df['direct'] = np.random.random(size=(df.shape[0], 1)) * 0.3
df['email'] = np.random.random(size=(df.shape[0], 1)) * 0.2
df['referrals'] = np.random.random(size=(df.shape[0], 1)) * 0.2
df['social'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['paid_search'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['display_ads'] = np.random.random(size=(df.shape[0], 1)) * 0.1
df['organic_search'] = 1 - df['direct'] - df['email'] - df['referrals'] - df['social'] - df['paid_search'] - df['display_ads']

# Todo : Time Span is the time over which the aggregates are calculated
df['time_span'] = "Apr. 2016 - Aug. 2018"

# Collection date
print('WARNING : PLEASE ENSURE THE COLLECTION_DATE is accurate :', COLLECTION_DATE)
df['collection_date'] = COLLECTION_DATE

final_cols = ['collection_date', 'time_span', 'continent', 'country', 'region', 'segment', 'shop_id', 'direct', 'email',
              'referrals', 'social', 'paid_search', 'display_ads', 'organic_search']
df = df[final_cols]
df.to_csv(web_apis_traffic_sources_csv, sep=';', index=False, encoding='utf-8')
print("File web_apis_traffic_sources_csv stored at : ", web_apis_traffic_sources_csv)

# #####################################################################################################################
# web_apis_demographics_aggregates_csv
# #####################################################################################################################
df['region'].fillna("", inplace=True)

# Aggregating
res = []
agregation_levels_list = [
    ['continent', 'country', 'region', 'segment'],
    ['continent', 'country', 'segment'],
    ['continent', 'segment'],
    ['segment'],
    ['continent', 'country', 'region'],
    ['continent', 'country'],
    ['continent'],
    ['collection_date']
]
# All agregations
for agg_level in agregation_levels_list:
    dfG2 = df.groupby(agg_level, as_index=False)
    dfG2 = dfG2.agg({
        'direct': {'direct': 'mean'},
        'email': {'email': 'mean'},
        'referrals': {'referrals': 'mean'},
        'social': {'social': 'mean'},
        'paid_search': {'paid_search': 'mean'},
        'display_ads': {'display_ads': 'mean'},
        'organic_search': {'organic_search': 'mean'},
    }).reset_index()
    dfG2.columns = dfG2.columns.droplevel(1)
    dfG2 = pd.DataFrame(dfG2)
    print(agg_level, 'adding', dfG2.shape)
    res.append(dfG2)

# Aggregate on all-levels
all_dfs = pd.concat(res, axis=0, ignore_index=True)

# Collection date
print('WARNING : PLEASE ENSURE THE COLLECTION_DATE is accurate :', COLLECTION_DATE)
all_dfs['collection_date'] = COLLECTION_DATE
# Todo : Time Span is the time over which the aggregates are calculated
all_dfs['time_span'] = "Apr. 2016 - Aug. 2018"
final_cols = ['collection_date', 'time_span', 'continent', 'country', 'region', 'segment', 'direct', 'display_ads',
              'email', 'organic_search', 'paid_search', 'referrals', 'social']
all_dfs = all_dfs[final_cols]
all_dfs.to_csv(web_apis_traffic_sources_aggregates_csv, sep=';', index=None, encoding='utf-8')
print("File web_apis_traffic_sources_aggregates_csv stored at : ", web_apis_traffic_sources_aggregates_csv, " -")
|
[
"pierre.chevalier@epitech.eu"
] |
pierre.chevalier@epitech.eu
|
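The script above relies on two pandas idioms that were later removed: appending to a DataFrame in a loop (`DataFrame.append`, gone in pandas 2.0) and dict-of-dict aggregation (the "nested renamer", rejected since pandas 1.0). A behavior-equivalent sketch against current pandas, assuming the same `shops`, `mask`, `res`, and `agregation_levels_list` as above:

import pandas as pd

# DataFrame.append in a loop -> collect frames, then one concat.
frames = []
for _, row in shops.iterrows():
    tmp = mask.copy()
    for k in ['shop_id', 'continent', 'country', 'region', 'segment']:
        tmp[k] = row[k]
    frames.append(tmp)
df = pd.concat(frames, ignore_index=True).drop(columns=['to_delete'])

# Dict-of-dict agg -> plain column selection with a single function
# (no renaming is actually needed here, the output names are unchanged).
source_cols = ['direct', 'email', 'referrals', 'social',
               'paid_search', 'display_ads', 'organic_search']
for agg_level in agregation_levels_list:
    dfG2 = df.groupby(agg_level, as_index=False)[source_cols].mean()
    res.append(dfG2)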
f7e2098e769e91b1838c62aee43d87264b9aa9cb
|
052d6ac57f2026aba22249368149b18027c78342
|
/frontstage_api/resources/register/confirm_organisation_survey.py
|
6331b7150306a3ab3887cebaf9c1d5eb733780ca
|
[
"MIT"
] |
permissive
|
ONSdigital/ras-frontstage-api
|
c34b41185cc825b49262c1879ad559778a54dbfc
|
7bb32a85868e2a241b8a0331b884155a36450669
|
refs/heads/master
| 2018-07-15T00:35:22.130352 | 2018-06-01T14:09:13 | 2018-06-01T14:09:13 | 105,001,932 | 2 | 1 |
MIT
| 2018-06-01T14:09:14 | 2017-09-27T09:54:26 |
Python
|
UTF-8
|
Python
| false | false | 1,917 |
py
|
import logging

from flask import jsonify, make_response, request
from flask_restplus import Resource, fields
from structlog import wrap_logger

from frontstage_api import auth, register_api
from frontstage_api.controllers import case_controller, collection_exercise_controller, iac_controller, party_controller, survey_controller

logger = wrap_logger(logging.getLogger(__name__))

enrolment_details = register_api.model('EnrolmentDetails', {
    'enrolment_code': fields.String(required=True),
})


@register_api.route('/confirm-organisation-survey')
class ConfirmOrganisationSurvey(Resource):

    @staticmethod
    @auth.login_required
    @register_api.expect(enrolment_details, validate=True)
    def post():
        logger.info('Attempting to retrieve organisation and survey data')
        enrolment_code = request.get_json().get('enrolment_code')

        # Verify enrolment code is active
        iac = iac_controller.get_iac_from_enrolment(enrolment_code)
        if not iac['active']:
            return make_response(jsonify(iac), 401)

        # Get organisation name
        case = case_controller.get_case_by_enrolment_code(enrolment_code)
        business_party_id = case['caseGroup']['partyId']
        organisation_name = party_controller.get_party_by_business_id(business_party_id).get('name')

        # Get survey name
        collection_exercise_id = case['caseGroup']['collectionExerciseId']
        collection_exercise = collection_exercise_controller.get_collection_exercise(collection_exercise_id)
        survey_id = collection_exercise['surveyId']
        survey_name = survey_controller.get_survey(survey_id).get('longName')

        response_json = {
            "organisation_name": organisation_name,
            "survey_name": survey_name
        }
        logger.info('Successfully retrieved organisation and survey data')
        return make_response(jsonify(response_json), 200)
|
[
"noreply@github.com"
] |
ONSdigital.noreply@github.com
|
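A sketch of how the endpoint above might be exercised; host, port, path prefix, credentials, and the enrolment code are all invented for illustration, and the real mount point depends on how register_api is configured:

import requests

resp = requests.post(
    'http://localhost:8080/register/confirm-organisation-survey',  # hypothetical URL
    auth=('user', 'pass'),  # the resource is behind auth.login_required
    json={'enrolment_code': 'fb747cq725lj'})  # hypothetical code
print(resp.status_code)  # 200 with organisation/survey names, 401 if the IAC is inactive
print(resp.json())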
6dd47cf9abf6588f76b33b1300c80b06fe34f86b
|
304e75224229786ba64c6ef2124007c305019b23
|
/src/easy/test_build_array_from_permutation.py
|
8fd8efbd03f279c3c5d2f1ed987d934e5687eadc
|
[] |
no_license
|
Takuma-Ikeda/other-LeetCode
|
9179a8100e07d56138fd3f3f626951195e285da2
|
499616d07011bee730b9967e9861e341e62d606d
|
refs/heads/master
| 2023-04-14T06:09:35.341039 | 2023-04-10T02:29:18 | 2023-04-10T02:29:18 | 226,260,312 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 637 |
py
|
import unittest

from answer.build_array_from_permutation import Solution


class TestSolution(unittest.TestCase):
    def setUp(self):
        self.nums = [
            [0, 2, 1, 5, 3, 4],
            [5, 0, 1, 2, 3, 4],
        ]
        self.answers = [
            [0, 1, 2, 4, 5, 3],
            [4, 5, 0, 1, 2, 3],
        ]

    def solution(self, i):
        s = Solution()
        result = s.buildArray(self.nums[i])
        self.assertEqual(self.answers[i], result)

    def test_solution0(self):
        self.solution(0)

    def test_solution1(self):
        self.solution(1)


if __name__ == "__main__":
    unittest.main()
|
[
"el.programdear@gmail.com"
] |
el.programdear@gmail.com
|
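The `answer.build_array_from_permutation` module this test imports is not included in the row. A minimal `Solution` that satisfies both test cases, using the standard ans[i] = nums[nums[i]] construction (this is a sketch of the expected interface, not the repository's actual implementation):

class Solution:
    def buildArray(self, nums):
        # ans[i] = nums[nums[i]] for a 0-indexed permutation of [0, n-1]
        return [nums[nums[i]] for i in range(len(nums))]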
d8cb4d738e3fca2d4ddb17040fa4fe5a789e0334
|
63e2bed7329c79bf67279f9071194c9cba88a82c
|
/SevOneApi/python-client/test/test_flow_falcon_visualization.py
|
51a57732e471078c158cccc29b73d4aae5586ecf
|
[] |
no_license
|
jsthomason/LearningPython
|
12422b969dbef89578ed326852dd65f65ab77496
|
2f71223250b6a198f2736bcb1b8681c51aa12c03
|
refs/heads/master
| 2021-01-21T01:05:46.208994 | 2019-06-27T13:40:37 | 2019-06-27T13:40:37 | 63,447,703 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 977 |
py
|
# coding: utf-8

"""
    SevOne API Documentation

    Supported endpoints by the new RESTful API  # noqa: E501

    OpenAPI spec version: 2.1.18, Hash: db562e6
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import swagger_client
from swagger_client.models.flow_falcon_visualization import FlowFalconVisualization  # noqa: E501
from swagger_client.rest import ApiException


class TestFlowFalconVisualization(unittest.TestCase):
    """FlowFalconVisualization unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testFlowFalconVisualization(self):
        """Test FlowFalconVisualization"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.flow_falcon_visualization.FlowFalconVisualization()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"johnsthomason@gmail.com"
] |
johnsthomason@gmail.com
|
652e8c01463ca031788666db93024bbc761ec75d
|
14856ffe01c711af7a41af0b1abf0378ba4ffde6
|
/Python/Fundamentals/Fun_with_Functions.py
|
4db600213841d74d4382c1514cc6f369abdc29a8
|
[] |
no_license
|
sharonanchel/coding-dojo
|
9a8db24eec17b0ae0c220592e6864510297371c3
|
d6c4a7efd0804353b27a49e16255984c4f4b7f2a
|
refs/heads/master
| 2021-05-05T18:17:48.101853 | 2017-06-23T23:53:51 | 2017-06-23T23:53:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 531 |
py
|
# Odd/Even
def odd_even():
    for i in range(1, 2001):
        if i % 2 == 0:
            type = 'even'
        else:
            type = 'odd'
        print 'Number is', i, 'This is an', type, 'number.'
odd_even()

# Multiply
def multiply(arr, num):
    for i in range(0, len(arr)):
        arr[i] *= num
    return arr
print multiply([2, 4, 10, 16], 5)

# Hacker Challenge
def layered_multiples(arr):
    newerArray = []
    for i in arr:
        newArray = []
        for x in range(0, i):
            newArray.append(1)
        newerArray.append(newArray)
    return newerArray
print layered_multiples(multiply([2, 4, 5], 3))
|
[
"jao.colin@gmail.com"
] |
jao.colin@gmail.com
|
b0a496a2adad7d4299f3c94ceb3f5651a373a629
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/manage.py
|
436b9b54ecb5b87023cfad764e11bb94a803445a
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 366 |
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for

controller = Blueprint('manage', __name__, url_prefix='/manage')


# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic':  # TODO 2
#         return render_template('republic.html')  # TODO 2
#     else:
#         pass
|
[
"sqlconsult@hotmail.com"
] |
sqlconsult@hotmail.com
|
79c1cfdd225efbe367313be720d75fd7941a44b2
|
4eebce0d0c1132aed8227325bd58faf61a4010c7
|
/CONTEXT_178/d2.py
|
7a83e4ac92f0948ab14911f4a674624665be9101
|
[] |
no_license
|
xu1718191411/AT_CODE_BEGINNER_SELECTION
|
05836cfcc63dab2a0a82166c8f4c43c82b72686b
|
e4e412733d1a632ce6c33c739064fe036367899e
|
refs/heads/master
| 2021-07-17T00:59:46.315645 | 2020-09-22T06:14:27 | 2020-09-22T06:14:27 | 214,153,758 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 251 |
py
|
def calculate(n):
    arr = [0 for i in range(2000 + 1)]
    arr[3] = 1
    arr[4] = 1
    arr[5] = 1
    for i in range(6, n + 1):
        arr[i] = (arr[i - 2] + arr[i - 3] + arr[i - 4]) % (1000000000 + 7)
    print(arr[n])

calculate(int(input()))
|
[
"xu1718191411@gmail.com"
] |
xu1718191411@gmail.com
|
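The function above fills a 1-D DP table modulo 10^9 + 7 with base cases arr[3] = arr[4] = arr[5] = 1. Tracing two steps by hand (derived from the code, not from the original row): arr[6] = arr[4] + arr[3] + arr[2] = 1 + 1 + 0 = 2, and arr[7] = arr[5] + arr[4] + arr[3] = 1 + 1 + 1 = 3.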
1d04d3d4f51fb6e30bcbf047d655a4f3121f14ce
|
73dc6b3fdb07592f10b8e02b7ca053deb61a2dc9
|
/msof_api/comments/admin.py
|
4e3be735e10ca856e607a6d04ccf86bad757bf99
|
[] |
no_license
|
likelion-kookmin/msof_api
|
4143c09f93b68d219aa20de3bd57ec544c2bdf32
|
f9fec7d31ebdb465a8935711da715db6d87c0fce
|
refs/heads/develop
| 2023-06-28T15:35:45.240871 | 2021-07-31T16:38:35 | 2021-07-31T16:38:35 | 347,298,658 | 3 | 1 | null | 2021-07-31T16:38:36 | 2021-03-13T07:02:56 |
Python
|
UTF-8
|
Python
| false | false | 784 |
py
|
"""# comments admin
- CommentAdmin
"""
from django.contrib import admin
from .models import Comment
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
"""## CommentAdmin
- admin에서 관리할 Comment 모델 설정
"""
list_display = [
'author',
'question',
'parent',
'selected',
'content',
'status',
'liked_count',
'disliked_count',
]
list_editable = [
'status',
]
list_filter = [
'author',
'question',
'parent',
'selected',
'status',
]
search_fields = [
'content',
'author__name',
'question__title',
'question__content',
]
ordering = [
'-updated_at',
]
|
[
"singun11@kookmin.ac.kr"
] |
singun11@kookmin.ac.kr
|
b089edef3519feb7f892bdd66d7ebb57fe321c27
|
d214b72b3ae340d288c683afe356de6846a9b09d
|
/动态规划/最大矩形_85.py
|
d5fa9f35ee7dab90956eab9b4c2c0e9f34d1993c
|
[] |
no_license
|
Xiaoctw/LeetCode1_python
|
540af6402e82b3221dad8648bbdcce44954a9832
|
b2228230c90d7c91b0a40399fa631520c290b61d
|
refs/heads/master
| 2021-08-29T15:02:37.786181 | 2021-08-22T11:12:07 | 2021-08-22T11:12:07 | 168,444,276 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,071 |
py
|
from typing import *


class Solution:
    def maximalRectangle(self, matrix: List[List[str]]) -> int:
        if not matrix or not matrix[0]:
            return 0
        m, n = len(matrix), len(matrix[0])
        # num_up[j]: number of consecutive '1's ending at the current row in column j
        num_up = [0] * n
        ans = 0
        for i in range(m):
            for j in range(n):
                num_up[j] = num_up[j] + 1 if matrix[i][j] == '1' else 0
            # Largest rectangle in the histogram num_up via a monotonic stack.
            # The original single-pass scan (tracking the minimum height since the
            # last zero) misses rectangles that do not span back to the last zero,
            # e.g. heights [3, 1, 3, 2, 2], and printed 5 instead of 6 for the
            # test matrix below.
            stack = [-1]
            for j in range(n + 1):
                h = num_up[j] if j < n else 0
                while stack[-1] != -1 and num_up[stack[-1]] >= h:
                    height = num_up[stack.pop()]
                    width = j - stack[-1] - 1
                    ans = max(ans, height * width)
                stack.append(j)
        return ans


if __name__ == '__main__':
    matrix = [["1", "0", "1", "0", "0"], ["1", "0", "1", "1", "1"], ["1", "1", "1", "1", "1"],
              ["1", "0", "0", "1", "0"]]
    sol = Solution()
    print(sol.maximalRectangle(matrix))  # expected: 6
|
[
"m18846183092@163.com"
] |
m18846183092@163.com
|
19907e7cb61cd025d174242e51357e774a777801
|
d257ddf7e6959d0989d76080a8a048e82393657f
|
/002_TemplateMatching/002_template_match_implemented.py
|
112464bcd0690858ab97442b59d77b3d552eca7f
|
[
"MIT"
] |
permissive
|
remichartier/027_selfDrivingCarND_ObjectDetectionExercises
|
d210f37b7baf306dd034c09f62e125b263f8270d
|
ccd853c975d35df5f31e1a445a1a8757b8bd13f5
|
refs/heads/main
| 2023-04-17T08:09:55.465143 | 2021-05-03T07:11:16 | 2021-05-03T07:11:16 | 362,013,745 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,661 |
py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
            'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']

# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    # Make a copy of the image
    imcopy = np.copy(img)
    # Iterate through the bounding boxes
    for bbox in bboxes:
        # Draw a rectangle given bbox coordinates
        cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
    # Return the image copy with boxes drawn
    return imcopy

# All the 6 methods for comparison in a list
# methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
#            'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']

# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
    # Make a copy of the image to draw on
    imcopy = np.copy(img)
    # Define an empty list to take bbox coords
    bbox_list = []
    # Iterate through template list
    for temp in template_list:
        # Read in templates one by one
        templ = mpimg.imread(temp)
        print(templ.shape[::-1])
        l, w, h = templ.shape[::-1]
        # Use cv2.matchTemplate() to search the image
        # using whichever of the OpenCV search methods you prefer
        meth = 'cv2.TM_CCOEFF'  # --> Working
        #meth = 'cv2.TM_CCOEFF_NORMED'  # --> Working
        #meth = 'cv2.TM_CCORR'  # --> Not working
        #meth = 'cv2.TM_CCORR_NORMED'  # --> Working
        #meth = 'cv2.TM_SQDIFF'  # --> Not working
        #meth = 'cv2.TM_SQDIFF_NORMED'  # --> Not working
        method = eval(meth)
        res = cv2.matchTemplate(img, templ, method)
        # Use cv2.minMaxLoc() to extract the location of the best match
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # Determine bounding box corners for the match.
        # Note: the original compared the *string* meth against the integer
        # constants, which is always False; compare the resolved method instead.
        # The SQDIFF variants treat the minimum as the best match, which is
        # likely why they were marked "Not working" above.
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    # Return the list of bounding boxes
    return bbox_list

bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
|
[
"remipr.chartier@gmail.com"
] |
remipr.chartier@gmail.com
|
76958178b7438bb05a58d4bf3edd04bf9ee28403
|
cc212540f928a95fa56f4679e3eb58e2ad329ca5
|
/annpy/training/trainer.py
|
c93d497850a77427e0a1ba0888254a24da4a10e7
|
[
"LicenseRef-scancode-mit-taylor-variant",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
nirvguy/annpy
|
ec05e07316bddd4bc5fbbd3d9e73ec94dc52a4b9
|
ea5f92048173d0ebd1ad134cf626fa623569905e
|
refs/heads/master
| 2018-06-03T06:11:21.911758 | 2018-05-30T16:16:46 | 2018-05-30T16:16:48 | 118,555,614 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,346 |
py
|
# See LICENSE file for copyright and license details.
import torch


class Trainer(object):
    def __init__(self, learning_rule):
        self._learning_rule = learning_rule
        self._epoch = 0
        self._hooks = []
        self._remaining_epochs = 0

    @property
    def epoch(self):
        return self._epoch

    @staticmethod
    def check_batch(batch):
        if not isinstance(batch, torch.Tensor):
            raise Exception("Batches must be torch.Tensor's")
        if len(batch.shape) <= 1:
            raise Exception("Batch shape must have at least dimension two")

    def _notify(self, msg):
        for hook in self._hooks:
            hook.notify(msg)

    def train(self, batchs, epochs=1):
        if len(batchs) == 0:
            return
        for batch in batchs:
            self.check_batch(batch)
        self._remaining_epochs = epochs
        self._notify('pre_training')
        for _ in range(epochs):
            self._notify('pre_epoch')
            for batch in batchs:
                self._learning_rule.step(batch)
            self._epoch += 1
            self._remaining_epochs -= 1
            self._notify('post_epoch')
        self._notify('post_training')

    def remaining_epochs(self):
        return self._remaining_epochs

    def attach(self, hook):
        self._hooks.append(hook)
|
[
"nirvguy@gmail.com"
] |
nirvguy@gmail.com
|
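A minimal usage sketch for the Trainer above; DummyRule and PrintHook are invented here to show the expected interfaces (a step method and a notify method), the real learning rules and hooks live elsewhere in annpy:

import torch

class DummyRule:
    def step(self, batch):
        pass  # a real rule would update model parameters from the batch

class PrintHook:
    def notify(self, msg):
        print(msg)  # pre_training / pre_epoch / post_epoch / post_training

trainer = Trainer(DummyRule())
trainer.attach(PrintHook())
trainer.train([torch.zeros(4, 3)], epochs=2)
print(trainer.epoch)  # 2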
f841e9e9170838ca8c2972ca807eedb0e4ecd954
|
e905abd9bb7bd7017657d0a0c4d724d16e37044c
|
/.history/article/settings_20210208181317.py
|
5959719e37fa4bb9dcbc2f1420a4a206f030284f
|
[] |
no_license
|
tabdelbari/articles
|
a8b921841f84fb473f5ed1cdcda743863e6bc246
|
f0e1dfdc9e818e43095933139b6379a232647898
|
refs/heads/main
| 2023-03-05T10:21:35.565767 | 2021-02-10T13:35:14 | 2021-02-10T13:35:14 | 325,654,973 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,437 |
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for article project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
MONGO_URI = 'mongodb://localhost:27017/'
MONGO_DATABASE = 'articles'
BOT_NAME = 'article'
SPIDER_MODULES = ['article.spiders']
NEWSPIDER_MODULE = 'article.spiders'
SPLASH_URL = 'http://localhost:8050'
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'article (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 10
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 1
CONCURRENT_REQUESTS_PER_IP = 1
# Disable cookies (enabled by default)
COOKIES_ENABLED = True
COOKIES_DEBUG = True
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'article.pipelines.MongoPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"abdelbari1996@hotmail.com"
] |
abdelbari1996@hotmail.com
|
c42d909697d0db5a72ae51a3c5d635841a1787f8
|
a8fca7b6bc1f0eeaba12b682a81d880dc71cc929
|
/FlaskEndpoint/tests/system/test_home.py
|
38225c4925d80136cac8cbc7e3a04b5a0ac7ca4e
|
[] |
no_license
|
sineczek/Automated-Software-Testing-with-Python
|
cb74d8714ad5b2ec9a6ffc013a400f0181f8095b
|
2e7c4ff4bb5acfd53afb43a4bfa7191eb58a899c
|
refs/heads/main
| 2023-04-14T08:15:53.917614 | 2021-04-24T17:18:23 | 2021-04-24T17:18:23 | 345,342,351 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 454 |
py
|
from tests.system.base_test import BaseTest
import json


class TestHome(BaseTest):
    def test_home(self):
        with self.app() as c:
            resp = c.get('/')
            self.assertEqual(
                resp.status_code, 200
            )
            self.assertEqual(
                json.loads(resp.get_data()),  # loads reads a string and turns it into JSON, i.e. a dict
                {'message': 'Hello, world!'}
            )
|
[
"michalzaitz@gmail.com"
] |
michalzaitz@gmail.com
|
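`tests.system.base_test.BaseTest` is not part of this row. A minimal sketch of what it needs to provide for the test above to run; the `app` module and its Flask instance are assumptions, not the repository's actual code:

import unittest
from app import app  # hypothetical Flask application module

class BaseTest(unittest.TestCase):
    def setUp(self):
        app.testing = True

    def app(self):
        # Returns a test client usable as a context manager,
        # matching `with self.app() as c:` in the test above.
        return app.test_client()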
d8a49d368a82b0008bacdd568c57aa745bde3133
|
d86ed2c37a55b4a3118131a04f9a68dbd3b51a7f
|
/sherpatest/lev3fft-bar.py
|
384437c626b0fbb39addb9d1c5274f6e57b5fd62
|
[] |
no_license
|
hamogu/sherpa-test-data
|
f745cc907c2535a721d46472b33f7281bd6e6711
|
77d9fc563875c59a4acff2960d46180ee7a8ec14
|
refs/heads/master
| 2023-06-18T22:30:44.947033 | 2020-08-03T12:07:13 | 2020-08-03T12:07:13 | 275,202,255 | 0 | 0 | null | 2020-06-26T16:38:19 | 2020-06-26T16:38:19 | null |
UTF-8
|
Python
| false | false | 1,244 |
py
|
#!/usr/bin/env python
from sherpa.astro.ui import *
image_file = "acisf07999_000N001_r0035_regevt3_srcimg.fits"
psf_file = "acisf07999_000N001_r0035b_psf3.fits"
reg_file = "ellipse(3145.8947368421,4520.7894736842,37.0615234375,15.3881587982,92.2273254395)"
srcid = 1
load_data(srcid, image_file)
load_psf("psf%i" % srcid, psf_file)
set_psf(srcid, "psf%i" % srcid)
set_coord(srcid, "physical")
notice2d_id(srcid, reg_file)
# Switch to WCS for fitting
set_coord(srcid, "wcs")
# Use Nelder-Mead, C-statistic as fit method, statistic
set_method("neldermead")
set_stat("cstat")
set_source(srcid, 'gauss2d.src + const2d.bkg')
guess(srcid, src)
image_file = "acisf08478_000N001_r0043_regevt3_srcimg.fits"
psf_file = "acisf08478_000N001_r0043b_psf3.fits"
reg_file = "ellipse(3144.5238095238,4518.8095238095,25.2978591919,19.1118583679,42.9872131348)"
srcid = 2
load_data(srcid, image_file)
load_psf("psf%i" % srcid, psf_file)
set_psf(srcid, "psf%i" % srcid)
set_coord(srcid, "physical")
notice2d_id(srcid, reg_file)
# Switch to WCS for fitting
set_coord(srcid, "wcs")
# Use Nelder-Mead, C-statistic as fit method, statistic
set_method("neldermead")
set_stat("cstat")
set_source(srcid, 'gauss2d.src + const2d.bkg')
guess(srcid, src)
fit()
|
[
"olaurino@cfa.harvard.edu"
] |
olaurino@cfa.harvard.edu
|
77c55d04b0a750c8b0c0dc571cf5927a6d78e179
|
356f3f1b7caf0ccb20cc830d40821dfb2cbda046
|
/sfit/sfit/doctype/items/items.py
|
c1943c13dec9e21c63e99267eb3e87e7de102726
|
[
"MIT"
] |
permissive
|
vignesharumainayagam/sfit
|
f4b75b9a8b2de08d0eaa4eadbcd3d5e432ffba56
|
a96afbf35b0e1635e44cb5f83d7f86c83abedb8f
|
refs/heads/master
| 2021-09-05T18:22:43.494208 | 2018-01-30T07:23:02 | 2018-01-30T07:23:02 | 104,332,803 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 257 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Valiant Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Items(Document):
pass
|
[
"vigneshwaran@valiantsystems.com"
] |
vigneshwaran@valiantsystems.com
|
8f885274db507628a34e8f8f094526a25c935972
|
cc9d1aeb8aefe3d4f86c94b4279a64e70bf5fd80
|
/setup.py
|
be0365371238e8e2c7a86eb0bd4aa3c81f749446
|
[
"MIT"
] |
permissive
|
sdelquin/sendgrify
|
a520a2da7d6c6d7c4707c325f6d67523e53803eb
|
fe8ee1d0efd0c8d8034d1c57cfc07672f77d7e8e
|
refs/heads/main
| 2023-06-11T15:49:27.284693 | 2023-05-28T12:54:34 | 2023-05-28T12:54:34 | 342,843,979 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 650 |
py
|
# read the contents of your README file
from pathlib import Path

from setuptools import setup

this_directory = Path(__file__).parent
long_description = (this_directory / 'README.md').read_text()

REQUIREMENTS = (
    'sendgrid==5.3.0',
    'markdown',
)

setup(
    name='sendgrify',
    version='2.0.3',
    url='https://github.com/sdelquin/sendgrify.git',
    author='Sergio Delgado Quintero',
    author_email='sdelquin@gmail.com',
    description='SendGrid for Humans',
    license='MIT',
    packages=['sendgrify'],
    install_requires=REQUIREMENTS,
    long_description=long_description,
    long_description_content_type='text/markdown',
)
|
[
"sdelquin@gmail.com"
] |
sdelquin@gmail.com
|
39d7269798832e93cc7391c6516b8df87b50ca36
|
59c0669a38c4178f2f5cf8f9dca7553849c286a2
|
/MyPro/pythonScript/QRCodeDetect/Invoice/hough_tansform_bad.py
|
437f292bb460649c54b3fb981f99722309b81288
|
[] |
no_license
|
AUGUSTRUSH8/ImageProcess
|
f33ceaabaac67436df47fd1e1f115a8f44a6f556
|
46fc85b61dab52c3876dfacb4dfd22c962dc13bf
|
refs/heads/master
| 2023-04-27T21:39:36.044320 | 2022-07-04T14:59:35 | 2022-07-04T14:59:35 | 174,789,186 | 31 | 17 | null | 2022-07-06T20:07:14 | 2019-03-10T07:01:13 |
Java
|
UTF-8
|
Python
| false | false | 4,007 |
py
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math


def rotate_about_center2(src, radian, scale=1.):
    # input: angle in radians
    w = src.shape[1]
    h = src.shape[0]
    angle = radian * 180 / np.pi
    # now calculate new image width and height
    nw = (abs(np.sin(radian)*h) + abs(np.cos(radian)*w))*scale
    nh = (abs(np.cos(radian)*h) + abs(np.sin(radian)*w))*scale
    # ask OpenCV for the rotation matrix
    rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)
    # calculate the move from the old center to the new center combined
    # with the rotation
    rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5, 0]))
    # the move only affects the translation, so update the translation
    # part of the transform
    rot_mat[0, 2] += rot_move[0]
    rot_mat[1, 2] += rot_move[1]
    return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)


def get_group(arr):
    # Split into 4 radian intervals and return only the non-empty groups
    radian_45 = np.pi/4
    radian_90 = np.pi/2
    radian_135 = radian_45 * 3
    radian_180 = np.pi
    ret_arr = [[], [], [], []]
    for i in range(len(arr)):
        if arr[i] < radian_45:
            ret_arr[0].append(arr[i])
        elif arr[i] < radian_90:
            ret_arr[1].append(arr[i])
        elif arr[i] < radian_135:
            ret_arr[2].append(arr[i])
        else:
            ret_arr[3].append(arr[i])
    while [] in ret_arr:
        ret_arr.remove([])
    #print ret_arr
    return ret_arr


def get_min_var_avg(arr):
    # Group by radian interval and return the mean angle of the group
    # with the smallest variance
    group_arr = get_group(arr)
    print(group_arr)
    cv2.waitKey(0)
    var_arr = []
    if len(group_arr) <= 1:
        var_arr.append(np.var(group_arr[0]))
        print(var_arr)
        cv2.waitKey(0)
    else:
        for i in range(len(group_arr)):
            var_arr.append(np.var(group_arr[i]))
        print(var_arr)
    min_var = 10000
    min_i = 0
    for i in range(len(var_arr)):
        if var_arr[i] < min_var:
            min_var = var_arr[i]
            min_i = i
            #print min_var, i
    avg = np.mean(group_arr[min_i])
    return avg


def get_rotate_radian(radian, reverse=False):
    # Map the detected angle to the rotation angle to apply
    radian_45 = np.pi/4
    radian_90 = np.pi/2
    radian_135 = radian_45 * 3
    radian_180 = np.pi
    ret_radian = 0
    if radian < radian_45:
        ret_radian = radian
    elif radian < radian_90:
        ret_radian = radian - radian_90
    elif radian < radian_135:
        ret_radian = radian - radian_90
    else:
        ret_radian = radian - radian_180
    if reverse:
        ret_radian += radian_90
    print(ret_radian)
    return ret_radian


def rotate():
    image = cv2.imread("test3.jpg", 0)
    print(image.shape)
    # Gaussian blur (kernel size is empirical; tune as needed)
    blur = cv2.GaussianBlur(image, (7, 7), 0)
    cv2.imshow('image', blur)
    cv2.waitKey(0)
    # Canny edge detection
    canny = cv2.Canny(blur, 20, 150, 3)
    cv2.imshow("canny", canny)
    lines = cv2.HoughLines(canny, 1, np.pi/180, 200)  # threshold is empirical; tune as needed
    # compute the average angle of the detected lines
    l = len(lines[0])
    print(l)
    theta_arr = [lines[0][i][1] for i in range(l)]
    print(theta_arr)
    cv2.waitKey(0)
    rotate_theta = get_min_var_avg(theta_arr)
    print(rotate_theta)
    #print lines
    '''for line in lines[0]:
        rho = line[0]
        theta = line[1]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        cv2.line(image, (int(x0 - 1000*b), int(y0 + 1000*a)), (int(x0 + 1000*b), int(y0 - 1000*a)), (0,255,0), 2)
    #cv2.imshow('image',image)
    #cv2.waitKey(0)'''
    img2 = rotate_about_center2(image, get_rotate_radian(rotate_theta, image.shape[0] > image.shape[1]))  # height > width
    plt.imshow(img2)
    plt.show()


if __name__ == '__main__':
    rotate()
|
[
"l"
] |
l
|
4080d41a60b85ff5500efacfc8fa63c51b33899f
|
2d1ffb862ec65116f88b0986e4f36d36110cbfe5
|
/app/views.py
|
ced21fb3eae0537fbf78312e2c9f3eb801e59a90
|
[] |
no_license
|
stkc282/wedding
|
c38afc7861119b8cf4490fa35007841d58e161c7
|
1799b72820787a59d0d5b7edf7748b1ab7af9a98
|
refs/heads/master
| 2021-06-18T04:15:20.293547 | 2019-08-19T10:17:13 | 2019-08-19T10:17:13 | 202,826,952 | 0 | 0 | null | 2021-06-10T21:52:12 | 2019-08-17T02:48:38 |
JavaScript
|
UTF-8
|
Python
| false | false | 3,288 |
py
|
# from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django_filters.views import FilterView

from .filters import ItemFilter
from .forms import ItemForm
from .models import Item
from django.shortcuts import render


# # Create your views here.
# # Search list view
class ItemFilterView(FilterView):
    model = Item
    #
    # # Default ordering: newest first
    # queryset = Item.objects.all().order_by('-created_at')
    #
    # # django-filter settings
    # filterset_class = ItemFilter
    # strict = False
    #
    # # Number of items per page
    # paginate_by = 10
    #
    # # Save the search conditions to the session
    # def get(self, request, **kwargs):
    #     if request.GET:
    #         request.session['query'] = request.GET
    #     else:
    #         request.GET = request.GET.copy()
    #         if 'query' in request.session.keys():
    #             for key in request.session['query'].keys():
    #                 request.GET[key] = request.session['query'][key]
    #
    #     return super().get(request, **kwargs)


# # Search list view
# class ItemFilterView(LoginRequiredMixin, FilterView):
#     model = Item
#
#     # Default ordering: newest first
#     queryset = Item.objects.all().order_by('-created_at')
#
#     # django-filter settings
#     filterset_class = ItemFilter
#     strict = False
#
#     # Number of items per page
#     paginate_by = 10
#
#     # Save the search conditions to the session
#     def get(self, request, **kwargs):
#         if request.GET:
#             request.session['query'] = request.GET
#         else:
#             request.GET = request.GET.copy()
#             if 'query' in request.session.keys():
#                 for key in request.session['query'].keys():
#                     request.GET[key] = request.session['query'][key]
#
#         return super().get(request, **kwargs)
#

# Detail view
class ItemDetailView(DetailView):
    model = Item

# # Detail view
# class ItemDetailView(LoginRequiredMixin, DetailView):
#     model = Item

# Create view
class ItemCreateView(CreateView):
    model = Item
    form_class = ItemForm
    success_url = reverse_lazy('thanks')

# Update view
class ItemUpdateView(UpdateView):
    model = Item
    form_class = ItemForm
    success_url = reverse_lazy('index')

# Delete view
class ItemDeleteView(DeleteView):
    model = Item
    success_url = reverse_lazy('index')

def invitation(request):
    # post = get_object_or_404(Post, pk=pk)
    return render(request, 'app/invitation.html', {})

def thanks(request):
    return render(request, 'app/thanks.html', {})

def access(request):
    return render(request, 'app/access.html', {})

# def create(request):
#     if request.method == 'POST':
#         form_class = ItemForm(request.POST)
#         if form_class.is_valid():
#             model = form_class.save(commit=False)
#             model.save()
#             return redirect('index', pk=form_class.pk)
#     else:
#         form_class = ItemForm
#     return render(request, 'app/thanks.html', {'form': form_class})
|
[
"you@example.com"
] |
you@example.com
|
e396119de92c2a9d0442f560d6abcdd894436e17
|
484f111548e9d7192a5748eb202c08802484d747
|
/fw/flash.py
|
8361fc57a27f60367e21952493f6068dcb8a037a
|
[
"Apache-2.0"
] |
permissive
|
cmcmurrough/moteus
|
dafb2e5224409aaf1d57b66f58965d298845678d
|
6780967ec40ad7f1ab76cdbd7021f2d07b739efe
|
refs/heads/main
| 2023-07-11T10:29:58.645291 | 2021-08-13T13:38:32 | 2021-08-13T13:38:32 | 396,627,837 | 2 | 0 |
Apache-2.0
| 2021-08-16T05:07:08 | 2021-08-16T05:07:07 | null |
UTF-8
|
Python
| false | false | 2,193 |
py
|
#!/usr/bin/python3

# Copyright 2021 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import platform
import subprocess
import sys
import tempfile

BINPREFIX = '' if platform.machine().startswith('arm') else 'arm-none-eabi-'
OBJCOPY = BINPREFIX + 'objcopy'
OPENOCD = 'openocd -f interface/stlink.cfg -f target/stm32g4x.cfg '


def main():
    tmpdir = tempfile.TemporaryDirectory()

    moteus_elffile = (
        sys.argv[1]
        if len(sys.argv) > 1 else
        'bazel-out/stm32g4-opt/bin/fw/moteus.elf')
    bootloader_elffile = (
        sys.argv[2]
        if len(sys.argv) > 2 else
        'bazel-out/stm32g4-opt/bin/fw/can_bootloader.elf')

    subprocess.check_call(
        f'{OBJCOPY} -Obinary ' +
        f'-j .isr_vector ' +
        f'{moteus_elffile} {tmpdir.name}/out.08000000.bin',
        shell=True)
    subprocess.check_call(
        f'{OBJCOPY} -Obinary ' +
        f'-j .text -j .ARM.extab -j .ARM.exidx -j .data -j .bss ' +
        f'{bootloader_elffile} {tmpdir.name}/out.0800c000.bin',
        shell=True)
    subprocess.check_call(
        f'{OBJCOPY} -Obinary ' +
        f'-j .text -j .ARM.extab -j .ARM.exidx -j .data -j .ccmram -j .bss ' +
        f'{moteus_elffile} {tmpdir.name}/out.08010000.bin',
        shell=True)
    subprocess.check_call(
        f'{OPENOCD} -c "init" ' +
        f'-c "reset_config none separate; ' +
        f'    program {tmpdir.name}/out.08000000.bin verify 0x8000000; ' +
        f'    program {tmpdir.name}/out.0800c000.bin verify 0x800c000; ' +
        f'    program {tmpdir.name}/out.08010000.bin verify ' +
        f'    reset exit 0x08010000"',
        shell=True)


if __name__ == '__main__':
    main()
|
[
"jjp@pobox.com"
] |
jjp@pobox.com
|
bb6e52fee441903389167e2b4292125b69cdb8b8
|
ce3964c7195de67e07818b08a43286f7ec9fec3e
|
/dl_poly/get_pvt.py
|
6fd5f7613ff6286470a47abe111c368b60d57ff7
|
[] |
no_license
|
zhuligs/physics
|
82b601c856f12817c0cfedb17394b7b6ce6b843c
|
7cbac1be7904612fd65b66b34edef453aac77973
|
refs/heads/master
| 2021-05-28T07:39:19.822692 | 2013-06-05T04:53:08 | 2013-06-05T04:53:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,852 |
py
|
#!/usr/bin/env python
# Try retrieving P,V,T, etc. from the STATIS file, may be easier than from OUTPUT...
import os, sys, commands

def readlines(FILE, n):
    '''Read n lines from FILE'''
    for i in range(n):
        FILE.readline()

try:
    s = open('STATIS', 'r')
    header1 = s.readline()
    header2 = s.readline()
    c = open('CONTROL', 'r')
    lines = c.readlines()
    for line in lines:
        if len(line.split()) == 2:
            var, value = line.split()
            if var == 'steps':
                steps = int(value)
            elif var == 'stats':
                stats = int(value)
    c.close()
except:
    print 'Could not open STATIS and CONTROL files successfully--stopping'
    sys.exit(0)

# Total energy is row 1 value 1
# Temp is row 1, value 2
# Pres is row 6, value 2
# Vol is row 4, value 4

nblocks = int(steps)/int(stats)

out = open('pvt.dat', 'w')
out.write('# --Data extracted from STATIS file--\n')
out.write('#tstep\tpres (GPa)\tvol (ang^3)\ttemp (K)\tetot (eV)\t\tpot (eV)\n')

for i in range(nblocks):
    tstep, t, elements = s.readline().split()
    row1 = s.readline().split()
    Etot = str(float(row1[0]) * 1.036426865E-4)  # convert unit to eV
    T = row1[1]
    s.readline()
    s.readline()
    V = s.readline().split()[3]
    s.readline()
    P = str(float(s.readline().split()[1]) * 0.016605402)  # convert atm unit to GPa
    # Every line has 5 values, each line read is 5 elements gone
    leftover = int(elements) - 5*6
    if leftover % 5 == 0:
        extra_lines = leftover/5
    else:
        extra_lines = leftover/5 + 1
    readlines(s, extra_lines)
    # Calculate Etot - 3*k_b*T
    k_b = 8.617343E-5  # Boltzmann's const in eV/K
    pot = str(float(Etot) - 3*k_b*float(T))
    out.write(tstep+'\t'+P+' \t'+V+'\t'+T+'\t'+Etot+'\t'+pot+'\n')

s.close()
out.close()
|
[
"boates@gmail.com"
] |
boates@gmail.com
|
fbb7c0b773c663b598397c813719054f055a6897
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/lib/python2.7/site-packages/openpyxl/worksheet/pivot.py
|
b1905be6298ea1c57f774cae821fbc482b8bf25b
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 |
Apache-2.0
| 2022-10-05T12:10:32 | 2016-12-15T05:26:12 |
Python
|
UTF-8
|
Python
| false | false | 4,984 |
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl

from openpyxl.descriptors import (
    Bool,
    Integer,
    String,
    Set,
    NoneSet,
)

from openpyxl.descriptors.serialisable import Serialisable


class PivotSelection(Serialisable):

    pane = Set(values=("bottomRight", "topRight", "bottomLeft", "topLeft"))
    showHeader = Bool()
    label = Bool()
    data = Bool()
    extendable = Bool()
    count = Integer()
    axis = String(allow_none=True)
    dimension = Integer()
    start = Integer()
    min = Integer()
    max = Integer()
    activeRow = Integer()
    activeCol = Integer()
    previousRow = Integer()
    previousCol = Integer()
    click = Integer()

    def __init__(self,
                 pane=None,
                 showHeader=None,
                 label=None,
                 data=None,
                 extendable=None,
                 count=None,
                 axis=None,
                 dimension=None,
                 start=None,
                 min=None,
                 max=None,
                 activeRow=None,
                 activeCol=None,
                 previousRow=None,
                 previousCol=None,
                 click=None):
        self.pane = pane
        self.showHeader = showHeader
        self.label = label
        self.data = data
        self.extendable = extendable
        self.count = count
        self.axis = axis
        self.dimension = dimension
        self.start = start
        self.min = min
        self.max = max
        self.activeRow = activeRow
        self.activeCol = activeCol
        self.previousRow = previousRow
        self.previousCol = previousCol
        self.click = click


class PivotArea(Serialisable):

    field = Integer(allow_none=True)
    type = NoneSet(values=("normal", "data", "all", "origin", "button", "topEnd"))
    dataOnly = Bool()
    labelOnly = Bool()
    grandRow = Bool()
    grandCol = Bool()
    cacheIndex = Bool()
    outline = Bool()
    offset = String()
    collapsedLevelsAreSubtotals = Bool()
    axis = String(allow_none=True)
    fieldPosition = Integer(allow_none=True)

    def __init__(self,
                 field=None,
                 type=None,
                 dataOnly=None,
                 labelOnly=None,
                 grandRow=None,
                 grandCol=None,
                 cacheIndex=None,
                 outline=None,
                 offset=None,
                 collapsedLevelsAreSubtotals=None,
                 axis=None,
                 fieldPosition=None):
        self.field = field
        self.type = type
        self.dataOnly = dataOnly
        self.labelOnly = labelOnly
        self.grandRow = grandRow
        self.grandCol = grandCol
        self.cacheIndex = cacheIndex
        self.outline = outline
        self.offset = offset
        self.collapsedLevelsAreSubtotals = collapsedLevelsAreSubtotals
        self.axis = axis
        self.fieldPosition = fieldPosition


class PivotAreaReferences(Serialisable):

    count = Integer()

    def __init__(self, count=None):
        # The original read "count = count", a no-op that never set the
        # attribute; it must assign to self.
        self.count = count


class PivotAreaReference(Serialisable):

    field = Integer(allow_none=True)
    count = Integer()
    selected = Bool()
    byPosition = Bool()
    relative = Bool()
    defaultSubtotal = Bool()
    sumSubtotal = Bool()
    countASubtotal = Bool()
    avgSubtotal = Bool()
    maxSubtotal = Bool()
    minSubtotal = Bool()
    productSubtotal = Bool()
    countSubtotal = Bool()
    stdDevSubtotal = Bool()
    stdDevPSubtotal = Bool()
    varSubtotal = Bool()
    varPSubtotal = Bool()

    def __init__(self,
                 field=None,
                 count=None,
                 selected=None,
                 byPosition=None,
                 relative=None,
                 defaultSubtotal=None,
                 sumSubtotal=None,
                 countASubtotal=None,
                 avgSubtotal=None,
                 maxSubtotal=None,
                 minSubtotal=None,
                 productSubtotal=None,
                 countSubtotal=None,
                 stdDevSubtotal=None,
                 stdDevPSubtotal=None,
                 varSubtotal=None,
                 varPSubtotal=None):
        self.field = field
        self.count = count
        self.selected = selected
        self.byPosition = byPosition
        self.relative = relative
        self.defaultSubtotal = defaultSubtotal
        self.sumSubtotal = sumSubtotal
        self.countASubtotal = countASubtotal
        self.avgSubtotal = avgSubtotal
        self.maxSubtotal = maxSubtotal
        self.minSubtotal = minSubtotal
        self.productSubtotal = productSubtotal
        self.countSubtotal = countSubtotal
        self.stdDevSubtotal = stdDevSubtotal
        self.stdDevPSubtotal = stdDevPSubtotal
        self.varSubtotal = varSubtotal
        self.varPSubtotal = varPSubtotal


class Index(Serialisable):

    v = Integer()

    def __init__(self, v=None):
        self.v = v
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
40b5a7f814ed68cbc12969cb867747a1687e0e1b
|
ac1e60fd4bb3b7cc04e413ae394836abad8947b1
|
/email_verification_api/wsgi.py
|
e60483842d64ef833b28dfd12be0cfe5d6bf9eba
|
[] |
no_license
|
Taycode/email-verification-api
|
9c48642f34671232c388a7c763541f02ff9ae614
|
f3abe35a010d5b2d3d2c269fa728eb40f26630a0
|
refs/heads/master
| 2020-08-04T11:00:29.103892 | 2019-10-01T14:49:14 | 2019-10-01T14:49:14 | 212,114,710 | 0 | 0 | null | 2019-10-01T14:16:59 | 2019-10-01T14:16:58 | null |
UTF-8
|
Python
| false | false | 421 |
py
|
"""
WSGI config for email_verification_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'email_verification_api.settings')
application = get_wsgi_application()
|
[
"tay2druh@gmail.com"
] |
tay2druh@gmail.com
|
490df8c8807c725fdf915ccba2ff1496bd0ac937
|
60cb975f3e0251c73c457271bce8a7b2036e422b
|
/studysrc/mytest/websppider/transtest.py
|
23c308fa3b1c83bba1c6cd379e0c29e746a2f19d
|
[] |
no_license
|
49257620/reboot
|
0a2341f23bc1a6f3ae47b59f772919228c623544
|
86b348228d1a25d78c45b0e9022d7c773544373b
|
refs/heads/master
| 2018-11-17T19:19:58.969710 | 2018-09-25T03:15:57 | 2018-09-25T03:15:57 | 125,727,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 976 |
py
|
# encoding: utf-8
# Author: LW
import urllib.request
import urllib.parse
import time
import random
import hashlib

content = 'what fuck'
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
data = {}
'''
1523493384696
1523493371204
351ac046404e1bbcb9442615f964a96d
cb2731255a15489013919b3788953bdc
'''
u = 'fanyideskweb'
d = content
f = str(int(time.time()*1000) + random.randint(1, 10))
c = 'ebSeFb%=XZ%T[KZ)c(sy!'
sign = hashlib.md5((u + d + f + c).encode('utf-8')).hexdigest()
print(f)
print(sign)
# The original wrote "data['i']: content" etc., which are no-op annotation
# statements, so those keys never reached the POST body; they must be assignments.
data['i'] = content
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
data['salt'] = f
data['sign'] = sign
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_CLICKBUTTION'
data['typoResult'] = 'false'

data = urllib.parse.urlencode(data).encode('utf-8')
response = urllib.request.urlopen(url, data)
html = response.read().decode('utf-8')
print(html)
|
[
"49257620@qq.com"
] |
49257620@qq.com
|
08273d87152e339e41af2407ff4bbad8cc28e79c
|
f2b91692a434ee79ff5d68ed3111d60d90315f00
|
/src/command_modules/azure-cli-servicebus/azure/cli/command_modules/servicebus/_validators.py
|
6a4509e9f662b17fe8494f89fce3441aa9719205
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cal5barton/azure-cli
|
f883bc7d481b163d4c4af1fa154a990182e5de80
|
6ebc6f810f32b8fce30a360633a70fcfdea15e7b
|
refs/heads/dev
| 2023-05-24T18:12:36.151238 | 2018-07-12T16:16:29 | 2018-07-12T16:16:29 | 140,749,210 | 0 | 0 |
MIT
| 2023-05-15T18:58:31 | 2018-07-12T18:13:18 |
Python
|
UTF-8
|
Python
| false | false | 4,322 |
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

# pylint: disable=line-too-long
# pylint: disable=unused-variable

import re
from datetime import timedelta
from isodate import parse_duration
from knack.util import CLIError


# PARAMETER VALIDATORS
# Type ISO 8601 duration
iso8601pattern = re.compile("^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+.)?(\\d+S)?)?$")
timedeltapattern = re.compile("^\\d+:\\d+:\\d+$")


def _validate_lock_duration(namespace):
    if namespace.lock_duration:
        if iso8601pattern.match(namespace.lock_duration):
            if parse_duration(namespace.lock_duration) > timedelta(days=0, minutes=6, seconds=0):
                raise CLIError(
                    '--lock-duration Value Error : {0} value, The maximum value for LockDuration is 5 minutes; the default value is 1 minute.'.format(
                        namespace.lock_duration))
        elif timedeltapattern.match(namespace.lock_duration):
            day, minute, seconds = namespace.lock_duration.split(":")
            if int(day) > 0 or int(minute) > 6:
                raise CLIError(
                    '--lock-duration Value Error : {0} value, The maximum value for LockDuration is 5 minutes; the default value is 1 minute.'.format(
                        namespace.lock_duration))
        else:
            raise CLIError('--lock-duration Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g.'
                           ' PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.lock_duration))


def _validate_default_message_time_to_live(namespace):
    if namespace.default_message_time_to_live:
        if not iso8601pattern.match(namespace.default_message_time_to_live) and not timedeltapattern.match(namespace.default_message_time_to_live):
            raise CLIError('--default-message-time-to-live Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.default_message_time_to_live))


def _validate_duplicate_detection_history_time_window(namespace):
    if namespace.duplicate_detection_history_time_window:
        if iso8601pattern.match(namespace.duplicate_detection_history_time_window):
            pass
        elif timedeltapattern.match(namespace.duplicate_detection_history_time_window):
            pass
        else:
            raise CLIError('--duplicate-detection-history-time-window Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.duplicate_detection_history_time_window))


def _validate_auto_delete_on_idle(namespace):
    if namespace.auto_delete_on_idle:
        if iso8601pattern.match(namespace.auto_delete_on_idle):
            pass
        elif timedeltapattern.match(namespace.auto_delete_on_idle):
            pass
        else:
            raise CLIError('--auto-delete-on-idle Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.auto_delete_on_idle))


def validate_partner_namespace(cmd, namespace):
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id
    if namespace.partner_namespace:
        if not is_valid_resource_id(namespace.partner_namespace):
            namespace.partner_namespace = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.ServiceBus',
                type='namespaces',
                name=namespace.partner_namespace)


def validate_premiumsku_capacity(namespace):
    if namespace.sku and namespace.sku != 'Premium' and namespace.capacity:
        raise CLIError('--capacity - This property is only applicable to namespaces of Premium SKU')
|
[
"tjprescott@users.noreply.github.com"
] |
tjprescott@users.noreply.github.com
|
9ec5875503577bf114e6521a6174ca229c968b95
|
c1e0874f55d05ee990ed2d637c2910701b32d246
|
/soft_uni_OOP/Defining Classes/lab/scope_mess_3.py
|
03d81f4774c92bdc435a7583da245e72d79f8461
|
[] |
no_license
|
borislavstoychev/Soft_Uni
|
5d047bef402c50215e0abc825476326889ffd0be
|
ccc0b2fb18f8ad6809b475eb20e82a9e4eb4b0b0
|
refs/heads/master
| 2023-05-11T12:27:08.672058 | 2021-05-28T18:00:10 | 2021-05-28T18:00:10 | 277,556,731 | 3 | 2 | null | 2021-02-11T19:57:37 | 2020-07-06T13:58:23 |
Python
|
UTF-8
|
Python
| false | false | 311 |
py
|
x = "global"
def outer():
x = "local"
def inner():
nonlocal x
x = "nonlocal"
print("inner:", x)
def change_global():
global x
x = "global: changed!"
print("outer:", x)
inner()
print("outer:", x)
change_global()
print(x)
outer()
print(x)
|
[
"stoy4ew@gmail.com"
] |
stoy4ew@gmail.com
|
4d641b7b452b7e43378724205d8c5690b44cd11a
|
5b9c50baaa3182868c9f4a744a7361abe422a510
|
/tests/test_base.py
|
f7f5133f7951074f1287e3257df0b73b129805e8
|
[
"MIT"
] |
permissive
|
jasontangxf/geometer
|
3307889c087a1f498d58b5ae6bbf1b037119ca46
|
931df0aff6c680ad13a6c5989f2a89c276370c5e
|
refs/heads/master
| 2023-01-06T17:39:41.837342 | 2020-11-07T15:42:10 | 2020-11-07T15:42:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,251 |
py
|
import numpy as np
from geometer.base import TensorDiagram, Tensor, TensorCollection, LeviCivitaTensor, KroneckerDelta
class TestTensor:
def test_arithmetic(self):
a = Tensor(2, 3)
b = Tensor(5, 4)
# vector operations
assert a + b == Tensor(7, 7)
assert a - b == Tensor(-3, -1)
assert -a == Tensor(-2, -3)
# scalar operations
assert a + 6 == Tensor(8, 9)
assert a - 6 == Tensor(-4, -3)
assert a * 6 == Tensor(12, 18)
assert a / 6 == Tensor(1/3, 0.5)
def test_transpose(self):
a = Tensor([[1, 2],
[3, 4]], covariant=[0])
assert a.transpose() == Tensor([[1, 3], [2, 4]])
assert a.T._covariant_indices == {1}
assert a.T.T == a
def test_getitem(self):
a = Tensor([[1, 2],
[3, 4]], covariant=[0])
assert a[0, 1] == 2
assert a[None, 1] == [[3, 4]]
assert a[None, 1].tensor_shape == (0, 1)
assert a[::-1, 0] == [3, 1]
assert a[::-1, 0].tensor_shape == (1, 0)
def test_dtype(self):
a = Tensor(2, 3, dtype=np.float32)
assert a.dtype == np.float32
a = Tensor(2, 3, dtype=np.complex64)
assert a.dtype == np.complex64
class TestTensorCollection:
def test_init(self):
# empty list
a = TensorCollection([])
assert len(a) == 0
# numpy array
a = TensorCollection(np.ones((1, 2, 3)))
assert len(a) == 1
assert a.size == 2
# nested list of numbers
a = TensorCollection([[1, 2], [3, 4]])
assert len(a) == 2
assert a.size == 2
# nested tuple of numbers
a = TensorCollection(((1, 2), (3, 4)))
assert len(a) == 2
assert a.size == 2
# nested list of Tensor objects
a = TensorCollection([[Tensor(1, 2, 3), Tensor(3, 4, 5)]])
assert a.shape == (1, 2, 3)
assert len(a) == 1
assert a.size == 2
# object with __array__ function
class A:
def __array__(self):
return np.array([Tensor(1, 2), Tensor(3, 4)])
a = TensorCollection(A())
assert len(a) == 2
assert a.size == 2
def test_flat(self):
a = [Tensor([[1, 2], [3, 4]]), Tensor([[5, 6], [7, 8]])]
b = TensorCollection([a], tensor_rank=2)
assert list(b.flat) == a
def test_getitem(self):
a = Tensor([[1, 2],
[3, 4]])
b = Tensor([[5, 6],
[7, 8]])
c = TensorCollection([a, b])
assert c[0] == a
assert c[1] == b
assert list(c) == [a, b]
assert c[:, 1] == TensorCollection([Tensor([3, 4]), Tensor([7, 8])])
assert c[:, 0, 0] == [1, 5]
class TestTensorDiagram:
def test_add_edge(self):
a = Tensor([1, 0, 0, 0])
b = Tensor([[42, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], covariant=False)
diagram = TensorDiagram((a, b))
assert diagram.calculate() == Tensor([42, 0, 0, 0])
diagram.add_edge(a.copy(), b)
assert diagram.calculate() == 42
def test_tensor_product(self):
e1 = Tensor(1, 0)
e2 = Tensor(0, 1)
a = Tensor([0, 1],
[1, 0], covariant=[0])
b = Tensor([1, 0],
[0, 1], covariant=[0])
m = a.tensor_product(b)
e = e1.tensor_product(e2)
assert TensorDiagram((e, m), (e, m)).calculate() == (a * e1).tensor_product(b * e2)
d = TensorDiagram()
d.add_node(a)
d.add_node(b)
assert d.calculate() == a.tensor_product(b)
def test_epsilon_delta_rule(self):
e1 = LeviCivitaTensor(3, True)
e2 = LeviCivitaTensor(3, False)
d = KroneckerDelta(3)
d2 = d.tensor_product(d)
d1 = d2.transpose((0, 1))
diagram = TensorDiagram((e1, e2.transpose()))
assert diagram.calculate() == d1 - d2
def test_kronecker_delta(self):
d = KroneckerDelta(4, 3)
assert d.array.shape == (4,)*6
assert d.array[0, 1, 2, 0, 1, 2] == 1
assert d.array[0, 2, 1, 0, 1, 2] == -1
|
[
"jan.rv@t-online.de"
] |
jan.rv@t-online.de
|
4c10f5dbe66a1ecd6b2cb0e0d1cb6a3481ac2ca0
|
1b94c7cfd66804fe8d40b5def35e4b9b18d69ba2
|
/old_py2/controllers/apiai_controller.py
|
dfff3930d0c210a7d0d4eb8c2af95d15d9d7e374
|
[
"MIT"
] |
permissive
|
the-blue-alliance/the-blue-alliance
|
3dc210a9611ce9b240907ffd420f78040318dcdc
|
6d42f3cdb2f785d192f2871419e58aaae3445029
|
refs/heads/py3
| 2023-08-22T21:02:36.398100 | 2023-08-22T19:14:01 | 2023-08-22T19:14:01 | 888,427 | 344 | 263 |
MIT
| 2023-09-14T18:35:20 | 2010-09-04T20:34:11 |
HTML
|
UTF-8
|
Python
| false | false | 635 |
py
|
import json
from base_controller import LoggedInHandler
from helpers.apiai_helper import APIAIHelper
from models.sitevar import Sitevar
class APIAIHandler(LoggedInHandler):
def __init__(self, *args, **kw):
super(APIAIHandler, self).__init__(*args, **kw)
def post(self):
if self.request.headers.get('X-TBA-APIAI-Auth') != Sitevar.get_by_id('apiai.secrets').contents['key']:
return
request = json.loads(self.request.body)
self.response.headers['content-type'] = 'application/json; charset="utf-8"'
self.response.out.write(json.dumps(APIAIHelper.process_request(request)))
|
[
"noreply@github.com"
] |
the-blue-alliance.noreply@github.com
|
d716a64d25d8ed53904876bd54c1a98a7b88deb5
|
9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d
|
/python/uline/uline/uline/handlers/app/distributor/balance/distributorBalanceList.py
|
4116d637e99da40fb08daa5c8fdc82a1bdbb023b
|
[] |
no_license
|
apollowesley/Demo
|
f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8
|
471c4af95d3a7222d6933afc571a8e52e8fe4aee
|
refs/heads/master
| 2021-02-15T04:01:51.590697 | 2018-01-29T01:44:29 | 2018-01-29T01:44:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,367 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import tornado.web
import tornado.gen
from uline.handlers.baseHandlers import DtAdminHandler
from .form import DistributorBalanceList
from uline.public.constants import TO_PAY, PAY_CHANNEL
from datetime import timedelta, datetime
from uline.public.permit import check_permission
class DistributorBalanceListHandler(DtAdminHandler):
@tornado.web.authenticated
@check_permission
def prepare(self):
form = DistributorBalanceList(self)
if not form.validate():
self.redirect('/dist/balance/dt/list')
return
self.dt_daily_balance_no = form.ddb_no.data
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
data = self.do_execute()
self.render('distributor/balance/distributorBalanceList.html', data=data)
def do_execute(self):
dt_id = self.current_user
query = """select
to_char(ddbi.pay_start_time, 'YYYY-MM-DD HH24:MI:SS'),
to_char(ddbi.need_pay_time,'YYYY-MM-DD'),
ddbi.rcvAcctName,
ddbi.channel,
ddbi.rcvacctno,
ddbi.rcvBankName,
ddbi.tranAmt,
ddbi.pay_status,
ddbi.failure_details
from dt_daily_balance_info as ddbi
inner join dt_balance db on db.dt_id = ddbi.dt_id
where ddbi.dt_id=%(dt_id)s
and ddbi.dt_daily_balance_no=%(dt_daily_balance_no)s;"""
ret = self.db.selectSQL(query, {'dt_daily_balance_no': self.dt_daily_balance_no, 'dt_id': dt_id})
fields = ['create_at', 'need_pay_time', 'rcvAcctName', 'channel', 'balance_account', 'rcvBankName',
'tranAmt', 'pay_status', 'failure_details']
dt_info = dict(zip(fields, ret))
dt_info['tranAmt'], dt_info['pay_status'], dt_info['channel'] = dt_info['tranAmt'] / 100, \
TO_PAY[str(dt_info['pay_status'])], \
PAY_CHANNEL[str(dt_info['channel'])],
dt_info['need_pay_time'] = datetime.strptime(dt_info['need_pay_time'], '%Y-%m-%d') - timedelta(days=1)
dt_info['need_pay_time'] = datetime.strftime(dt_info['need_pay_time'], '%Y-%m-%d')
        # TODO: a table with transfer (payment) status details is still missing
return dt_info
|
[
"36821277@qq.com"
] |
36821277@qq.com
|
8b37209b33d201b789d2658845aa87843ef7a8e0
|
db144fdc9a1948cce066bed20912c32e1a18a8aa
|
/accounts/views.py
|
49c0aa25bf7c13a2faa3ed61bf4acc3c6a75f458
|
[] |
no_license
|
masato932/django-blog3
|
cd01101cbffdbaa33d2cb9bf696e5a5cdf8cd6fa
|
769068ba356cf8e0cc0bbde76e82e116e58b8bab
|
refs/heads/main
| 2023-05-13T20:14:43.706480 | 2021-06-05T14:03:13 | 2021-06-05T14:03:13 | 365,480,981 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 484 |
py
|
from django.shortcuts import render, redirect
from allauth.account import views
class LoginView(views.LoginView):
template_name = 'accounts/login.html'
class LogoutView(views.LogoutView):
template_name = 'accounts/logout.html'
def post(self, *args, **kwargs):
if self.request.user.is_authenticated:
self.logout()
return redirect('/')
class SignupView(views.SignupView):
template_name = 'accounts/signup.html'
# Create your views here.
|
[
"masatowada66@gmail.com"
] |
masatowada66@gmail.com
|
b62893ee1712e3ddf4365071e6596e2d820ac5dc
|
cf57cd3355471f035ca429302742b4eb4baf1214
|
/Comparações/SHI-TOMASI/SHI-TOMASI_sift.py
|
7a91ba9cb2b4ae56f47b6d8069c64cbee54c797b
|
[] |
no_license
|
RobotColony-UEFS/feature-match
|
c56d78230d86948e5612a9645c71a0647eb94604
|
ac421989aa1ee3893243122a0cf041b30e038a28
|
refs/heads/master
| 2022-11-27T15:31:20.570505 | 2020-08-04T19:24:17 | 2020-08-04T19:24:17 | 285,063,878 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,365 |
py
|
#coding: utf-8
import cv2
import numpy as np
import mysql.connector
import math
mydb = mysql.connector.connect(
host="localhost",
user="descritores",
passwd="12345678",
database="referencias"
)
def desvio(vetResult):
    # Population standard deviation (and mean) of the result vector
soma = float(sum(vetResult))
media = soma/len(vetResult)
res = 0
for valor in vetResult:
res += ((valor - media)**2)
desvio = (math.sqrt(res/len(vetResult)))
return (media, desvio)
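# Worked example (illustrative, not part of the original script):
# desvio([1, 2, 3]) -> (2.0, 0.8164...), i.e. mean 2.0 and population
# standard deviation sqrt(((1-2)^2 + 0 + (3-2)^2) / 3) = sqrt(2/3).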
vet_matches = []
vet_corretos = []
img11 = cv2.imread("../../imgReferencia/img00.jpg", 0)
altura = img11.shape[0]
largura = img11.shape[1]
img1 = cv2.resize(img11, (int(largura*0.4), int(altura*0.4)))
corners11 = cv2.goodFeaturesToTrack(img1, 100, 0.01, 10)
corners1 = np.int0(corners11)
kp1 = cv2.KeyPoint_convert(corners1)
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
kp1, des1 = sift.compute(img1, kp1)
quantidadeImagens = 1
while(quantidadeImagens<=13):
acertos = 0
img22 = cv2.imread("../../imgTeste/img"+str(quantidadeImagens)+".jpg", 0)
altura2 = img22.shape[0]
largura2 = img22.shape[1]
img2 = cv2.resize(img22, (int(largura2*0.4), int(altura2*0.4)))
corners22 = cv2.goodFeaturesToTrack(img2, 100, 0.01, 10)
corners2 = np.int0(corners22)
kp2 = cv2.KeyPoint_convert(corners2)
kp2, des2 = sift.compute(img2, kp2)
mat = bf.match(des1,des2)
mat = sorted(mat, key = lambda x:x.distance)
matches = mat[0:150]
with open("../../imgTeste/img"+str(quantidadeImagens)+".txt",'r') as f:
texto=f.readlines()
posicao_x= np.float_(texto[0:4])
posicao_y = np.float_(texto[4:8])
min_x = float(min(posicao_x))
max_x = float(max(posicao_x))
min_y = float(min(posicao_y))
max_y = float(max(posicao_y))
if len(matches)>10:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
for pos in dst_pts:
if((pos[0][0]>(min_x) and pos[0][0]<(max_x)) and (pos[0][1]>(min_y) and pos[0][1]<(max_y))):
acertos+=1
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:],None,flags=2)
cv2.imwrite("../resultados/shiTomasi-sift/img"+str(quantidadeImagens)+".jpg", img3)
vet_matches.append(len(matches))
vet_corretos.append(acertos)
mycursor = mydb.cursor()
sql = "INSERT INTO shiTomasi_sift(Nome, Matches, Correto, ImgReferente) VALUES (%s, %s, %s, %s)"
valor = ("ShiTomasi-Sift"+str(quantidadeImagens), len(matches), acertos, "img"+str(quantidadeImagens)+".jpg")
mycursor.execute(sql, valor)
mydb.commit()
print(len(matches), acertos)
quantidadeImagens+=1
media_matches, desvio_matches = desvio(vet_matches)
media_corretos, desvio_corretos = desvio(vet_corretos)
porcentagem = (media_corretos/media_matches)*100
sql2 = "INSERT INTO medias_desvios(Nome, MediaMatches, DesvioMatches, MediaCorretos, DesvioCorretos, Porcentagem) VALUES (%s, %s, %s, %s, %s, %s)"
valor2 = ("shiTomasi_sift", media_matches, desvio_matches, media_corretos, desvio_corretos, porcentagem)
mycursor.execute(sql2, valor2)
mydb.commit()
|
[
"samuelreboucas07@hotmail.com"
] |
samuelreboucas07@hotmail.com
|
24d38b1c79dc504b389b64276c398a8a39f2423d
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2022_02_01_preview/aio/operations/_operations.py
|
2a6e7c95997bb3ead85375c355f3241e726885e6
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 |
MIT
| 2022-07-19T08:05:23 | 2018-11-16T22:15:30 |
Python
|
UTF-8
|
Python
| false | false | 4,963 |
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2022_02_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.OperationListResult"]:
"""Lists all of the available Azure Container Registry REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2022_02_01_preview.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/providers/Microsoft.ContainerRegistry/operations"} # type: ignore
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
6430ad985b5c08e8f0e7f98428386d3713eb65b2
|
c45c9e74ffafcceebf395cc1c5f5d31659988c19
|
/answer_search.py
|
ff4bf3d7d7e148d57a000bb5cd58779991814eb8
|
[] |
no_license
|
tedrepo/QABasedOnMedicalKnowledgeGraph
|
f68ca297254218c72ef18a26c98f1910610f7154
|
f690b80e2a7fb85455b45d3829b6998be9ebc739
|
refs/heads/master
| 2020-03-30T23:14:39.416415 | 2018-10-05T04:12:19 | 2018-10-05T04:12:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 643 |
py
|
#!/usr/bin/env python3
# coding: utf-8
# File: answer_search.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-10-5
from py2neo import Graph,Node
class AnswerSearcher:
def __init__(self):
self.g = Graph(
host="127.0.0.1",
http_port=7474,
user="lhy",
password="lhy123")
    '''Run each Cypher query and print the matching records'''
def search_main(self, sqls):
for sql in sqls:
ress = self.g.run(sql).data()
for res in ress:
print(res)
return
if __name__ == '__main__':
    searcher = AnswerSearcher()
|
[
"lhy_in_blcu@126.com"
] |
lhy_in_blcu@126.com
|
b47c9a85013089dec45758e6489eb731972070ee
|
4ece3041f2ed0cd312dc70fd3c7c240924dbb6ae
|
/pyathena/__init__.py
|
8335fb21281d596d87e5bc8a90d091895483fde9
|
[
"MIT"
] |
permissive
|
ivssh/PyAthena
|
175c5dfff0289a7ceccfe9a47ac490985535f669
|
156c51f19b46ea2f89612b3383937d78942bc990
|
refs/heads/master
| 2020-03-27T13:07:58.417397 | 2018-07-21T13:08:41 | 2018-07-21T13:08:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,351 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from pyathena.error import * # noqa
__version__ = '1.3.0'
# Globals https://www.python.org/dev/peps/pep-0249/#globals
apilevel = '2.0'
threadsafety = 3
paramstyle = 'pyformat'
class DBAPITypeObject:
"""Type Objects and Constructors
https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors
"""
def __init__(self, *values):
self.values = values
def __cmp__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
def __eq__(self, other):
return other in self.values
# https://docs.aws.amazon.com/athena/latest/ug/data-types.html
STRING = DBAPITypeObject('char', 'varchar', 'map', 'array', 'row')
BINARY = DBAPITypeObject('varbinary')
BOOLEAN = DBAPITypeObject('boolean')
NUMBER = DBAPITypeObject('tinyint', 'smallint', 'bigint', 'integer',
'real', 'double', 'float', 'decimal')
DATE = DBAPITypeObject('date')
DATETIME = DBAPITypeObject('timestamp')
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def connect(*args, **kwargs):
from pyathena.connection import Connection
return Connection(*args, **kwargs)
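# A quick illustration (assumption: these asserts are not part of the original
# module) of the DBAPITypeObject equality semantics defined above — a column's
# Athena type name compares equal to the group it belongs to:
if __name__ == "__main__":
    assert STRING == "varchar"
    assert NUMBER == "bigint"
    assert DATE != "timestamp" and DATETIME == "timestamp"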
|
[
"laughingman7743@gmail.com"
] |
laughingman7743@gmail.com
|
2a012620dfe09c0f6c1c04320e49696991285bed
|
8e6203db7383475f1c24a590f0456330b969bb4b
|
/optbinning/binning/distributed/plots.py
|
dba20f0cab79a00b42588937c020ed96d925680e
|
[
"Apache-2.0"
] |
permissive
|
guillermo-navas-palencia/optbinning
|
6fdfc764a214052b4d7d8e0b59114f0a63e6d5a8
|
73aee82008ebe88b732430e7c5764da57fb4d3ae
|
refs/heads/master
| 2023-08-28T13:33:43.536143 | 2023-08-22T19:20:18 | 2023-08-22T19:20:18 | 231,076,826 | 377 | 91 |
Apache-2.0
| 2023-09-05T20:14:14 | 2019-12-31T11:17:44 |
Python
|
UTF-8
|
Python
| false | false | 1,370 |
py
|
"""
Binning sketch plots.
"""
# Guillermo Navas-Palencia <g.navas.palencia@gmail.com>
# Copyright (C) 2020
import matplotlib.pyplot as plt
import numpy as np
def plot_progress_divergence(df, divergence):
n = len(df)
n_add = df.n_add
n_records = df.n_records
div = df.divergence
mv_div_mean = div.rolling(n, min_periods=1).mean()
mv_div_std = div.rolling(n, min_periods=1).std()
mv_div_std /= np.sqrt(np.arange(1, n+1))
    # 1.959963984540054 is the 97.5% quantile of the standard normal, giving a 95% confidence band
    div_low = np.maximum(0, div - mv_div_std * 1.959963984540054)
    div_high = div + mv_div_std * 1.959963984540054
div_label = "divergence ({:.5f})".format(div.values[-1])
mv_div_label = "moving mean ({:.5f})".format(mv_div_mean.values[-1])
mv_std_label = "standard error ({:.5f})".format(mv_div_std.values[-1])
plt.plot(n_records, div, label=div_label)
plt.plot(n_records, mv_div_mean, linestyle="-.", color="green",
label=mv_div_label)
plt.fill_between(n_records, div_low, div_high, alpha=0.2, color="green",
label=mv_std_label)
plt.title("Progress after {:} add and {} processed records".
format(int(n_add.values[-1]), int(n_records.values[-1])),
fontsize=14)
plt.xlabel("Processed records", fontsize=12)
plt.ylabel("Divergence: {}".format(divergence), fontsize=12)
plt.legend(fontsize=12)
plt.show()
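# A minimal usage sketch (illustrative only; in optbinning this frame is built
# incrementally by the binning-sketch code as records stream in):
if __name__ == "__main__":
    import pandas as pd
    progress = pd.DataFrame({
        "n_add": [1, 2, 3, 4],
        "n_records": [100, 200, 300, 400],
        "divergence": [0.10, 0.12, 0.11, 0.115],
    })
    plot_progress_divergence(progress, "IV")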
|
[
"g.navas.palencia@gmail.com"
] |
g.navas.palencia@gmail.com
|
142b4edaf5e0cb5022cd5869f8cbdf4542e77689
|
a4df0ee67d0d56fc8595877470318aed20dd4511
|
/vplexapi-6.2.0.3/vplexapi/models/health_state.py
|
819d13f492a6fb68862c506a14264a4633267ac3
|
[
"Apache-2.0"
] |
permissive
|
QD888/python-vplex
|
b5a7de6766840a205583165c88480d446778e529
|
e2c49faee3bfed343881c22e6595096c7f8d923d
|
refs/heads/main
| 2022-12-26T17:11:43.625308 | 2020-10-07T09:40:04 | 2020-10-07T09:40:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,553 |
py
|
# coding: utf-8
"""
VPlex REST API
    A definition for the next-gen VPlex API # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class HealthState(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
UNKNOWN = "unknown"
OK = "ok"
DEGRADED = "degraded"
MINOR_FAILURE = "minor-failure"
MAJOR_FAILURE = "major-failure"
CRITICAL_FAILURE = "critical_failure"
NON_RECOVERABLE_ERROR = "non-recoverable-error"
ISOLATED = "isolated"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""HealthState - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HealthState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"anil.degwekar@emc.com"
] |
anil.degwekar@emc.com
|
56b442f8b7bfc47ef533d1a9d1c90373518ecca3
|
df7736726d5b041e46b490e409a1d4481ef8c7f1
|
/tools/rosmaster/src/rosmaster/threadpool.py
|
1261e2f5e4aa3947450c12ff477e0830735e537e
|
[] |
no_license
|
strawlab/ros_comm
|
62f5d2bc68d6cbe85c071eabb7487164d6c328be
|
6f7ea2feeb3c890699518cb6eb3d33faa15c5306
|
refs/heads/master
| 2020-05-18T02:26:43.463444 | 2012-08-05T07:10:58 | 2012-08-05T07:10:58 | 5,301,610 | 13 | 31 | null | 2019-09-24T22:49:12 | 2012-08-05T07:10:44 |
Python
|
UTF-8
|
Python
| false | false | 8,088 |
py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id: threadpool.py 8327 2010-02-17 01:23:15Z kwc $
"""
Internal threadpool library for zenmaster.
Adapted from U{http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/203871}
Added a 'marker' to tasks so that multiple tasks with the same
marker are not executed. As we are using the thread pool for i/o
tasks, the marker is set to the i/o name. This prevents a slow i/o
for gobbling up all of our threads
"""
import threading, logging, traceback
from time import sleep
class MarkedThreadPool:
"""Flexible thread pool class. Creates a pool of threads, then
accepts tasks that will be dispatched to the next available
thread."""
def __init__(self, numThreads):
"""Initialize the thread pool with numThreads workers."""
self.__threads = []
self.__resizeLock = threading.Condition(threading.Lock())
self.__taskLock = threading.Condition(threading.Lock())
self.__tasks = []
self.__markers = set()
self.__isJoining = False
self.set_thread_count(numThreads)
def set_thread_count(self, newNumThreads):
""" External method to set the current pool size. Acquires
the resizing lock, then calls the internal version to do real
work."""
# Can't change the thread count if we're shutting down the pool!
if self.__isJoining:
return False
self.__resizeLock.acquire()
try:
self.__set_thread_count_nolock(newNumThreads)
finally:
self.__resizeLock.release()
return True
def __set_thread_count_nolock(self, newNumThreads):
"""Set the current pool size, spawning or terminating threads
if necessary. Internal use only; assumes the resizing lock is
held."""
# If we need to grow the pool, do so
while newNumThreads > len(self.__threads):
newThread = ThreadPoolThread(self)
self.__threads.append(newThread)
newThread.start()
# If we need to shrink the pool, do so
while newNumThreads < len(self.__threads):
self.__threads[0].go_away()
del self.__threads[0]
def get_thread_count(self):
"""@return: number of threads in the pool."""
self.__resizeLock.acquire()
try:
return len(self.__threads)
finally:
self.__resizeLock.release()
def queue_task(self, marker, task, args=None, taskCallback=None):
"""Insert a task into the queue. task must be callable;
args and taskCallback can be None."""
if self.__isJoining == True:
return False
if not callable(task):
return False
self.__taskLock.acquire()
try:
self.__tasks.append((marker, task, args, taskCallback))
return True
finally:
self.__taskLock.release()
def remove_marker(self, marker):
"""Remove the marker from the currently executing tasks. Only one
task with the given marker can be executed at a given time"""
if marker is None:
return
self.__taskLock.acquire()
try:
self.__markers.remove(marker)
finally:
self.__taskLock.release()
def get_next_task(self):
""" Retrieve the next task from the task queue. For use
only by ThreadPoolThread objects contained in the pool."""
self.__taskLock.acquire()
try:
retval = None
for marker, task, args, callback in self.__tasks:
# unmarked or not currently executing
if marker is None or marker not in self.__markers:
retval = (marker, task, args, callback)
break
if retval:
# add the marker so we don't do any similar tasks
self.__tasks.remove(retval)
if marker is not None:
self.__markers.add(marker)
return retval
else:
return (None, None, None, None)
finally:
self.__taskLock.release()
def join_all(self, wait_for_tasks = True, wait_for_threads = True):
""" Clear the task queue and terminate all pooled threads,
optionally allowing the tasks and threads to finish."""
# Mark the pool as joining to prevent any more task queueing
self.__isJoining = True
# Wait for tasks to finish
if wait_for_tasks:
while self.__tasks != []:
sleep(.1)
# Tell all the threads to quit
self.__resizeLock.acquire()
try:
self.__set_thread_count_nolock(0)
self.__isJoining = True
# Wait until all threads have exited
if wait_for_threads:
for t in self.__threads:
t.join()
del t
# Reset the pool for potential reuse
self.__isJoining = False
finally:
self.__resizeLock.release()
class ThreadPoolThread(threading.Thread):
"""
Pooled thread class.
"""
threadSleepTime = 0.1
def __init__(self, pool):
"""Initialize the thread and remember the pool."""
threading.Thread.__init__(self)
self.setDaemon(True) #don't block program exit
self.__pool = pool
self.__isDying = False
def run(self):
"""
Until told to quit, retrieve the next task and execute
it, calling the callback if any.
"""
while self.__isDying == False:
marker, cmd, args, callback = self.__pool.get_next_task()
# If there's nothing to do, just sleep a bit
if cmd is None:
sleep(ThreadPoolThread.threadSleepTime)
else:
try:
try:
result = cmd(*args)
finally:
self.__pool.remove_marker(marker)
if callback is not None:
callback(result)
except Exception, e:
logging.getLogger('rosmaster.threadpool').error(traceback.format_exc())
def go_away(self):
""" Exit the run loop next time through."""
self.__isDying = True
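# A minimal usage sketch (illustrative only, not part of the original module).
# Tasks that share a marker are serialized, which is the point of the marker:
if __name__ == '__main__':
    def fetch(name):
        sleep(0.2)  # stand-in for a slow i/o call
        return name
    pool = MarkedThreadPool(2)
    pool.queue_task('io-1', fetch, ('a',))
    pool.queue_task('io-1', fetch, ('b',))  # waits until 'a' releases the marker
    pool.join_all()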
|
[
"strawman@astraw.com"
] |
strawman@astraw.com
|
05f98c995114c13d415121f855678ae770c9123b
|
d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4
|
/AtCoder/ABC-D/107probD.py
|
261c5013ca5189665dd06803268802f1623a399f
|
[] |
no_license
|
wattaihei/ProgrammingContest
|
0d34f42f60fa6693e04c933c978527ffaddceda7
|
c26de8d42790651aaee56df0956e0b206d1cceb4
|
refs/heads/master
| 2023-04-22T19:43:43.394907 | 2021-05-02T13:05:21 | 2021-05-02T13:05:21 | 264,400,706 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,046 |
py
|
# Fenwick tree (BIT): add(i, x) is a point update; bit[i] returns the prefix sum over 1..i
class imosBIT():
def __init__(self, N):
self.N = N
self.bit = [0 for _ in range(self.N+1)]
def __str__(self):
ret = []
for i in range(1, self.N+1):
ret.append(self.__getitem__(i))
return "[" + ", ".join([str(a) for a in ret]) + "]"
def __getitem__(self, i):
s = 0
while i > 0:
s += self.bit[i]
i -= i & -i
return s
def add(self, i, x):
while i <= self.N:
self.bit[i] += x
i += i & -i
import sys
input = sys.stdin.readline
N = int(input())
A = list(map(int, input().split()))
l = 0
r = max(A) + 1
while r-l > 1:
x = (l+r)//2
P = [0]
for a in A:
p = +1 if a >= x else -1
P.append(P[-1]+p)
score = 0
bit = imosBIT(2*N+5)
for p in P:
p += N+1
score += bit[p]
bit.add(p, 1)
if score >= (N*(N+1)//2+1)//2:
l = x
else:
r = x
print(l)
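# Illustration (comments only, to keep the stdin-driven solution intact):
#   bit = imosBIT(5); bit.add(2, 1)  ->  bit[1] == 0, bit[2] == bit[3] == 1
# so `score += bit[p]` above counts how many earlier prefix values were <= p.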
|
[
"wattaihei.rapyuta@gmail.com"
] |
wattaihei.rapyuta@gmail.com
|
16526d6d991321e879d46e8d8cd308ef7e4677b9
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Difference/trend_MovingMedian/cycle_5/ar_/test_artificial_1024_Difference_MovingMedian_5__20.py
|
f424dd4077963cad7c75f615bce42289c823621a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 |
BSD-3-Clause
| 2018-12-17T22:08:12 | 2018-06-12T17:15:43 |
Python
|
UTF-8
|
Python
| false | false | 274 |
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
2d9579c4f46e697e49c1ff2919d1f2e549706639
|
51305c54f8a316b6878a4462e1ba58a55c8e320f
|
/manager/thumbgen.py
|
49950650252902242789d77d34a36ed34f1f3ad4
|
[] |
no_license
|
coinmenace/gck
|
5b15b460335c0b52925f1875ccb4fecd416008e7
|
fade84780cda218291cb2066808310c4871a06c8
|
refs/heads/master
| 2020-03-27T06:58:54.878353 | 2018-10-22T12:32:20 | 2018-10-22T12:32:20 | 146,153,068 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,078 |
py
|
from PIL import Image, ImageFile
import glob, os
from threading import *
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Thumbgen:
def __init__(self,file,fullname,identifier):
sizes = [(32, 32),(64, 64),(128, 128),(256, 256),(512, 512),(1024, 1024),(2048, 2048)]
self.generateThumb(identifier,file,fullname,sizes)
def generateThumb(self,identifier,file,fullname,sizes):
for size in sizes:
t=Thread(target=generateImages,args=(identifier,file,fullname,size,))
t.start()
            t.join()  # joining inside the loop makes the sizes run one after another rather than in parallel
def generateImages(identifier,file,fullname,size):
#print "Open "+fullname
im = Image.open(fullname)
im.thumbnail(size)
if not os.path.exists("website/static/thumbs/"+identifier+"/"):
os.mkdir("website/static/thumbs/"+identifier+"/")
file="website/static/thumbs/"+identifier+"/"+file.split(".")[0]+"_"+str(size[0])+"_"+str(size[1])
im.save(file + ".png",format="PNG", quality=95, optimize=True, progressive=True)
if __name__=="__main__":
filename="sample.png"
t=Thumbgen(filename)
|
[
"webframes@gmail.com"
] |
webframes@gmail.com
|
82a203f3a27ae3767dc8c58441b3f4644e5a1399
|
a2e607593dcbe5feaeedd9e9bd4caeaf06e46733
|
/tests/ui/menus/test_opmenu.py
|
464f5422d23c0778525972d3ce32d53d5aa537af
|
[] |
no_license
|
all-in-one-of/Houdini-Toolbox
|
dd05b2c869e663b185c1997d326bfe7548fbf55f
|
c10663c46c0f1249a9b3c6b32d4384a4399849ed
|
refs/heads/master
| 2020-06-13T01:10:11.832715 | 2019-08-30T07:24:47 | 2019-08-30T07:24:47 | 194,484,242 | 0 | 0 | null | 2019-06-30T06:42:17 | 2019-06-30T06:42:17 | null |
UTF-8
|
Python
| false | false | 1,738 |
py
|
"""Tests for ht.ui.menus.opmenu module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Python Imports
from mock import MagicMock, patch
import unittest
# Houdini Toolbox Imports
import ht.ui.menus.opmenu
# Houdini Imports
import hou
reload(ht.ui.menus.opmenu)
# =============================================================================
# CLASSES
# =============================================================================
class Test_create_absolute_reference_copy(unittest.TestCase):
"""Test ht.ui.menus.opmenu.create_absolute_reference_copy."""
def test(self):
"""Test creating an absolute reference copy."""
mock_node = MagicMock(spec=hou.Node)
scriptargs = {
"node": mock_node
}
mock_ui = MagicMock()
hou.ui = mock_ui
ht.ui.menus.opmenu.create_absolute_reference_copy(scriptargs)
mock_node.parent.return_value.copyItems.assert_called_with([mock_node], channel_reference_originals=True, relative_references=False)
del hou.ui
class Test_save_item_to_file(unittest.TestCase):
"""Test ht.ui.menus.opmenu.save_item_to_file."""
@patch("ht.ui.menus.opmenu.copy_item")
def test(self, mock_copy):
"""Test saving an item to a file."""
mock_node = MagicMock(spec=hou.Node)
scriptargs = {
"node": mock_node
}
ht.ui.menus.opmenu.save_item_to_file(scriptargs)
mock_copy.assert_called_with(mock_node)
# =============================================================================
if __name__ == '__main__':
unittest.main()
|
[
"captainhammy@gmail.com"
] |
captainhammy@gmail.com
|
cfbc0b358cbc8a73771ab602b38fe9a5b825e242
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/488/usersdata/341/112971/submittedfiles/AvF_Parte3.py
|
25d6392521b197f54357fe6d625293d8a2655e93
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
# -*- coding: utf-8 -*-
n = int(input('Digite a quantidade de números: '))
a = []
for i in range (n):
    a.append(int(input('Digite os respectivos números: ')))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
b524fe5caa3d77e5a88deb2e1aca3844f930eedf
|
40491d4649bc8f981cfd531657b0970e2577edd1
|
/Policy_Gradient/Tank_1/params.py
|
591b4c09a4383ccea277dcc219593c967ce568b8
|
[] |
no_license
|
emedd33/Reinforcement-Learning-in-Process-Control
|
d82ddab87dc6727a70ee38d53889aa8af87ade25
|
24bc1d9b72c0762bd92c215837347d6548099902
|
refs/heads/master
| 2022-07-12T02:53:52.208320 | 2022-04-05T15:23:48 | 2022-04-05T15:23:48 | 161,691,463 | 29 | 11 | null | 2022-06-21T21:39:15 | 2018-12-13T20:29:27 |
Python
|
UTF-8
|
Python
| false | false | 1,408 |
py
|
MAIN_PARAMS = {
"EPISODES": 20000,
"MEAN_EPISODE": 50,
"MAX_TIME": 200,
"RENDER": True,
"MAX_MEAN_REWARD": 200, # minimum reward before saving model
}
AGENT_PARAMS = {
"N_TANKS": 1,
"SS_POSITION": 0.5,
"VALVE_START_POSITION": 0.2,
"ACTION_DELAY": [5],
"INIT_ACTION": 0.3,
"VALVEPOS_UNCERTAINTY": 0,
"EPSILON_DECAY": [1],
"LEARNING_RATE": [0.0005],
"HIDDEN_LAYER_SIZE": [[5, 5]],
"BATCH_SIZE": 5,
"MEMORY_LENGTH": 10000,
"OBSERVATIONS": 4, # level, gradient, is_above 0.5, prevous valve position
"GAMMA": 0.9,
"EPSILON": [0],
"EPSILON_MIN": [0],
"BASE_LINE_LENGTH": 1,
"Z_VARIANCE": [0.05],
"SAVE_MODEL": [True],
"LOAD_MODEL": [False],
"TRAIN_MODEL": [True],
"LOAD_MODEL_NAME": [""],
"LOAD_MODEL_PATH": "Policy_Gradient/Tank_1/",
"SAVE_MODEL_PATH": "Policy_Gradient/Tank_1/",
}
# Model parameters Tank 1
TANK1_PARAMS = {
"height": 10,
"init_level": 0.5,
"width": 10,
"pipe_radius": 0.5,
"max_level": 0.75,
"min_level": 0.25,
}
TANK1_DIST = {
"add": True,
"pre_def_dist": False,
"nom_flow": 1, # 2.7503
"var_flow": 0.1,
"max_flow": 2,
"min_flow": 0.7,
"add_step": False,
"step_time": int(MAIN_PARAMS["MAX_TIME"] / 2),
"step_flow": 2,
"max_time": MAIN_PARAMS["MAX_TIME"],
}
TANK_PARAMS = [TANK1_PARAMS]
TANK_DIST = [TANK1_DIST]
|
[
"eskild.emedd33@gmail.com"
] |
eskild.emedd33@gmail.com
|
c3a893c3d848b53fed2af2a0af5ef2a746813b2d
|
352f7d1258e51d3b7e8cfcbb4b527c3e27a68fe5
|
/tests/test_img_server.py
|
b8eca0fb172da1de0c121455a4bcb1751b25020c
|
[] |
no_license
|
lidingke/fiberGeometry
|
67b53535ca1060af1ab29de915f1190258d7986e
|
1455fd815884a735d5b9e87aff07244ca9a95a23
|
refs/heads/master
| 2020-05-21T16:45:06.374649 | 2018-02-25T06:30:15 | 2018-02-25T06:30:15 | 62,809,512 | 1 | 1 | null | 2017-08-29T03:21:54 | 2016-07-07T13:37:45 |
C
|
UTF-8
|
Python
| false | false | 2,947 |
py
|
# coding:utf-8
from setting.config import SIMULATOR_IMG_SERVER_COFIG
from SDK.simulator.client import Client
from SDK.simulator.server import ImgServer, SeverMain, SharpSever
from threading import Thread
import multiprocessing
from tornado.ioloop import IOLoop
from functools import partial
from util.getimg import getImage
from tornado.iostream import StreamClosedError
import time
import logging
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
def test_sharpserver():
ss = SharpSever()
ss.getAll()
def test_imgserver():
u"""测试摄像头模拟器/图片服务器的性能
:return:
"""
host, port, method, path = SIMULATOR_IMG_SERVER_COFIG
port = 9885
# port = 9801
Thread(target = SeverMain, args=(host, port, method, path)).start()
# multiprocessing.Process(target=servermain).start()
# time.sleep(1)
img = getImage('IMG/midoc.BMP')
imgstr = img.tobytes()
result = IOLoop.current().run_sync(Client(port=port).get_img_once)
assert len(result) == len(imgstr)
assert imgstr != result
print len(result)
para = ('getImage', 'IMG/midoc.BMP')
IOLoop.current().run_sync(partial(Client(port=port).get_change,para))
result = IOLoop.current().run_sync(Client(port=port).get_img_once)
assert len(result) == len(imgstr)
assert imgstr == result
para = ('randomImg', 'IMG/G652/pk/')
IOLoop.current().run_sync(partial(Client(port=port).get_change, para))
result = IOLoop.current().run_sync(Client(port=port).get_img_once)
assert len(result) == len(imgstr)
assert imgstr != result
IOLoop.current().run_sync(Client(port=port).close_server)
def test_getimg_multi_connect():
u"""测试连接池取图片
:return:
"""
host, port, method, path = SIMULATOR_IMG_SERVER_COFIG
port = 9883
# port = 9801
img = getImage('IMG/midoc.BMP')
imgstr = img.tobytes()
# port = 9801
Thread(target = SeverMain, args=(host, port, method, path)).start()
# multiprocessing.Process(target=SeverMain, args=(port,)).start()
print 'start multi connect'
for x in range(0,100):
try:
# time.sleep(0.5)
result = IOLoop.current().run_sync(Client(port=port).get_img_once)
assert len(result) == len(imgstr)
except StreamClosedError:
logger.warning("Lost host at client %s")
return
except Exception as e:
print 'range time', x
raise e
if x%50 == 0:
print 'create times',x, time.time()
IOLoop.current().run_sync(Client(port=port).close_server)
# def test_imgserver():
# Thread(target = SeverMain).start()
# multiprocessing.Process(target=servermain).start()
# time.sleep(1)
if __name__ == "__main__":
port = 9880
para = ('randomImg', 'IMG/emptytuple/eptlight2')
IOLoop.current().run_sync(partial(Client(port=port).get_change, para))
|
[
"lidingke@hust.edu.cn"
] |
lidingke@hust.edu.cn
|
ccffdde7de02461543a3f4f909b19626b7520c9f
|
f516b7561b93f640bcb376766a7ecc3440dcbb99
|
/leetcode/easy/add-binary.py
|
a7a66ad52358184d587c15dba4b509ef2bcc902c
|
[
"Apache-2.0"
] |
permissive
|
vtemian/interviews-prep
|
c41e1399cdaac9653c76d09598612f7450e6d302
|
ddef96b5ecc699a590376a892a804c143fe18034
|
refs/heads/master
| 2020-04-30T15:44:42.116286 | 2019-09-10T19:41:41 | 2019-09-10T19:41:41 | 176,928,167 | 8 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 872 |
py
|
class Solution(object):
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
if len(b) > len(a):
a, b = b, a
a = a[::-1]
b = b[::-1]
count = 0
remainder = 0
result = ""
while count < len(b):
b_a = a[count]
b_b = b[count]
result += str((int(b_a) + int(b_b) + remainder) % 2)
            remainder = (int(b_a) + int(b_b) + remainder) // 2
count += 1
while count < len(a):
b_a = a[count]
result += str((int(b_a) + remainder) % 2)
            remainder = (int(b_a) + remainder) // 2
count += 1
if remainder:
result += str(remainder)
return result[::-1]
result = Solution().addBinary('1010', '1011')
print(result)
|
[
"vladtemian@gmail.com"
] |
vladtemian@gmail.com
|
3bc801af96cf998efd961d2ff892da8cd5f95e93
|
3de11c5630cad4ca816ad17dd2f6c743b8799108
|
/djangorestframework/tutorial/tutorial/settings.py
|
57a3ef605fb5ea039f858ff6b08cc8fa7ff71296
|
[] |
no_license
|
greenfrog82/TIL_Python
|
a6f03b0ae6f2260310faa5ef59d4bd01dcf6a1ed
|
015116c5ff4a14f531e3693f9cfd3a921a674b81
|
refs/heads/master
| 2022-12-09T22:34:49.485937 | 2021-05-11T10:59:41 | 2021-05-11T10:59:41 | 154,969,150 | 0 | 1 | null | 2022-12-08T01:20:11 | 2018-10-27T13:44:56 |
Python
|
UTF-8
|
Python
| false | false | 4,216 |
py
|
"""
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import datetime
from django.core.management.utils import get_random_secret_key
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%pt7&cwica7@md!culsrv)0u)v$p*)ivk2-w5&lgv^5&2q5h7%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
# 'allauth',
# 'allauth.account',
'rest_auth.registration',
'snippets.apps.SnippetsConfig',
# 'users.apps.UsersConfig',
# 'comment.apps.CommnetConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
# JWT_AUTH = {
# 'JWT_EXPIRATION_DELTA': datetime.timedelta(minutes=15),
# }
CUSTOM_USER_CONFIG = {
'PAGINATION_INFO': {
'PAGE_SIZE': 5,
'MAX_PAGE_SIZE': 10000
}
}
# For django-rest-auth
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 1
ACCOUNT_EMAIL_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_VERIFICATION = 'optional'
# For Hash ID
HASHID_FIELD_SALT = get_random_secret_key()
|
[
"greenfrog82@naver.com"
] |
greenfrog82@naver.com
|
01fa61b61414d147b0eea7f2609800fd9d303acb
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/codeabbey/_Python_Problem_Solving-master/Greatest Common Divisor.py
|
a5d6275ab69ff29ca8c3202f4e265872f942f71d
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 363 |
py
|
a = int(input())
string = ''
for i in range(a):
temp1,temp2 = num1, num2 = [int(ele) for ele in input().split()]
while num1 != num2:
if num1 > num2:
num1 = num1 - num2
else:
num2 = num2 - num1
    lcm = temp1 * temp2 // num1  # num1 now holds gcd(temp1, temp2)
string += '('+str(num1)+' '+str(int(lcm))+')'
string += ' '
print(string)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
b85d7944f883d5fc1dae7e069f5d5cb234104815
|
0df124c41cbaa94750df79fc70bf911d298610a7
|
/train_kFold.py
|
af272569fc2d9f5c6934814ab1624fffa7f18f92
|
[] |
no_license
|
bcaitech1/p2-klue-HYLee1008
|
7093a9245fe3ad9bf29251a4c12f12a801b9f4f5
|
c22d1a1ba8e3aa89198d786845a0ad6efc69e27c
|
refs/heads/main
| 2023-04-10T11:18:11.500052 | 2021-04-22T11:23:23 | 2021-04-22T11:23:23 | 360,466,733 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,318 |
py
|
import argparse
import pickle as pickle
import os
import pandas as pd
import numpy as np
import torch
import random
import transformers
import glob
import re
import time
import json
import wandb
from sklearn.metrics import accuracy_score
from transformers import AutoTokenizer, BertForSequenceClassification, Trainer, TrainingArguments, BertConfig, BertTokenizerFast, BertModel, XLMRobertaTokenizer
from pathlib import Path
from sklearn.model_selection import KFold
from load_data import *
from model import BERTClassifier, XLMRoBERTAClassifier, BERTLarge, KoElectraClassifier, mbart
from loss import LabelSmoothingLoss
from torch.utils.tensorboard import SummaryWriter
def increment_path(path, exist_ok=False):
""" Automatically increment path, i.e. runs/exp --> runs/exp0, runs/exp1 etc.
Args:
path (str or pathlib.Path): f"{model_dir}/{args.name}".
exist_ok (bool): whether increment path (increment if False).
"""
path = Path(path)
if (path.exists() and exist_ok) or (not path.exists()):
return str(path)
else:
dirs = glob.glob(f"{path}*")
matches = [re.search(rf"%s(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m]
n = max(i) + 1 if i else 2
return f"{path}{n}"
# fix the random seed
def seed_everything(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
def train(args):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# model save path
save_dir = increment_path(os.path.join(args.model_dir, args.bert_model))
os.makedirs(save_dir, exist_ok=True)
# save args on .json file
with open(os.path.join(save_dir, 'config.json'), 'w', encoding='utf-8') as f:
json.dump(vars(args), f, ensure_ascii=False, indent=4)
# set random seed
seed_everything(args.seed)
# load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.bert_model)
# load dataset
train_dataset = load_data("/opt/ml/input/data/train/train.tsv")
train_label = train_dataset['label'].values
# tokenizing dataset
tokenized_train = tokenized_dataset(train_dataset, tokenizer)
# make dataset for pytorch.
RE_train_dataset = RE_Dataset(tokenized_train, train_label)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
kfold = KFold(n_splits=5)
for fold, (train_index, valid_index) in enumerate(kfold.split(train_dataset), 1):
train_sub = torch.utils.data.Subset(RE_train_dataset, train_index)
valid_sub = torch.utils.data.Subset(RE_train_dataset, valid_index)
train_loader = torch.utils.data.DataLoader(
train_sub,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers
)
valid_loader = torch.utils.data.DataLoader(
valid_sub,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers
)
# load model
        model = XLMRoBERTAClassifier(args.bert_model).to(device)
        # model = mbart(args.bert_model).to(device)  # alternative backbone; if left active it silently overwrites the XLM-R model above
# load optimizer & criterion
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
criterion = LabelSmoothingLoss(smoothing=args.smoothing)
best_acc, last_epoch = 0, 0
for epoch in range(1, args.epochs + 1):
model.train()
loss_value = 0
start_time = time.time()
for batch_id, item in enumerate(train_loader):
input_ids = item['input_ids'].to(device)
# token_type_ids = item['token_type_ids'].to(device)
attention_mask = item['attention_mask'].to(device)
labels = item['labels'].to(device)
optimizer.zero_grad()
output = model(input_ids, attention_mask)
loss = criterion(output, labels)
loss_value += loss.item()
loss.backward()
optimizer.step()
# scheduler.step()
train_loss = loss_value / (batch_id + 1)
# evaluate model on dev set
with torch.no_grad():
model.eval()
acc_vals = 0
for batch_id, item in enumerate(valid_loader):
input_ids = item['input_ids'].to(device)
# token_type_ids = item['token_type_ids'].to(device)
attention_mask = item['attention_mask'].to(device)
labels = item['labels'].to(device)
output = model(input_ids, attention_mask)
pred = torch.argmax(output, dim=-1)
acc_item = (labels == pred).sum().item()
acc_vals += acc_item
val_acc = acc_vals / len(valid_sub)
time_taken = time.time() - start_time
# metric = {'val_acc': val_acc}
# wandb.log(metric)
print("fold: {} epoch: {}, loss: {}, val_acc: {}, time taken: {}".format(fold, epoch, train_loss, val_acc, time_taken))
if best_acc < val_acc:
print(f'best model! saved at fold {fold} epoch {epoch}')
if os.path.isfile(f"{save_dir}/{fold}_best_{last_epoch}.pth"):
os.remove(f"{save_dir}/{fold}_best_{last_epoch}.pth")
torch.save(model.state_dict(), f"{save_dir}/{fold}_best_{epoch}.pth")
best_acc = val_acc
last_epoch = epoch
# save model
torch.save(model.state_dict(), f"{save_dir}/{fold}_last_{epoch}.pth")
def main(args):
train(args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Data and model checkpoints directories
parser.add_argument('--seed', type=int, default=1024, help='random seed (default: 1024)')
    parser.add_argument('--epochs', type=int, default=10, help='number of epochs for train (default: 10)')
    parser.add_argument('--batch_size', type=int, default=16, help='input batch size for training (default: 16)')
    parser.add_argument('--num_workers', type=int, default=4, help='number of workers for dataloader (default: 4)')
    parser.add_argument('--smoothing', type=float, default=0.2, help='label smoothing factor for label smoothing loss (default: 0.2)')
parser.add_argument('--learning_rate', type=float, default=1e-5, help='learning rate for training (default: 1e-5)')
parser.add_argument('--weight_decay', type=float, default=0.01, help='weight decay (default: 0.01)')
parser.add_argument('--model_dir', type=str, default='./results/kfold', help='directory where model would be saved (default: ./results)')
# xlm-roberta-large
# joeddav/xlm-roberta-large-xnli
# monologg/koelectra-base-v3-discriminator
# facebook/mbart-large-cc25
parser.add_argument('--bert_model', type=str, default='xlm-roberta-large', help='backbone bert model for training (default: xlm-roberta-large)')
args = parser.parse_args()
main(args)
|
[
"discone1008@gmail.com"
] |
discone1008@gmail.com
|
b8e2120fcd66ff56ce5658f05e466269e248c642
|
99459cd11263f721155316164afddd1accf6419f
|
/stack.py
|
7dd3b5ad53c0b158b87031a28ec838fc68eca0de
|
[] |
no_license
|
dvmazuera/cs-data-structures-assessment
|
5dc767241bb8a1821726c5b13a96140a59d0babf
|
21082045955fa23cf26dd9dd52fdf9c22c0db31b
|
refs/heads/master
| 2021-01-22T11:29:32.020412 | 2017-05-29T03:55:31 | 2017-05-29T03:55:31 | 92,704,751 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,950 |
py
|
class StackEmptyError(IndexError):
"""Attempt to pop an empty stack."""
class Stack(object):
"""LIFO stack.
Implemented using a Python list; since stacks just need
to pop and push, a list is a good implementation, as
these are O(1) for native Python lists. However, in cases
where performance really matters, it might be best to
use a Python list directly, as it avoids the overhead
of a custom class.
Or, for even better performance (& typically smaller
memory footprint), you can use the `collections.deque`
object, which can act like a stack.
(We could also write our own LinkedList class for a
stack, where we push things onto the head and pop things
off the head (effectively reversing it), but that would be less
efficient than using a built-in Python list or a
`collections.deque` object)
"""
def __init__(self):
self._list = []
def __repr__(self):
if not self._list:
return "<Stack (empty)>"
else:
return "<Stack tail=%s length=%d>" % (
self._list[-1], len(self._list))
def push(self, item):
"""Add item to end of stack."""
self._list.append(item)
def pop(self):
"""Remove item from end of stack and return it."""
if not self._list:
raise StackEmptyError()
return self._list.pop()
def __iter__(self):
"""Allow iteration over list.
__iter__ is a special method that, when defined,
allows you to loop over a list, so you can say things
like "for item in my_stack", and it will pop
successive items off.
"""
        while True:
            try:
                yield self.pop()
            except StackEmptyError:
                # PEP 479: returning ends the generator cleanly; raising
                # StopIteration here is a RuntimeError on Python 3.7+.
                return
def length(self):
"""Return length of stack::
>>> s = Stack()
>>> s.length()
0
>>> s.push("dog")
>>> s.push("cat")
>>> s.push("fish")
>>> s.length()
3
"""
return len(self._list)
def empty(self):
"""Empty stack::
>>> s = Stack()
>>> s.push("dog")
>>> s.push("cat")
>>> s.push("fish")
>>> s.length()
3
>>> s.empty()
>>> s.length()
0
"""
self._list = []
def is_empty(self):
"""Is stack empty?
>>> s = Stack()
>>> s.is_empty()
True
>>> s.push("dog")
>>> s.push("cat")
>>> s.push("fish")
>>> s.is_empty()
False
"""
return not bool(self._list)
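# Aside (not part of the original API): a minimal sketch of the docstring's
# suggested alternative, collections.deque used directly as a LIFO stack.
def _deque_stack_demo():
    from collections import deque

    s = deque()
    s.append("dog")  # push onto the right end
    s.append("cat")
    assert s.pop() == "cat"  # pop from the same end -> LIFO order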
if __name__ == "__main__":
import doctest
    print()
    result = doctest.testmod()
    if not result.failed:
        print("ALL TESTS PASSED. GOOD WORK!")
    print()
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
0a81afd1bccfde119b3571c2a5ba4395ebb7b44f
|
e5cd01fd620e8e746a20b883de7ac32bec4feb5c
|
/Ejercicios python/PE4/PE4E3.py
|
53f599b8b515986576a6731ce8932f4c3575fac2
|
[] |
no_license
|
eazapata/python
|
0f6a422032d8fb70d26f1055dc97eed83fcdc572
|
559aa4151093a120527c459a406abd8f2ff6a7d8
|
refs/heads/master
| 2020-08-27T09:19:29.395109 | 2019-11-23T20:11:14 | 2019-11-23T20:11:14 | 217,314,818 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 667 |
py
|
#PE4E3 Eduardo Antonio Zapata Valero
#Ask the user whether they want to compute the area of a triangle or a square,
#then request the data for each case and print the result.
fig = (input("Do you want to compute the area of a triangle (t) or a square (c)? "))
if (fig == "t"):
    print("You chose triangle; enter the base and the height of the triangle\n")
    b = float(input())
    h = float(input())
    print("The area of the triangle is ", (b * h) / 2)
elif (fig == "c"):
    l = float(input("You chose square; enter the length of the side\n"))
    print("The area of the square is ", (l * l))
else:
    print("The figure whose area you want to compute is not recognized")
|
[
"you@example.com"
] |
you@example.com
|
88f3a978e1ccdf33914b845f1988779d03433a82
|
3a2af7b4b801d9ba8d78713dcd1ed57ee35c0992
|
/zerver/migrations/0051_realmalias_add_allow_subdomains.py
|
dec9cce79560fb47f11fae6a6962e964cc2a4a00
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
timabbott/zulip
|
2b69bd3bb63539adbfc4c732a3ff9d52657f40ac
|
42f239915526180a1a0cd6c3761c0efcd13ffe6f
|
refs/heads/master
| 2023-08-30T21:45:39.197724 | 2020-02-13T23:09:22 | 2020-06-25T21:46:33 | 43,171,533 | 6 | 9 |
Apache-2.0
| 2020-02-24T20:12:52 | 2015-09-25T19:34:16 |
Python
|
UTF-8
|
Python
| false | false | 541 |
py
|
# Generated by Django 1.10.5 on 2017-01-25 20:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0050_userprofile_avatar_version'),
]
operations = [
migrations.AddField(
model_name='realmalias',
name='allow_subdomains',
field=models.BooleanField(default=False),
),
migrations.AlterUniqueTogether(
name='realmalias',
unique_together={('realm', 'domain')},
),
]
|
[
"tabbott@zulipchat.com"
] |
tabbott@zulipchat.com
|
1176757494ee948beb10dc386770bfbd2a823956
|
a29310948867f5f07109fcd225a84282ad7eea16
|
/design_models/template_method.py
|
c4f800913310ae0c850b9c6b745efc7ed06b179d
|
[] |
no_license
|
likeweilikewei/Python-study-demo
|
09b266c0756b6e340e8b8e3153a7e497be8ee1a9
|
7dd4bc851273a5815d8980f9857828abfa5364a7
|
refs/heads/master
| 2020-06-26T21:17:27.095532 | 2019-07-31T02:17:43 | 2019-07-31T02:17:43 | 199,760,324 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,374 |
py
|
#!/usr/bin/python
# coding:utf8
'''
Template Method
Template method pattern:
    When to use it: the same overall logic repeats, but the concrete details differ.
    How it is structured: the shared logic is pulled up into a skeleton, and the
    concrete steps are plugged in -- effectively an abstraction over control flow.
'''
ingredients = "spam eggs apple"
line = '-' * 10
# Skeletons
def iter_elements(getter, action):
"""Template skeleton that iterates items"""
for element in getter():
action(element)
print(line)
def rev_elements(getter, action):
"""Template skeleton that iterates items in reverse order"""
for element in getter()[::-1]:
action(element)
print(line)
# Getters
def get_list():
return ingredients.split()
def get_lists():
return [list(x) for x in ingredients.split()]
# Actions
def print_item(item):
print(item)
def reverse_item(item):
print(item[::-1])
# Makes templates
def make_template(skeleton, getter, action):
"""Instantiate a template method with getter and action"""
def template():
skeleton(getter, action)
return template
# Create our template functions
templates = [make_template(s, g, a)
for g in (get_list, get_lists)
for a in (print_item, reverse_item)
for s in (iter_elements, rev_elements)]
# Execute them
for template in templates:
template()
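# For illustration (output inferred from the code above): the template built
# from (iter_elements, get_list, print_item) prints
#   spam
#   eggs
#   apple
#   ----------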
|
[
"1293120583@qq,com"
] |
1293120583@qq,com
|
3d58de779e6e9ce278cac6d0c11ec7646a8fb43e
|
8f3336bbf7cd12485a4c52daa831b5d39749cf9b
|
/Python/maximum-average-subarray-i.py
|
a92f4789fc9c877e00d034b9d34aa0c4a577f269
|
[] |
no_license
|
black-shadows/LeetCode-Topicwise-Solutions
|
9487de1f9a1da79558287b2bc2c6b28d3d27db07
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
refs/heads/master
| 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 |
C++
|
UTF-8
|
Python
| false | false | 398 |
py
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def findMaxAverage(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: float
"""
result = total = sum(nums[:k])
for i in xrange(k, len(nums)):
total += nums[i] - nums[i-k]
result = max(result, total)
return float(result) / k
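# Example (the classic LeetCode case; note xrange makes this Python 2 code):
#   Solution().findMaxAverage([1, 12, -5, -6, 50, 3], 4) -> 12.75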
|
[
"noreply@github.com"
] |
black-shadows.noreply@github.com
|
308bff52ce577ba49c9ba46d0fd7277f04669f7f
|
0e94b21a64e01b992cdc0fff274af8d77b2ae430
|
/spider/004_kekeenglish_daysentence.py
|
35df50ab90f89a35b0fc40370d4b4fef02e20b22
|
[] |
no_license
|
yangnaGitHub/LearningProcess
|
1aed2da306fd98f027dcca61309082f42b860975
|
250a8b791f7deda1e716f361a2f847f4d12846d3
|
refs/heads/master
| 2020-04-15T16:49:38.053846 | 2019-09-05T05:52:04 | 2019-09-05T05:52:04 | 164,852,337 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,383 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 13:18:16 2018
@author: Administrator
"""
import re
from lxml import etree
import urllib.request
import xlwt
response=urllib.request.urlopen('http://www.kekenet.com/kouyu/primary/chuji/')
html = response.read().decode("utf-8")
tr = etree.HTML(html)
#//div[@class="tb-btn-wait"]
#//ul[contains(@class,"J_TSaleProp")]
#//div[contains(@class,"tb-btn-buy")]/a[@id="J_LinkBuy"]
#contents = tr.xpath('//ul[@id="menu-list"]/li')
contents = tr.xpath('//div[@class="page th"]/a')
total_pages = 0
for content in contents:
total_pages = max(total_pages, int(content.text))
book = xlwt.Workbook()
sheet = book.add_sheet('translation')
row = 0
contentTexts = {}
errorRecords = {}
for page in range(total_pages, 0, -1):
if total_pages != page:
response=urllib.request.urlopen('http://www.kekenet.com/kouyu/primary/chuji/List_%d.shtml' % page)
html = response.read().decode("utf-8")
tr = etree.HTML(html)
allTests = tr.xpath("//text()")  # all text nodes on the page
contents = tr.xpath('//ul[@id="menu-list"]/li/h2/a')
prepareTexts = []
for content in contents:
prepareTexts.append(content.text)
for index, allTest in enumerate(allTests):
if allTest in prepareTexts:
needText = allTests[index + 3].replace('\n', '').replace(',', ',').replace('。', '.')
if re.findall('^[a-zA-Z]', needText):
pass
else:
needText = allTests[index + 2].replace('\n', '').replace(',', ',').replace('。', '.')
try:
slicePos = needText.find(re.findall('[\u2E80-\u9FFF]+', needText)[0])
contentTexts[needText[:slicePos].replace('\n', '')] = needText[slicePos:].replace('\n', '').replace(',', ',').replace('。', '.')
firstStr = needText[:slicePos].replace('\n', '')
secondStr = needText[slicePos:].replace('\n', '').replace(',', ',').replace('。', '.')
except IndexError:
print('find error (%d %d %d: %s)' % (page, index, row+1, allTest))
errorRecords[str(page) + str(index) + str(row+1)] = allTests
firstStr = ''
secondStr = ''
sheet.write(row, 0, firstStr)
sheet.write(row, 1, secondStr)
row += 1
book.save('translation.xlsx')
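# Aside on the slicing trick above (illustration, not part of the scraper):
# re.findall('[\u2E80-\u9FFF]+', 'Hello 世界') -> ['世界'], so needText.find(...)
# locates where the Chinese translation begins in a mixed-language line.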
|
[
"ityangna0402@163.com"
] |
ityangna0402@163.com
|
3dbf3e87b4b004b83e913dd989ed2ab900c5eb16
|
b9e9c89567894fd7e5ddfd27fe9068a074a92df7
|
/pyramid_signup/tests/test_init.py
|
d1de398dc5429102f7465cb8ee45667f5212c697
|
[] |
no_license
|
AnneGilles/pyramid_signup
|
8aeea113176dd64a326caa5f7704026e0538c94a
|
0622d951e686f0926291d98559a6b4afa2c81241
|
refs/heads/master
| 2021-01-18T17:48:48.260300 | 2011-12-09T04:56:44 | 2011-12-09T04:56:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,435 |
py
|
from pyramid import testing
from pyramid_signup.models import User
from pyramid_signup.tests import UnitTestBase
from mock import patch
from mock import Mock
class TestInitCase(UnitTestBase):
def test_root_factory(self):
from pyramid_signup import RootFactory
from pyramid.security import Everyone
from pyramid.security import Authenticated
from pyramid.security import Allow
from pyramid.security import ALL_PERMISSIONS
root_factory = RootFactory(testing.DummyRequest())
assert len(root_factory.__acl__) == 2
for ace in root_factory.__acl__:
assert ace[0] == Allow
if ace[1] == 'group:admin':
assert ace[2] == ALL_PERMISSIONS
elif ace[1] == Authenticated:
assert ace[2] == 'view'
def test_request_factory(self):
from pyramid_signup import SignUpRequestFactory
user1 = User(username='sontek', first_name='john')
self.session.add(user1)
self.session.flush()
with patch('pyramid_signup.unauthenticated_userid') as unauth:
unauth.return_value = 1
request = SignUpRequestFactory({})
request.registry = Mock()
getUtility = Mock()
getUtility.return_value = self.session
request.registry.getUtility = getUtility
user = request.user
assert user == user1
|
[
"sontek@gmail.com"
] |
sontek@gmail.com
|
36d1cdb0cf14edfe05793a672c0556d8c5875baa
|
d1e4f29e583ee964d63bc48554eaa73d67d58eb2
|
/zerver/migrations/0222_userprofile_fluid_layout_width.py
|
3b5c232bee7088bb888cc76437ff9bc3df92ee7b
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
hygolei/zulip
|
299f636f9238f50b0d2746f1c371748f182f1f4e
|
39fe66ab0824bc439929debeb9883c3046c6ed70
|
refs/heads/master
| 2023-07-11T22:50:27.434398 | 2021-08-09T10:07:35 | 2021-08-09T10:07:35 | 375,401,165 | 1 | 1 |
Apache-2.0
| 2021-08-09T10:07:36 | 2021-06-09T15:20:09 |
Python
|
UTF-8
|
Python
| false | false | 428 |
py
|
# Generated by Django 1.11.20 on 2019-04-15 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0221_subscription_notifications_data_migration"),
]
operations = [
migrations.AddField(
model_name="userprofile",
name="fluid_layout_width",
field=models.BooleanField(default=False),
),
]
|
[
"tabbott@zulipchat.com"
] |
tabbott@zulipchat.com
|
590d2207a922188f883dab5476511635e22f0ab1
|
408f8c561a695ac20b792ba0c4a230c154dad347
|
/scripts/slurm.py
|
1526201ab8cdf66bbed803e8fe3ad1e4f9c182d0
|
[] |
no_license
|
andnp/acceleration-v2
|
a407888c74a247e6d441259d50d77cf6194f728b
|
52b8a42c3e315ddbb4549a3a941afda81e92be9b
|
refs/heads/master
| 2022-11-26T05:42:17.680125 | 2020-08-02T23:25:01 | 2020-08-02T23:25:01 | 204,991,770 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,639 |
py
|
import time
import sys
import os
sys.path.append(os.getcwd())
from src.utils.model import loadExperiment
from PyExpUtils.runner import SlurmArgs
from PyExpUtils.results.paths import listResultsPaths
from PyExpUtils.utils.generator import group
from PyExpUtils.runner.Slurm import schedule, slurmOptionsFromFile
if len(sys.argv) < 4:
print('Please run again using')
print('python -m scripts.scriptName [src/entry.py] [path/to/slurm-def] [base_path] [runs] [paths/to/descriptions]...')
exit(0)
args = SlurmArgs.SlurmArgsModel({
'experiment_paths': sys.argv[5:],
'base_path': sys.argv[3],
'runs': 1,
'slurm_path': sys.argv[2],
'executable': "python " + sys.argv[1] + " " + sys.argv[4],
})
def generateMissing(paths):
for i, p in enumerate(paths):
summary_path = p + '/errors_summary.npy'
if not os.path.exists(summary_path):
yield i
def printProgress(size, it):
for i, _ in enumerate(it):
print(f'{i + 1}/{size}', end='\r')
        if i + 1 == size:  # finish the progress line after the last item
print()
yield _
for path in args.experiment_paths:
print(path)
exp = loadExperiment(path)
slurm = slurmOptionsFromFile(args.slurm_path)
size = exp.permutations() * args.runs
paths = listResultsPaths(exp, args.runs)
paths = printProgress(size, paths)
indices = generateMissing(paths)
groupSize = slurm.tasks * slurm.tasksPerNode
for g in group(indices, groupSize):
l = list(g)
print("scheduling:", path, l)
slurm.tasks = min([slurm.tasks, len(l)])
schedule(slurm, args.executable + ' ' + path, l)
time.sleep(2)
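# Note on the batching above (an assumption about the PyExpUtils API): group()
# splits the missing indices into chunks of at most tasks * tasksPerNode, so
# each schedule() call submits one Slurm job per chunk of result indices.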
|
[
"andnpatterson@gmail.com"
] |
andnpatterson@gmail.com
|
c9b9126eb7cfe8ea67cc1dd7bf1da71936a45f80
|
5f4adc8c51f9b7dd67a47f37eaf31e8ddb066f71
|
/core/cp_plugins/systray.py
|
9fc45cfcbb597ecc1d8dd71abfc8c312a66c380d
|
[
"Apache-2.0"
] |
permissive
|
cryorooster/watcher
|
1a4f186cb9d0a0c84f80e30073b313a0bd049995
|
0dd25241a01d7dcb9ffcd312cc2472b2c9cb2983
|
refs/heads/master
| 2021-01-23T04:45:09.272825 | 2017-02-05T23:36:39 | 2017-02-05T23:36:49 | 80,380,818 | 0 | 0 | null | 2017-01-29T23:39:16 | 2017-01-29T23:39:15 | null |
UTF-8
|
Python
| false | false | 2,563 |
py
|
import logging
import sys
import webbrowser
import cherrypy
import core
from cherrypy.process import plugins
from infi.systray import SysTrayIcon
logging = logging.getLogger(__name__)
class SysTrayPlugin(plugins.SimplePlugin):
'''
CherryPy plugin that creates a system tray icon for Windows.
Because SysTrayIcon always fires off on_quit, we can't have on_quit
execute cherrypy.engine.exit() if the exit command is what triggered
SysTrayIcon to close. So conditions are set to only fire on_quit when
the quit_method == 'menu'.
This way, when the menu option is called, it destroys SysTrayIcon then
closes cherrypy. Cherrypy will try to close SysTrayIcon by calling
stop(), so stop() gets reassigned to None.
If the app is closed by cherrypy (whether catching a kb interrupt or the GUI
shutdown button), cherrypy stops the plugin by calling stop(). Stop()
reassigns SysTrayIcon._on_quit to None and calls SysTrayIcon.shutdown().
SysTrayIcon is then destroyed (twice for reasons I can't figure out),
then cherrypy finishes up the engine.stop() and engine.exit().
The chain is as such:
Trigger == systray menu 'Quit':
SysTrayIcon._destroy() >
SysTrayIcon._on_quit() > set SysTrayPlugin.quit_method = 'menu'
cherrypy.engine.exit() >
SysTrayPlugin.stop() > does nothing
sys.exit()
Trigger == KBInterrupt or GUI Shutdown:
cherrypy.engine.stop() >
SysTrayPlugin.stop() > disable SysTrayIcon._on_quit()
SysTrayIcon.shutdown() >
SysTrayIcon._destroy() >
SysTrayIcon._destroy() >
cherrypy.engine.exit() >
sys.exit()
'''
def __init__(self, bus):
plugins.SimplePlugin.__init__(self, bus)
menu_options = (('Open Browser', None, self.open),)
self.systray = SysTrayIcon('core/favicon.ico', 'Watcher',
menu_options, on_quit=self.on_quit)
self.quit_method = None
return
def start(self):
self.systray.start()
return
def stop(self):
if self.quit_method == 'menu':
return
else:
self.systray._on_quit = None
self.systray.shutdown()
return
def on_quit(self, systray):
self.quit_method = 'menu'
cherrypy.engine.exit()
sys.exit(0)
# sys tray functions:
def open(self, systray):
webbrowser.open('http://{}:{}{}'.format(
core.SERVER_ADDRESS, core.SERVER_PORT, core.URL_BASE))
return
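# Typical wiring (an assumption -- not shown in this file): subscribe the
# plugin on the engine bus before starting CherryPy, e.g.
#
#   SysTrayPlugin(cherrypy.engine).subscribe()
#   cherrypy.engine.start()
#   cherrypy.engine.block()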
|
[
"nosmokingbandit@gmail.com"
] |
nosmokingbandit@gmail.com
|
f62998c30aabd3f2ae38cf6aa13b33f4456ef7e1
|
d0fe389bae13abfc9d666dc880c50b894b7c212d
|
/software/tool/test_pipeline/move_file.py
|
5fd75b3eebadef6c39cadc438cc9d2d6974eda57
|
[] |
no_license
|
ab3nd/TinyRobo
|
965c060e95ef6446a609b4954dda042d1ff16311
|
b86d2f716fea4bcc420f81e1903484554fb33b51
|
refs/heads/master
| 2020-04-12T08:49:45.086755 | 2019-07-11T01:59:05 | 2019-07-11T01:59:05 | 39,583,602 | 7 | 2 | null | 2018-07-10T20:05:36 | 2015-07-23T18:17:14 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 819 |
py
|
#!/usr/bin/python
#Get a file that starts with "recognizer_test" in the ~/.ros/ directory, and move it to a new directory
import json
import rosbag
import rospy
import os
import fnmatch
import yaml
#From https://stackoverflow.com/questions/1724693/find-a-file-in-python
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
path = "/home/ams/.ros"
pattern = "recognizer_test*"
#Get the files
files = find(pattern, path)
#Because the file names contain dates, this should more or less get the oldest one
oldName = sorted(files)[0]
#Move it to an appropriately named directory ('foo' is a placeholder label,
#so this produces test_foo/foo_foo.bag)
os.renames(oldName, "test_{0}/{0}_{0}.bag".format('foo'))
|
[
"orphrey@gmail.com"
] |
orphrey@gmail.com
|
c55812681bffcd67f705310e9d3133f402e043f6
|
9d8a3a2c0a15dbf1f90d801e6d705d1212cf09af
|
/services/web__morningstaronline_co_uk.py
|
ac5fc53d8d59be5e0045ca7297f649f07c83b74c
|
[] |
no_license
|
rudolphos/NewsGrabber
|
f9bddc9a9b3a9e02f716133fd746f48cee635b36
|
86354fb769b2710ac7cdd5bd8795e43158b70ad2
|
refs/heads/master
| 2021-01-12T12:07:55.335079 | 2016-10-09T22:39:17 | 2016-10-09T22:39:17 | 72,316,773 | 0 | 0 | null | 2016-10-30T00:35:08 | 2016-10-30T00:35:08 | null |
UTF-8
|
Python
| false | false | 417 |
py
|
refresh = 5
version = 20160312.01
urls = ['https://www.morningstaronline.co.uk/britain',
'https://www.morningstaronline.co.uk/world',
'https://www.morningstaronline.co.uk/editorial',
'https://www.morningstaronline.co.uk/features',
'https://www.morningstaronline.co.uk/sport',
'https://www.morningstaronline.co.uk/arts']
regex = [r'^https?:\/\/[^\/]*morningstaronline\.co\.uk']
videoregex = []
liveregex = []
|
[
"Arkiver@hotmail.com"
] |
Arkiver@hotmail.com
|
207bee7e203e906fc119bb7df61d83adcdec1d35
|
d49f28ea7867cf9ce9512c0521b136934e97b7d2
|
/tests/backends/base/test_client.py
|
4573bbe97bfb174d2998b800e8ce5e119a7d4da8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
tamirverthim/django
|
cdbc198a055deeb526caff6b18ae874445f217c5
|
666b7048a0dc6b067c1e3f58653f3c7ca00371a2
|
refs/heads/master
| 2023-04-14T00:51:11.507226 | 2020-12-07T12:19:20 | 2020-12-07T12:19:20 | 319,310,225 | 0 | 0 |
BSD-3-Clause
| 2023-04-03T23:53:00 | 2020-12-07T12:17:41 |
Python
|
UTF-8
|
Python
| false | false | 605 |
py
|
from django.db import connection
from django.db.backends.base.client import BaseDatabaseClient
from django.test import SimpleTestCase
class SimpleDatabaseClientTests(SimpleTestCase):
def setUp(self):
self.client = BaseDatabaseClient(connection=connection)
def test_settings_to_cmd_args_env(self):
msg = (
'subclasses of BaseDatabaseClient must provide a '
'settings_to_cmd_args_env() method or override a runshell().'
)
with self.assertRaisesMessage(NotImplementedError, msg):
self.client.settings_to_cmd_args_env(None, None)
|
[
"felisiak.mariusz@gmail.com"
] |
felisiak.mariusz@gmail.com
|
760d04f4f37ec49446c5810324797d3ef73de59c
|
c947a71a16ed180c920d4b362347f980d93bd2fe
|
/src/Classes/MSDS400/Module 3/workout.py
|
c7f40dafdf59f5c1f52238d5010dc1fa5ddcbc10
|
[
"MIT"
] |
permissive
|
bmoretz/Python-Playground
|
b69cac015e95d97f46ebd678c4493a44befb556f
|
a367ec7659b85c24363c21b5c0ac25db08ffa1f6
|
refs/heads/master
| 2021-05-13T23:35:31.986884 | 2019-11-23T19:07:58 | 2019-11-23T19:07:58 | 116,520,816 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,542 |
py
|
# As part of a weight reduction program, a man designs a monthly exercise program consisting of bicycling, jogging, and swimming.
# He would like to
# exercise at most 28 hours,
# devote at most 6 hours to swimming,
# and jog for no more than the total number of hours bicycling and swimming.
# The calories burned by this person per hour by bicycling, jogging, and swimming are 200, 427, and 283, respectively.
# How many hours should be allotted to each activity to maximize the number of calories burned? What is the maximum number of calories he will burn?
# (Hint: Write the constraint involving jogging in the form less than or equals 0.)
# Let x 1 be the number of hours spent bicycling,
# let x 2 be the number of hours spent jogging,
# and let x 3 be the number of hours spent swimming.
#
# What is the objective function?
from pulp import *
workout = LpProblem( "Workout Problem", LpMaximize )
x1 = LpVariable( "x1", 0 ) # Bicycling
x2 = LpVariable( "x2", 0 ) # Jogging
x3 = LpVariable( "x3", 0 ) # Swimming
w = LpVariable( "w" )
workout += 200*x1 + 427*x2 + 283*x3
# Constraints
workout += x1 + x2 + x3 <= 28 # no more than total hours
workout += x3 <= 6 # at most hours swimming
workout += x2 <= x1 + x3 # jog no more than Bicycling + Swimming
workout.solve()
print('Status:', LpStatus[workout.status])  # LpStatus is pulp's module-level status map
for variable in workout.variables():
print("{0} = {1}".format( variable.name, variable.varValue ))
print( 'Optimal Sln: {0}'.format(pulp.value( workout.objective )))
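# Hand-checked optimum (worked out by inspection, not produced by this script):
# jogging is capped by x2 <= x1 + x3 and x1 + x2 + x3 <= 28, so x2 <= 14;
# swimming burns more per hour than bicycling, so fill x3 = 6 and x1 = 8.
# Calories: 200*8 + 427*14 + 283*6 = 1600 + 5978 + 1698 = 9276.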
|
[
"bmoretz@ionicsolutions.net"
] |
bmoretz@ionicsolutions.net
|
f0365d989dd7c876fa5c7fca77f76477b90906d6
|
44baa6621306c6b9810db48b3c1479cb8db294b3
|
/test/test_summaries.py
|
890a49aaf4ebb8b1bd8020b972c18679946c46be
|
[
"Apache-2.0"
] |
permissive
|
codeninja/tensorforce
|
ecc216e2970194d086209fb726fc64b4b9cd8e93
|
212b115d10a21b8241e1d9df56c4851ffd370f34
|
refs/heads/master
| 2020-08-13T08:16:11.046478 | 2019-10-18T17:36:03 | 2019-10-18T17:36:03 | 214,937,969 | 2 | 0 |
Apache-2.0
| 2019-10-18T17:36:04 | 2019-10-14T03:15:34 |
Python
|
UTF-8
|
Python
| false | false | 2,058 |
py
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import unittest
from test.unittest_base import UnittestBase
class TestSummaries(UnittestBase, unittest.TestCase):
exclude_bounded_action = True # TODO: shouldn't be necessary!
require_observe = True
directory = 'test-summaries'
def test_summaries(self):
# FEATURES.MD
self.start_tests()
# 'dropout', 'kl-divergence'
reward_estimation = dict(horizon=2, estimate_horizon='late')
baseline_policy = dict(network=dict(type='auto', size=8, internal_rnn=1))
baseline_objective = 'policy_gradient'
baseline_optimizer = 'adam'
self.unittest(
summarizer=dict(directory=self.__class__.directory, labels='all', frequency=2),
reward_estimation=reward_estimation, baseline_policy=baseline_policy,
baseline_objective=baseline_objective, baseline_optimizer=baseline_optimizer
)
for directory in os.listdir(path=self.__class__.directory):
directory = os.path.join(self.__class__.directory, directory)
            for filename in os.listdir(path=directory):
                # Check the name before deleting so the assertion refers to a real file.
                assert filename.startswith('events.out.tfevents.')
                os.remove(path=os.path.join(directory, filename))
            os.rmdir(path=directory)
os.rmdir(path=self.__class__.directory)
self.finished_test()
|
[
"alexkuhnle@t-online.de"
] |
alexkuhnle@t-online.de
|
570d5e5d5fbd8600a45c78d01b6b02a8b09ce153
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/set_database_user_privilege_request.py
|
150b872cab2546ae4611dfa32d9ac8d91350c989
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 4,906 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SetDatabaseUserPrivilegeRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'x_language': 'str',
'body': 'SetDatabaseUserPrivilegeReqV3'
}
attribute_map = {
'instance_id': 'instance_id',
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, instance_id=None, x_language=None, body=None):
"""SetDatabaseUserPrivilegeRequest
The model defined in huaweicloud sdk
        :param instance_id: Instance ID
        :type instance_id: str
        :param x_language: Language
:type x_language: str
:param body: Body of the SetDatabaseUserPrivilegeRequest
:type body: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
self._instance_id = None
self._x_language = None
self._body = None
self.discriminator = None
self.instance_id = instance_id
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def instance_id(self):
"""Gets the instance_id of this SetDatabaseUserPrivilegeRequest.
        Instance ID
:return: The instance_id of this SetDatabaseUserPrivilegeRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this SetDatabaseUserPrivilegeRequest.
        Instance ID
:param instance_id: The instance_id of this SetDatabaseUserPrivilegeRequest.
:type instance_id: str
"""
self._instance_id = instance_id
@property
def x_language(self):
"""Gets the x_language of this SetDatabaseUserPrivilegeRequest.
        Language
:return: The x_language of this SetDatabaseUserPrivilegeRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this SetDatabaseUserPrivilegeRequest.
        Language
:param x_language: The x_language of this SetDatabaseUserPrivilegeRequest.
:type x_language: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this SetDatabaseUserPrivilegeRequest.
:return: The body of this SetDatabaseUserPrivilegeRequest.
:rtype: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this SetDatabaseUserPrivilegeRequest.
:param body: The body of this SetDatabaseUserPrivilegeRequest.
:type body: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SetDatabaseUserPrivilegeRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
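# Minimal usage sketch (hypothetical values, for illustration only):
#
#   request = SetDatabaseUserPrivilegeRequest(instance_id='my-instance-id',
#                                             x_language='en-us')
#   print(request.to_dict())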
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
8cfd3c66b9a03394e87c6cbbac0e72ae02d96b6b
|
77ae7c76d36009daa01b2317439c1f975f7932b2
|
/exercicios/ex115/arquivo.py
|
dbcbd133583ca6ae2edba87857cfb65ef4e83003
|
[] |
no_license
|
MatheusOldAccount/Exerc-cios-de-Python-do-Curso-em-Video
|
5f26b5a2867fa1a2e36b486a809dfbe8b107b8c2
|
5696c49d3caf5cae817217a2da0598d1cf794f5b
|
refs/heads/master
| 2022-03-22T10:49:33.666660 | 2019-11-25T21:24:43 | 2019-11-25T21:24:43 | 224,052,682 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 639 |
py
|
def verPessoas():
    print('-' * 30)
    arq = open('lista.txt', 'r')
    print(arq.read())
    arq.close()


def adicionarPessoas():
    print('-' * 30)
    arq = open('lista.txt', 'a')
    nome = str(input('Name: ')).strip().capitalize()
    válido = False
    while True:
        try:
            idade = int(input('Age: '))
        except ValueError:
            print('\033[31mERROR: please enter a valid integer.\033[m')
        else:
            print(f'New record for {nome} added')
            arq.write(f'\n{nome:<30}{idade} years')
            válido = True
        if válido:
            break
    arq.close()
|
[
"matheustavares1165@gmail.com"
] |
matheustavares1165@gmail.com
|
a1900950b36a1a0eeada9e202f153c8985039b65
|
e342abb1306e4b083f235a2992ffb863c96c9a86
|
/examples/user/user_playlists.py
|
f71f755bceeeb2c38e3122cc3e6f50cb403624cb
|
[
"MIT"
] |
permissive
|
LorenzoCavatorta/spotify.py
|
102422e6588cb6c49cff026562e37f28cb0650eb
|
7f375f030fbac4ef3dbbd577a898b4d72f37b72b
|
refs/heads/master
| 2020-08-01T17:09:06.795264 | 2019-09-30T12:24:57 | 2019-09-30T12:24:57 | 211,055,943 | 0 | 0 |
MIT
| 2019-09-26T09:50:46 | 2019-09-26T09:50:46 | null |
UTF-8
|
Python
| false | false | 453 |
py
|
import asyncio
import spotify
client = spotify.Client('someid', 'somesecret')
async def main():
# You can use a user with a http presence
user = await client.user_from_token('sometoken')
    # Or you can get a generic user by id ('some_user_id' is a placeholder)
    user = await client.get_user('some_user_id')
# returns a list of spotify.Playlist objects
playlists = await user.get_playlists()
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
|
[
"m3nta1@yahoo.com"
] |
m3nta1@yahoo.com
|
b0e487b584903313154d9dd72e6c085f2b3b95d9
|
4664328482163fd927603d66f47209b28471cf0f
|
/venv/lib/python3.7/site-packages/datalad/metadata/extractors/tests/test_datacite_xml.py
|
30ed2525d0915a74e0f941dc65be94d72cbe0d4c
|
[
"MIT"
] |
permissive
|
emmetaobrien/dats-validator
|
08706ddab795d272391b3611cd3ba0de8c4a91a1
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
refs/heads/master
| 2020-12-19T05:03:17.179117 | 2020-01-22T17:28:38 | 2020-01-22T17:28:38 | 235,626,049 | 0 | 0 |
MIT
| 2020-01-22T17:24:56 | 2020-01-22T17:24:56 | null |
UTF-8
|
Python
| false | false | 2,982 |
py
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test datacite metadata extractor """
from simplejson import dumps
from datalad.metadata.extractors.datacite import MetadataExtractor
from datalad.metadata.metadata import _get_metadatarelevant_paths
from nose.tools import assert_equal
from datalad.tests.utils import with_tree
from datalad.api import create
xml_content = """\
<?xml version="1.0" encoding="UTF-8"?>
<resource xmlns="http://datacite.org/schema/kernel-2.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://datacite.org/schema/kernel-2.2 http://schema.datacite.org/meta/kernel-2.2/metadata.xsd">
<identifier identifierType="DOI">10.6080/K0QN64NG</identifier>
<creators>
<creator>
<creatorName>Last1, First1</creatorName>
</creator>
<creator>
<creatorName>Last2, First2</creatorName>
</creator>
</creators>
<titles>
<title>Main
title</title>
<title titleType="AlternativeTitle">CRCNS.org xxx-1</title>
</titles>
<publisher>CRCNS.org</publisher>
<publicationYear>2011</publicationYear>
<subjects>
<subject>Neuroscience</subject>
<subject>fMRI</subject>
</subjects>
<language>eng</language>
<resourceType resourceTypeGeneral="Dataset">Dataset/Neurophysiology</resourceType>
<sizes>
<size>10 GB</size>
</sizes>
<formats>
<format>application/matlab</format>
<format>NIFTY</format>
</formats>
<version>1.0</version>
<descriptions>
<description descriptionType="Other">
Some long
description.
</description>
</descriptions>
<relatedIdentifiers>
<relatedIdentifier relatedIdentifierType="DOI" relationType="IsDocumentedBy">10.1016/j.cub.2011.08.031</relatedIdentifier>
</relatedIdentifiers>
</resource>
"""
@with_tree(tree={'.datalad': {'meta.datacite.xml': xml_content}})
@with_tree(tree={'elsewhere': {'meta.datacite.xml': xml_content}})
def test_get_metadata(path1, path2):
for p in (path1, path2):
print('PATH')
ds = create(p, force=True)
ds.add('.')
meta = MetadataExtractor(
ds,
_get_metadatarelevant_paths(ds, []))._get_dataset_metadata()
assert_equal(
dumps(meta, sort_keys=True, indent=2),
"""\
{
"author": [
"Last1, First1",
"Last2, First2"
],
"citation": [
"10.1016/j.cub.2011.08.031"
],
"description": "Some long description.",
"formats": [
"application/matlab",
"NIFTY"
],
"name": "CRCNS.org xxx-1",
"sameas": "10.6080/K0QN64NG",
"shortdescription": "Main title",
"tag": [
"Neuroscience",
"fMRI"
],
"version": "1.0"
}""")
|
[
"giulia.ippoliti@mail.mcgill.ca"
] |
giulia.ippoliti@mail.mcgill.ca
|
59fbf899cb91638c4c208f659ae96a918d587461
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/nltk/cluster/__init__.py
|
38a9111e2204c7174d3bfbd82559e79570513835
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:59aceae689404a10cc3a170d5442209edea3f051e4f50c800fa557e86d234639
size 4271
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
351b4eddb3f58e872e3497a9bea27b19aa4d720f
|
4d89652acca24e0bc653e0b4cb5846ceb5b568e4
|
/google-cloud-sdk/lib/surface/run/domain_mappings/list.py
|
ab9c9af7d8e8d0e25820072bf29df8501224e959
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
ibssasimon/LyricLingo
|
410fcec94d2bd3ea75c975c55713f5b8fb913229
|
0dfc951b270912470b36ce0083afd9d4fe41b10a
|
refs/heads/master
| 2021-06-25T10:00:18.215900 | 2020-01-09T00:35:46 | 2020-01-09T00:35:46 | 222,135,399 | 2 | 1 | null | 2021-04-30T20:54:14 | 2019-11-16T17:32:19 |
Python
|
UTF-8
|
Python
| false | false | 3,061 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface for listing all domain mappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.run import commands
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import flags
from googlecloudsdk.command_lib.run import pretty_print
from googlecloudsdk.command_lib.run import resource_args
from googlecloudsdk.command_lib.run import serverless_operations
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.command_lib.util.concepts import presentation_specs
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class List(commands.List):
"""Lists domain mappings."""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To list all Cloud Run domain mappings, run:
$ {command}
""",
}
@classmethod
def CommonArgs(cls, parser):
# Flags specific to connecting to a cluster
cluster_group = flags.GetClusterArgGroup(parser)
namespace_presentation = presentation_specs.ResourcePresentationSpec(
'--namespace',
resource_args.GetNamespaceResourceSpec(),
'Namespace to list domain mappings in.',
required=True,
prefixes=False)
concept_parsers.ConceptParser(
[namespace_presentation]).AddToParser(cluster_group)
parser.display_info.AddFormat(
"""table(
{ready_column},
metadata.name:label=DOMAIN,
route_name:label=SERVICE,
region:label=REGION)""".format(ready_column=pretty_print.READY_COLUMN))
parser.display_info.AddUriFunc(cls._GetResourceUri)
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
def Run(self, args):
"""List available domain mappings."""
conn_context = connection_context.GetConnectionContext(
args, self.ReleaseTrack())
namespace_ref = args.CONCEPTS.namespace.Parse()
with serverless_operations.Connect(conn_context) as client:
self.SetCompleteApiEndpoint(conn_context.endpoint)
return commands.SortByName(client.ListDomainMappings(namespace_ref))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AlphaList(List):
"""Lists domain mappings."""
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
AlphaList.__doc__ = List.__doc__
|
[
"ibssasimon@gmail.com"
] |
ibssasimon@gmail.com
|
657337bf90a24e453740657f6c0d434ef21313c9
|
cf62f7a7f9e13205fe83957fb7bfcf1b097bf481
|
/src/index.py
|
a2ae504efaedb021f53a79f53ead655fd59982c9
|
[
"Apache-2.0"
] |
permissive
|
biothings/mygene.info
|
09bf19f481c066789a4ad02a0d2880f31dae28f6
|
fe1bbdd81bc29b412ca4288d3af38e47c0602ab7
|
refs/heads/master
| 2023-08-22T21:34:43.540840 | 2023-08-08T23:25:15 | 2023-08-08T23:25:18 | 54,933,630 | 89 | 20 |
NOASSERTION
| 2023-07-18T23:53:49 | 2016-03-29T00:36:49 |
Python
|
UTF-8
|
Python
| false | false | 757 |
py
|
"""
Mygene Web Server Entry Point
Examples:
>>> python index.py
>>> python index.py --debug
>>> python index.py --port=8000
"""
import os.path
import config
from biothings.web.launcher import main
ADDON_HANDLERS = [
(r"/demo/?(.*)", "tornado.web.StaticFileHandler",
{"path": "docs/demo", "default_filename": "index.html"}),
]
if config.INCLUDE_DOCS:
if not os.path.exists(config.DOCS_STATIC_PATH):
raise IOError('Run "make html" to generate sphinx docs first.')
ADDON_HANDLERS += [
(r"/widget/(.*)", "tornado.web.RedirectHandler", {"url": "/static/widget/{0}"}),
(r"/?(.*)", "tornado.web.StaticFileHandler", {'path': config.DOCS_STATIC_PATH}),
]
if __name__ == '__main__':
main(ADDON_HANDLERS)
|
[
"xzhou@scripps.edu"
] |
xzhou@scripps.edu
|
981f3b685443c1e8fabdc340684e1a4a52e41de2
|
e15fb687990589783066669784912ea8ac5bacaf
|
/genome_designer/test_data/full_vcf_test_set/generate_full_vcf_test_set.py
|
9dac81496c35a6bb2eaa6bc20477bb1f155f8606
|
[
"MIT"
] |
permissive
|
RubensZimbres/millstone
|
74d32105fa54104d0597b6789fb2871cb4fbd854
|
898936072a716a799462c113286056690a7723d1
|
refs/heads/master
| 2020-03-16T18:57:55.174716 | 2018-03-07T16:40:14 | 2018-03-07T16:40:14 | 132,894,394 | 1 | 2 | null | 2018-05-10T12:01:34 | 2018-05-10T12:01:33 | null |
UTF-8
|
Python
| false | false | 5,259 |
py
|
"""
Script for generating the test set.
This document describes how this test test was generated.
1) Select a region of the MG1655 genome to excise.
"""
import copy
import random
from Bio import SeqIO
import vcf
import simNGS_util
# Portion of MG1655 Genbank of size ~5.5 kB
EXCISED_GENBANK = 'mg1655_tolC_through_zupT.gb'
TEMPLATE_VCF = 'template.vcf'
VCF_TEMPLATE_READER = vcf.Reader(filename=TEMPLATE_VCF)  # pass the path as filename=, not as a file object
SAMPLE_FASTA_ROOT = 'sample'
DESIGNED_SNP_VCF = 'designed_snps.vcf'
# If we do a SNP every 100 bases, that's 50 SNPs.
# We'll then do 20 designed SNPs and 20 SNPs per sample so we should get
# fairly interesting overlaps.
TOTAL_SNPS = 50
NUM_IN_CDS = 45
NUM_OTHER = TOTAL_SNPS - NUM_IN_CDS
# We'll create this many genomes.
NUM_SAMPLES = 6
def is_position_in_coding_feature(position, cds_features):
"""Checks whether the given position lies inside of a coding feature
in the given genome record.
"""
for feature in cds_features:
if (feature.location.start <= position and
position < feature.location.end):
return True
return False
BASE_OPTIONS = ['A', 'T', 'G', 'C']
def choose_alt(ref):
"""Returns a random base that is not ref.
"""
alt = ref
while alt == ref:
alt = random.choice(BASE_OPTIONS)
return alt
def get_subset_of_snps(all_snps, subset_size):
all_snp_positions = all_snps.keys()
subset = {}
while len(subset) < subset_size:
pos = random.choice(all_snp_positions)
if pos in subset:
continue
subset[pos] = all_snps[pos]
return subset
def create_vcf_for_subset(subset, out_path):
with open(out_path, 'w') as designed_fh:
writer = vcf.Writer(designed_fh, VCF_TEMPLATE_READER,
lineterminator='\n')
for pos, value_dict in subset.iteritems():
writer.write_record(vcf.model._Record(
'Chromosome', # CHROM
                pos + 1,  # POS (convert the pythonic 0-based position to VCF's 1-based coordinate)
None, # ID
value_dict['ref'], # REF
value_dict['alt'], # ALT
None, # QUAL
None, # FILTER
None, # INFO
None, # FORMAT
None, # sample_indexes
samples=None))
def main():
seq_record = SeqIO.read(EXCISED_GENBANK, 'genbank')
cds_features = [f for f in seq_record.features if f.type == 'CDS']
# Generate all possible SNPs to sample from. Store them in a dictionary
# keyed by position so we can easily deal with lookups and avoiding
# duplicates as needed below.
all_snps = {}
len_seq_record = len(seq_record)
# Select random positions for SNPs, respecting the distribution
# set above by the NUM_IN_CDS vs TOTAL_SNPS constants.
# NOTE: These SNP positions are pythonic. We have to update them when
# writing them out in vcf format below.
num_in_cds = 0
num_other = 0
while num_in_cds < NUM_IN_CDS or num_other < NUM_OTHER:
position = random.randint(0, len_seq_record - 1)
if position in all_snps:
continue
in_cds_feature = is_position_in_coding_feature(position, cds_features)
do_add_position = False
if in_cds_feature and num_in_cds < NUM_IN_CDS:
do_add_position = True
num_in_cds += 1
elif not in_cds_feature and num_other < NUM_OTHER:
do_add_position = True
num_other += 1
if do_add_position:
ref = seq_record.seq[position]
alt = choose_alt(ref)
all_snps[position] = {
'ref': ref,
'alt': [alt]
}
assert len(all_snps) == TOTAL_SNPS, "Didn't get all the SNPs we expected."
# Now select a subset of these SNPS to serve as designed.
designed_snps = get_subset_of_snps(all_snps, 20)
create_vcf_for_subset(designed_snps, DESIGNED_SNP_VCF)
# Now create the samples.
for sample_num in range(NUM_SAMPLES):
sample_name = SAMPLE_FASTA_ROOT + str(sample_num)
sample_record = copy.deepcopy(seq_record)
sample_record.id = sample_name
# Grab a subset of SNPs.
sample_snps = get_subset_of_snps(all_snps, 20)
# Introduce the mutations.
for position, value_dict in sample_snps.iteritems():
sample_record.seq = (
sample_record.seq[:position] +
value_dict['alt'][0] +
sample_record.seq[position + 1:])
assert len(sample_record) == len(seq_record), (
"For now we are only doing mutations.")
# Write out the sample fasta.
sample_output = sample_name + '.fa'
with open(sample_output, 'w') as out_fh:
SeqIO.write(sample_record, out_fh, 'fasta')
# Generate fake reads using simNGS.
simLibrary_fasta = sample_name + '.simLibrary.fa'
print sample_output, simLibrary_fasta
simNGS_util.run_simLibrary(sample_output, simLibrary_fasta)
# Generate reads using simNGS.
output_fq = sample_name + '.simLibrary.fq'
simNGS_util.run_paired_simNGS(simLibrary_fasta, output_fq)
if __name__ == '__main__':
main()
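# Coordinate-convention example (see the NOTE above): seq_record.seq[0] is the
# first base, so a SNP stored at pythonic position 0 must be written out as
# POS=1, which is why create_vcf_for_subset adds 1 when writing each record.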
|
[
"gleb.kuznetsov@gmail.com"
] |
gleb.kuznetsov@gmail.com
|
899c5f0098afd90b2bbd71e177e514e42fe973d5
|
36d4c9a57b53f5e14acb512759b49fe44d9990d8
|
/hackerrank/30-days-of-code/day-8.py
|
d6527ddafbd6b3abc73b984d4cbb1c5fe239558e
|
[] |
no_license
|
yosef8234/test
|
4a280fa2b27563c055b54f2ed3dfbc7743dd9289
|
8bb58d12b2837c9f8c7b1877206a365ab9004758
|
refs/heads/master
| 2021-05-07T22:46:06.598921 | 2017-10-16T18:11:26 | 2017-10-16T18:11:26 | 107,286,907 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,652 |
py
|
# -*- coding: utf-8 -*-
# Objective
# Today, we're learning about Key-Value pair mappings using a Map or Dictionary data structure.
# Task
# Given N names and phone numbers, assemble a phone book that maps friends' names to their
# respective phone numbers. You will then be given an unknown number of names to query your
# phone book for; for each name queried, print the associated entry from your phone book
# (in the form name=phoneNumber) or "Not found" if there is no entry for name.
# Note: Your phone book should be a Dictionary/Map/HashMap data structure.
# Input Format
# The first line contains an integer, N, denoting the number of entries in the phone book.
# Each of the N subsequent lines describes an entry in the form of 2 space-separated values
# on a single line. The first value is a friend's name, and the second value is an 8-digit
# phone number.
# After the N lines of phone book entries, there are an unknown number of lines of queries.
# Each line (query) contains a name to look up, and you must continue reading lines until
# there is no more input.
# Note: Names consist of lowercase English letters and are first names only.
# Constraints
# 1 <= N <= 10^5
# 1 <= queries <= 10^5
# Output Format
# On a new line for each query, print "Not found" if the name has no corresponding entry
# in the phone book; otherwise, print the full name and phoneNumber in the format
# name=phoneNumber.
# Sample Input
# 3
# sam 99912222
# tom 11122222
# harry 12299933
# sam
# edward
# harry
# Sample Output
# sam=99912222
# Not found
# harry=12299933
# Explanation
# N=3
# We add the N subsequent (Key, Value) pairs to our map so it looks like this:
# phoneBook = {sam: 99912222, tom: 11122222, harry: 12299933}
# We then process each query and print Key=Value if the queried Key is found in the map,
# or "Not found" otherwise.
# Query 0: sam
# Sam is one of the keys in our dictionary, so we print sam=99912222.
# Query 1: edward
# Edward is not one of the keys in our dictionary, so we print "Not found".
# Query 2: harry
# Harry is one of the keys in our dictionary, so we print harry=12299933.
import sys

n = int(input())
phonebook = dict(input().split() for _ in range(n))
# Queries arrive until end of input, so read the remaining lines from stdin
# instead of assuming there are exactly n of them.
for line in sys.stdin:
    name = line.strip()
    if not name:
        continue
    if name in phonebook:
        print(name + "=" + phonebook[name])
    else:
        print("Not found")
|
[
"ekoz@protonmail.com"
] |
ekoz@protonmail.com
|
8cdd0bd9d537ad94f769df4f3a1faf52e3fb8895
|
5760ff9bca037a2e85dde8ad4d583139ab8e128a
|
/migrations/versions/20150624090637_3606d4a47663_update_answercomment_model.py
|
c4dcdcc74edfefac69c1499b71d92697c7e86322
|
[] |
no_license
|
dianchang/dianchang
|
5b58cbfcf6dfcd9c2c9d55c0612a9327086b8b54
|
3414cd5af0a66facd6ec4eb787e7646d04d8c96c
|
refs/heads/master
| 2016-08-11T11:24:49.322330 | 2015-07-30T05:18:09 | 2015-07-30T05:18:09 | 36,111,229 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 645 |
py
|
"""Update AnswerComment model.
Revision ID: 3606d4a47663
Revises: 2040a458fc8a
Create Date: 2015-06-24 09:06:37.957787
"""
# revision identifiers, used by Alembic.
revision = '3606d4a47663'
down_revision = '2040a458fc8a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('answer_comment', sa.Column('likes_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('answer_comment', 'likes_count')
### end Alembic commands ###
|
[
"hustlzp@qq.com"
] |
hustlzp@qq.com
|
2fad265d11b5850de7947324b15cf3811b053d58
|
1b25efab9fd81f1c1b9cd484a13d530759809838
|
/backend/dating/api/v1/serializers.py
|
94acc95fb234b127aaf19304903f55ffff0256f5
|
[] |
no_license
|
crowdbotics-apps/test-31906
|
1728e7947b6cbd52dc123310647ec523914aa1aa
|
2f6841d3ac3e4d335712fd11b3ee81166eec2f47
|
refs/heads/master
| 2023-08-30T11:31:54.409975 | 2021-11-10T07:26:53 | 2021-11-10T07:26:53 | 426,524,333 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 973 |
py
|
from rest_framework import serializers
from dating.models import Setting, Like, UserPhoto, Match, Dislike, Inbox, Profile
class InboxSerializer(serializers.ModelSerializer):
class Meta:
model = Inbox
fields = "__all__"
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = Like
fields = "__all__"
class DislikeSerializer(serializers.ModelSerializer):
class Meta:
model = Dislike
fields = "__all__"
class UserPhotoSerializer(serializers.ModelSerializer):
class Meta:
model = UserPhoto
fields = "__all__"
class SettingSerializer(serializers.ModelSerializer):
class Meta:
model = Setting
fields = "__all__"
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = "__all__"
class MatchSerializer(serializers.ModelSerializer):
class Meta:
model = Match
fields = "__all__"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b7af57cfe3b70002b84576ef64c5255279fa4d72
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/TankmanOperationDialogMeta.py
|
e1d2fcccb7f4552ec5aef843bb1b493e8473c8d1
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 |
Python
|
UTF-8
|
Python
| false | false | 386 |
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/TankmanOperationDialogMeta.py
from gui.Scaleform.daapi.view.dialogs.SimpleDialog import SimpleDialog
class TankmanOperationDialogMeta(SimpleDialog):
def as_setDataS(self, data):
return self.flashObject.as_setData(data) if self._isDAAPIInited() else None
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
265522a7deada1360fac4df736f45501ac5024dc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_285/ch5_2019_06_03_01_06_29_598637.py
|
b4bfd46781ff35c49180a25a814cd9a7dfae311a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 506 |
py
|
def verifica_primo(n):
    if n < 0:
        return -1
    if n == 0 or n == 1:
        return False
    num = 3
    while num < n:
        if n % 2 == 0 or n % num == 0:
            return False
        num += 2
    return True


def maior_primo_menor_que(n):
    if verifica_primo(n) == -1:
        return -1
    # Scan downward from n itself; a prime n is returned unchanged.
    for e in range(n + 1):
        candidato = n - e
        if verifica_primo(candidato):
            return candidato
    return -1
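# Example (for illustration): maior_primo_menor_que(10) -> 7, and a prime
# input is returned unchanged, e.g. maior_primo_menor_que(7) -> 7.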
|
[
"you@example.com"
] |
you@example.com
|
ecd1fe8a8b5678366ade3ae81684187a171f55f5
|
4c601eaa346e660c296e270cc2d79aea9a3721fe
|
/homeassistant/components/atag/__init__.py
|
237a82f207a51306dfec01869827bd135973d15b
|
[
"Apache-2.0"
] |
permissive
|
basnijholt/home-assistant
|
f55110af9ff602274c0a929c7298ef97a0ef282f
|
ba55b4b8338a2dc0ba3f1d750efea49d86571291
|
refs/heads/dev
| 2023-01-21T11:53:52.621353 | 2020-08-08T15:03:06 | 2020-08-08T15:03:06 | 220,313,680 | 5 | 1 |
Apache-2.0
| 2023-01-13T06:04:49 | 2019-11-07T19:29:54 |
Python
|
UTF-8
|
Python
| false | false | 4,345 |
py
|
"""The ATAG Integration."""
from datetime import timedelta
import logging
import async_timeout
from pyatag import AtagException, AtagOne
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.water_heater import DOMAIN as WATER_HEATER
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, asyncio
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
_LOGGER = logging.getLogger(__name__)
DOMAIN = "atag"
PLATFORMS = [CLIMATE, WATER_HEATER, SENSOR]
async def async_setup(hass: HomeAssistant, config):
"""Set up the Atag component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Atag integration from a config entry."""
session = async_get_clientsession(hass)
coordinator = AtagDataUpdateCoordinator(hass, session, entry)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=coordinator.atag.id)
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
class AtagDataUpdateCoordinator(DataUpdateCoordinator):
"""Define an object to hold Atag data."""
def __init__(self, hass, session, entry):
"""Initialize."""
self.atag = AtagOne(session=session, **entry.data)
super().__init__(
hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=30)
)
async def _async_update_data(self):
"""Update data via library."""
with async_timeout.timeout(20):
try:
if not await self.atag.update():
raise UpdateFailed("No data received")
except AtagException as error:
raise UpdateFailed(error)
return self.atag.report
async def async_unload_entry(hass, entry):
"""Unload Atag config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class AtagEntity(Entity):
"""Defines a base Atag entity."""
def __init__(self, coordinator: AtagDataUpdateCoordinator, atag_id: str) -> None:
"""Initialize the Atag entity."""
self.coordinator = coordinator
self._id = atag_id
self._name = DOMAIN.title()
@property
def device_info(self) -> dict:
"""Return info for device registry."""
device = self.coordinator.atag.id
version = self.coordinator.atag.apiversion
return {
"identifiers": {(DOMAIN, device)},
"name": "Atag Thermostat",
"model": "Atag One",
"sw_version": version,
"manufacturer": "Atag",
}
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def should_poll(self) -> bool:
"""Return the polling requirement of the entity."""
return False
@property
def available(self):
"""Return True if entity is available."""
return self.coordinator.last_update_success
@property
def unique_id(self):
"""Return a unique ID to use for this entity."""
return f"{self.coordinator.atag.id}-{self._id}"
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update Atag entity."""
await self.coordinator.async_request_refresh()
|
[
"noreply@github.com"
] |
basnijholt.noreply@github.com
|
9902776c082c92c16c89cd39d922e4b461482b88
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_05_01_preview/models/__init__.py
|
b555500e3ca24979e6c5c02f7be553bf65fd61c7
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 |
MIT
| 2019-07-25T22:28:52 | 2019-04-19T20:59:15 |
Python
|
UTF-8
|
Python
| false | false | 3,815 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AccessReviewDecision
from ._models_py3 import AccessReviewDecisionListResult
from ._models_py3 import AccessReviewDecisionProperties
from ._models_py3 import AccessReviewDecisionTarget
from ._models_py3 import AccessReviewDefaultSettings
from ._models_py3 import AccessReviewInstance
from ._models_py3 import AccessReviewInstanceListResult
from ._models_py3 import AccessReviewReviewer
from ._models_py3 import AccessReviewScheduleDefinition
from ._models_py3 import AccessReviewScheduleDefinitionListResult
from ._models_py3 import AccessReviewScheduleDefinitionProperties
from ._models_py3 import AccessReviewScheduleSettings
from ._models_py3 import ErrorDefinition
from ._models_py3 import ErrorDefinitionProperties
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import ServicePrincipalDecisionTarget
from ._models_py3 import UserDecisionTarget
from ._authorization_management_client_enums import AccessRecommendationType
from ._authorization_management_client_enums import AccessReviewActorIdentityType
from ._authorization_management_client_enums import AccessReviewApplyResult
from ._authorization_management_client_enums import AccessReviewInstanceStatus
from ._authorization_management_client_enums import AccessReviewRecurrencePatternType
from ._authorization_management_client_enums import AccessReviewRecurrenceRangeType
from ._authorization_management_client_enums import AccessReviewResult
from ._authorization_management_client_enums import AccessReviewReviewerType
from ._authorization_management_client_enums import AccessReviewScheduleDefinitionReviewersType
from ._authorization_management_client_enums import AccessReviewScheduleDefinitionStatus
from ._authorization_management_client_enums import AccessReviewScopePrincipalType
from ._authorization_management_client_enums import DecisionTargetType
from ._authorization_management_client_enums import DefaultDecisionType
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"AccessReviewDecision",
"AccessReviewDecisionListResult",
"AccessReviewDecisionProperties",
"AccessReviewDecisionTarget",
"AccessReviewDefaultSettings",
"AccessReviewInstance",
"AccessReviewInstanceListResult",
"AccessReviewReviewer",
"AccessReviewScheduleDefinition",
"AccessReviewScheduleDefinitionListResult",
"AccessReviewScheduleDefinitionProperties",
"AccessReviewScheduleSettings",
"ErrorDefinition",
"ErrorDefinitionProperties",
"Operation",
"OperationDisplay",
"OperationListResult",
"ServicePrincipalDecisionTarget",
"UserDecisionTarget",
"AccessRecommendationType",
"AccessReviewActorIdentityType",
"AccessReviewApplyResult",
"AccessReviewInstanceStatus",
"AccessReviewRecurrencePatternType",
"AccessReviewRecurrenceRangeType",
"AccessReviewResult",
"AccessReviewReviewerType",
"AccessReviewScheduleDefinitionReviewersType",
"AccessReviewScheduleDefinitionStatus",
"AccessReviewScopePrincipalType",
"DecisionTargetType",
"DefaultDecisionType",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
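# Example (sketch): the re-exports above let callers import models and enums
# straight from this versioned namespace, e.g.:
#
#   from azure.mgmt.authorization.v2018_05_01_preview.models import (
#       AccessReviewScheduleDefinition,
#       AccessReviewResult,
#   )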
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
155abd7d13912aa0987c80a0c964ad7d4fc7990e
|
09b22d1bd1263e4082e6bba7afa2f2b7a66afd4a
|
/FaceDetection/Smile Detector.py
|
5dfcb45bf4e354761a24d3842578b36decee18d7
|
[] |
no_license
|
yogeshkushwahait/Machine-Learning-Using-Python
|
b70bc5334c4178fecc175451b8b7e04e50a60917
|
8102ce7b0cba5d48e923f979ae0a8e71c25857b1
|
refs/heads/master
| 2022-03-28T05:21:24.332537 | 2019-11-05T06:34:00 | 2020-01-09T16:06:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,172 |
py
|
# coding: utf-8
# In[2]:
import cv2
# In[3]:
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')
# In[4]:
def detect(gray,frame):
faces = face_cascade.detectMultiScale(gray, 1.3,5)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h,x:x+w]
roi_color = frame[y:y+h,x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray, 1.1,22)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
smiles = smile_cascade.detectMultiScale(roi_gray, 1.7,22)
for (sx,sy,sw,sh) in smiles:
cv2.rectangle(roi_color,(sx,sy),(sx+sw,sy+sh),(0,0,255),2)
return frame
# In[5]:
video_capture = cv2.VideoCapture(0)
while True:
_, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.imshow('Video', canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
[
"rishav.jnit@gmail.com"
] |
rishav.jnit@gmail.com
|
4402400f9646593187e43c7982a4e61d0d01b033
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/pandas/tags/V00-00-02/SConscript
|
b42efd570012fd5fb90cb1ffb13d916266eb5ce8
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 769 |
#------------------------------------------------------------------------
# File and Version Information:
# $Id: SConscript 4651 2012-10-26 16:55:30Z salnikov@SLAC.STANFORD.EDU $
#
# Description:
# SConscript file for package pandas
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
from os.path import join as pjoin
from SConsTools.standardExternalPackage import standardExternalPackage
pkg = "pandas"
pkg_ver = "0.13.1b"
PREFIX = pjoin('$SIT_EXTERNAL_SW', pkg, pkg_ver)
PYDIR = pjoin('$PYTHON_LIBDIRNAME', '$PYTHON', "site-packages")
PKGINFO = (pkg, pkg_ver, '$PYTHON', '$SIT_ARCH.found')
standardExternalPackage(pkg, **locals())
|
[
"gapon@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
gapon@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
|
e51fbe1d83fc18c0809ef65f62e0f7a148dfdd77
|
a672f984782a1fa328069363671c328da3e4d8bd
|
/weatherdaily/views.py
|
40d1077ff5095487d6bbd95bd1a79c2aceed8864
|
[] |
no_license
|
avs8/yourweather
|
396a0b093cbc9fc9b501eb979418e10eecfadf2b
|
2415769dad416c9fcf99d57cba93b455d30447fc
|
refs/heads/master
| 2021-08-29T08:11:16.664340 | 2017-12-13T14:15:44 | 2017-12-13T14:15:44 | 112,365,929 | 0 | 0 | null | 2017-12-11T14:35:38 | 2017-11-28T17:20:17 |
JavaScript
|
UTF-8
|
Python
| false | false | 558 |
py
|
from django.shortcuts import render, render_to_response
from .forms import WeatherForm
from django.http import HttpResponse
from django.template import RequestContext
from .models import *
def index(request):
    if request.POST:
        form = WeatherForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponse("Thanks for submitting your information!!")
    else:
        form = WeatherForm()
    args = {'form': form}
return render(request, 'weatherdaily/index.html', args)
|
[
"ajitavsingh_8@yahoo.com"
] |
ajitavsingh_8@yahoo.com
|
22026862c4779187068f89cb47fe0e6b11a7c0f0
|
18a6b272d4c55b24d9c179ae1e58959674e53afe
|
/tf_rl/test/CartPole/CartPole_recording_test.py
|
b8d3760e15ebd4d0ca5d66f39bae6090d71f9a17
|
[
"MIT"
] |
permissive
|
Rowing0914/TF2_RL
|
6cce916f409b3d4ef2a5a40a0611908f20d08b2c
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
refs/heads/master
| 2022-12-10T09:58:57.456415 | 2021-05-23T02:43:21 | 2021-05-23T02:43:21 | 233,476,950 | 9 | 1 |
MIT
| 2022-12-08T07:02:42 | 2020-01-12T23:53:48 |
Python
|
UTF-8
|
Python
| false | false | 503 |
py
|
import gym
env = gym.make('CartPole-v0')
env = gym.wrappers.Monitor(env, './tmp/cartpole-experiment-1', force=True, video_callable=lambda episode_id: True)  # keep the wrapped env so episodes are actually recorded
for i_episode in range(20):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
env.close()
|
[
"kosakaboat@gmail.com"
] |
kosakaboat@gmail.com
|
21fe54b94c5e5b3cd05505d1e5b489b734e9a820
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/setuptools/setuptools/_distutils/archive_util.pyi
|
38458fc0e00349e77ebf105fdf5a52e850bc9e25
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 |
Apache-2.0
| 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null |
UTF-8
|
Python
| false | false | 548 |
pyi
|
def make_archive(
base_name: str,
format: str,
root_dir: str | None = ...,
base_dir: str | None = ...,
verbose: int = ...,
dry_run: int = ...,
owner: str | None = ...,
group: str | None = ...,
) -> str: ...
def make_tarball(
base_name: str,
base_dir: str,
compress: str | None = ...,
verbose: int = ...,
dry_run: int = ...,
owner: str | None = ...,
group: str | None = ...,
) -> str: ...
def make_zipfile(base_name: str, base_dir: str, verbose: int = ..., dry_run: int = ...) -> str: ...
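# Usage sketch for the runtime API these stubs describe (format is one of
# "zip", "tar", "gztar", "bztar"):
#   from distutils.archive_util import make_archive
#   make_archive("release", "gztar", root_dir="build")  # -> "release.tar.gz"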
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
26556f32fd856b8732227ea6ddcc48bd711e6877
|
4412fd856cfbdfab98122b11ea01e447a76851b3
|
/rodentdb/querysets.py
|
063568ed8e0276d9a070a898fd61d93c93f1a13b
|
[] |
no_license
|
fchampalimaud/rodentdb
|
d8e8c0c7552de638d3a2fd57de287401997fdf3c
|
4a970c09da78f22a8c57d8ea98d29a569f531613
|
refs/heads/master
| 2021-06-18T02:05:19.200858 | 2019-09-17T18:09:57 | 2019-09-17T18:09:57 | 185,334,185 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 367 |
py
|
from django.db import models
from users.mixins import PyformsPermissionsMixin
# FIXME import this when users model is not present
# try:
# from users.mixins import PyformsPermissionsMixin
# except ImportError:
# PyformsPermissionsMixin = None
# # PyformsPermissionsMixin = object
class RodentQuerySet(PyformsPermissionsMixin, models.QuerySet):
...
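# Usage sketch (hypothetical model): exposing the queryset as a manager makes
# the pyforms permission filtering apply to ordinary ORM queries.
#
# class Rodent(models.Model):
#     objects = RodentQuerySet.as_manager()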
|
[
"hugo.cachitas@research.fchampalimaud.org"
] |
hugo.cachitas@research.fchampalimaud.org
|
b75806926dcb3bbbf4251fca79a3bd28f9300dab
|
71b86fc54e811c3a06d6d2db32a65a212f642bac
|
/scripts/create_prod_optfreq_jobs.py
|
298897a5ec19bd62538151015a4d4e84b1ae396f
|
[
"MIT"
] |
permissive
|
yunsiechung/ard_gsm
|
c773f4454836b54f6bb788c8d038d60d628ffcf2
|
82826011b0edf7122e16063094b04ecd16bf1cf5
|
refs/heads/master
| 2021-05-20T12:01:32.981441 | 2020-01-20T17:08:31 | 2020-01-20T17:08:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,879 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import argparse
import glob
import os
import re
from ard_gsm.qchem import QChem
from ard_gsm.util import iter_sub_dirs, read_xyz_file
def main():
args = parse_args()
num_regex = re.compile(r'\d+')
maxnum = float('inf') if args.maxnum is None else args.maxnum
for gsm_sub_dir in iter_sub_dirs(args.gsm_dir, pattern=r'gsm\d+'):
gsm_num = int(num_regex.search(os.path.basename(gsm_sub_dir)).group(0))
if gsm_num > maxnum:
continue
out_dir = os.path.join(args.out_dir, os.path.basename(gsm_sub_dir))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
elif not args.overwrite:
continue
qstart_file = os.path.join(gsm_sub_dir, 'qstart')
qtmp = QChem(logfile=qstart_file)
charge, multiplicity = qtmp.get_charge(), qtmp.get_multiplicity()
print(f'Extracting from {gsm_sub_dir}...')
for gsm_log in glob.iglob(os.path.join(gsm_sub_dir, 'gsm*.out')):
num = int(num_regex.search(os.path.basename(gsm_log)).group(0))
string_file = os.path.join(gsm_sub_dir, f'stringfile.xyz{num:04}')
if not (os.path.isfile(string_file) and os.path.getsize(string_file) > 0):
continue
if args.ignore_errors and has_error(gsm_log):
continue
if args.ignore_errors or is_successful(gsm_log):
# Optimize van-der-Waals wells instead of separated products
# Also check if product optimization during GSM failed
xyzs = read_xyz_file(string_file, with_energy=True)
last_energy = xyzs[-1][-1]
second_to_last_energy = xyzs[-2][-1]
if last_energy > second_to_last_energy: # Something went wrong in product optimization
continue
path = os.path.join(out_dir, f'prod_optfreq{num:04}.in')
q = QChem(config_file=args.config)
q.make_input_from_coords(path, *xyzs[-1][:-1], charge=charge, multiplicity=multiplicity, mem=args.mem)
def is_successful(gsm_log):
"""
Success is defined as having converged to a transition state.
"""
with open(gsm_log) as f:
for line in reversed(f.readlines()):
if '-XTS-' in line or '-TS-' in line:
return True
return False
def has_error(gsm_log):
"""
Check if last node is high in energy or if the path is dissociative.
"""
with open(gsm_log) as f:
for line in reversed(f.readlines()):
if 'high energy' in line and '-exit early-' in line:
return True
if 'terminating due to dissociation' in line:
return True
return False
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('gsm_dir', metavar='GSMDIR', help='Path to directory containing GSM folders')
parser.add_argument('out_dir', metavar='ODIR', help='Path to output directory')
parser.add_argument('--mem', type=int, metavar='MEM', help='Q-Chem memory')
parser.add_argument('--overwrite', action='store_true', help='Overwrite input files in existing directories')
parser.add_argument('--maxnum', type=int, metavar='NUM', help='Only make jobs from GSM folders up to this number')
parser.add_argument('--ignore_errors', action='store_true',
help='Extract from all GSM calculations ignoring (most) errors')
parser.add_argument(
'--config', metavar='FILE',
default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, 'config', 'qchem.opt_freq'),
help='Configuration file for product optfreq jobs in Q-Chem'
)
return parser.parse_args()
if __name__ == '__main__':
main()
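# Example invocation (sketch; paths hypothetical):
#   python create_prod_optfreq_jobs.py /scratch/gsm_runs /scratch/prod_optfreq \
#       --mem 16 --maxnum 50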
|
[
"cgrambow@mit.edu"
] |
cgrambow@mit.edu
|
5f2d1ac25562db38ab4a821c2566217b269f5519
|
f8ab044c34f0d286195c8e5abfae6f451e6c8223
|
/test_arg.py
|
58c8a56dafab815d14bbf764635a534ade273ed5
|
[] |
no_license
|
liuyug/pytest
|
aa512e902cf4ba9afb91b6b5b5c5cb9dccdc6478
|
ffc14dbee70ff6fd9c8ab63a1c771fddc8bf5491
|
refs/heads/master
| 2020-05-19T12:20:57.958939 | 2016-09-06T03:01:38 | 2016-09-06T03:01:38 | 15,447,570 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 996 |
py
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
import argparse
import unittest
class testArgs(unittest.TestCase):
def setUp(self):
self.args = (
['--foo', 'abc', 'abcdefg.ext'],
['-a', 'abc', '-a', 'bcd', '-a', 'cde', 'def.def'],
['-vvvv', 'abc.ea'],
#['--version'],
)
self.parser = argparse.ArgumentParser()
self.parser.add_argument('--foo', help='foo help', default='foo')
self.parser.add_argument('-a', '--all', help='all help', action='append')
self.parser.add_argument('-v', '--verbose', help='verbose help', action='count')
#self.parser.add_argument('--version', action='version', version='%(prog)s 0.3')
self.parser.add_argument('file', help='add filename')
def testargs(self):
for args in self.args:
print('args: ', args)
pargs = self.parser.parse_args(args)
print(pargs)
if __name__ == '__main__':
unittest.main()
|
[
"liuyug@gmail.com"
] |
liuyug@gmail.com
|
6006a738df99a24e60f0d1202d8a0998e6f3c28b
|
45f93a9d47204d76b8bf25a71dfb79403e75c33c
|
/CodeForces/yes-or-yes.py
|
cb4db3b37842abd3b41cac071b12adb1ab078941
|
[] |
no_license
|
tahmid-tanzim/problem-solving
|
0173bce1973ac3e95441a76c10324c0e1b0a57c3
|
6ddb51de6772130f209474e76f39ca2938f444f0
|
refs/heads/master
| 2023-06-25T02:18:03.690263 | 2023-06-20T06:58:46 | 2023-06-20T06:58:46 | 137,173,850 | 4 | 1 | null | 2022-03-30T08:28:41 | 2018-06-13T06:44:25 |
Python
|
UTF-8
|
Python
| false | false | 189 |
py
|
# https://codeforces.com/problemset/problem/1703/A
# A. YES or YES?
if __name__ == "__main__":
for _ in range(int(input())):
print('YES' if input().lower() == 'yes' else 'NO')
|
[
"tahmid.tanzim@gmail.com"
] |
tahmid.tanzim@gmail.com
|
660bc9b69bbe66c2d9ce7f4e54e3b4a1dcabcda8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/334/usersdata/294/100466/submittedfiles/listas.py
|
90a4557636f7b45bccefc918ec87c0a87bd5ae66
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 389 |
py
|
# -*- coding: utf-8 -*-
n = int(input('Digite o número de elementos: '))
while n < 2:
    n = int(input('Digite o número de elementos: '))
a = []
for i in range(0, n, 1):
    a.append(int(input('Digite o elemento %d: ' % (i + 1))))
degrau = 0
for i in range(0, n - 1, 1):
    dif = a[i] - a[i + 1]
    if dif < 0:
        dif = dif * (-1)  # take the absolute value of the step
    if dif > degrau:
        degrau = dif
print(degrau)
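# Worked example: for a = [3, 1, 7] the consecutive steps are |3-1| = 2 and
# |1-7| = 6, so the program prints 6.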
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
ec14651799152533ae1afb6b7c4b39d5335c4ecb
|
2e74cff6c9639f3903ccde662e79359d0724285e
|
/2019_late/20190829/swea_5105_미로의거리.py
|
a33cf516df96ab271637b2d73566c84324d7b61b
|
[] |
no_license
|
dodonmountain/algorithm
|
e29988071f651e51ba65e3926302f94a3d4074a5
|
ce33e0d74220839aed4b17a47fa0069458a4324e
|
refs/heads/master
| 2022-11-05T05:14:01.527015 | 2022-11-01T04:29:37 | 2022-11-01T04:29:37 | 200,008,533 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,349 |
py
|
import sys
sys.stdin = open('5105.txt')
from pprint import pprint
from collections import deque
def bfs(x, y):
    global shortest
    Q = deque()
    visit[x][y] = 1  # mark the start cell as visited
    Q.append((x, y))
    dx = [0, 0, -1, 1]
    dy = [-1, 1, 0, 0]
    D = [[0] * N for _ in range(N)]  # stores the shortest distance to each cell
while Q:
xx, yy = Q.popleft()
for step in range(4):
nx = xx + dx[step]
ny = yy + dy[step]
if nx < 0 or ny < 0 or nx > N-1 or ny > N-1:
continue
if maze[nx][ny] == 3:
shortest = D[xx][yy]
return
if maze[nx][ny] == 0 and visit[nx][ny] == False:
                visit[nx][ny] = 1  # mark as visited
D[nx][ny] = D[xx][yy] + 1
Q.append((nx, ny))
T = int(input())
for t_case in range(T):
shortest = 0
N = int(input())
visit = [[0] * N for _ in range(N)]
maze = []
for _ in range(N):
maze.append(list(map(int,input())))
# pprint(maze, width=30)
for i in range(N):
for j in range(N):
if maze[i][j] == 2:
start = [i, j]
elif maze[i][j] == 3:
goal = [i, j]
bfs(start[0], start[1])
pprint(visit,width=40)
print('#{} {}'.format(t_case + 1, shortest))
|
[
"lkh151515@gmail.com"
] |
lkh151515@gmail.com
|
01d0c81a95e80f405f125fd99caa00848d8f6f63
|
a86ca34e23afaf67fdf858df9e47847606b23e0c
|
/lib/temboo/Library/MailChimp/ListSubscribe.py
|
1366bda24013c469dbaa94248b91bb53f0dbbfa1
|
[] |
no_license
|
miriammelnick/dont-get-mugged
|
6026ad93c910baaecbc3f5477629b0322e116fa8
|
1613ee636c027ccc49c3f84a5f186e27de7f0f9d
|
refs/heads/master
| 2021-01-13T02:18:39.599323 | 2012-08-12T23:25:47 | 2012-08-12T23:25:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,083 |
py
|
###############################################################################
#
# ListSubscribe
# Adds a subscriber to a MailChimp list.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class ListSubscribe(Choreography):
"""
Create a new instance of the ListSubscribe Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/MailChimp/ListSubscribe')
def new_input_set(self):
return ListSubscribeInputSet()
def _make_result_set(self, result, path):
return ListSubscribeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListSubscribeChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the ListSubscribe
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class ListSubscribeInputSet(InputSet):
"""
Set the value of the APIKey input for this choreography. ((required, string) The API Key provided by Mailchimp.)
"""
def set_APIKey(self, value):
InputSet._set_input(self, 'APIKey', value)
"""
Set the value of the DoubleOptIn input for this choreography. ((optional, boolean) Flag to control whether a double opt-in confirmation message is sent. Specify '1' (true) or '0' (false). Defaults to 0.)
"""
def set_DoubleOptIn(self, value):
InputSet._set_input(self, 'DoubleOptIn', value)
"""
Set the value of the EmailAddress input for this choreography. ((conditional, string) The email address for the subscriber you want to create. Required unless the email address is included in the MergeVars input as part of your JSON object.)
"""
def set_EmailAddress(self, value):
InputSet._set_input(self, 'EmailAddress', value)
"""
Set the value of the EmailType input for this choreography. ((optional, string) Must be one of 'text', 'html', or 'mobile'. Defaults to html.)
"""
def set_EmailType(self, value):
InputSet._set_input(self, 'EmailType', value)
"""
Set the value of the ListId input for this choreography. ((required, string) The id of the list that the subsbriber will be added to.)
"""
def set_ListId(self, value):
InputSet._set_input(self, 'ListId', value)
"""
Set the value of the MergeVars input for this choreography. ((conditional, json) A JSON object of the merge fields for this subscriber. If the subscriber email address is not provided for the EmailAddress input, it must be specified here.)
"""
def set_MergeVars(self, value):
InputSet._set_input(self, 'MergeVars', value)
"""
Set the value of the ReplaceInterests input for this choreography. ((optional, boolean) A flag to determine whether to replace the interest groups with the groups provided or add the provided groups to the member's interest groups. Specify '1' (true) or '0' (false). Defaults to 1.)
"""
def set_ReplaceInterests(self, value):
InputSet._set_input(self, 'ReplaceInterests', value)
"""
Set the value of the SendWelcome input for this choreography. ((optional, boolean) If double_optin is false and this flag is true, a welcome email will be sent. Note that this does not apply when updating records. Specify '1' (true) or '0' (false). Defaults to 0.)
"""
def set_SendWelcome(self, value):
InputSet._set_input(self, 'SendWelcome', value)
"""
Set the value of the UpdateExisting input for this choreography. ((optional, boolean) Indicates that if the email already exists, this request will perform an update instead of an insert. Specify '1' (true) or '0' (false). Defaults to 1.)
"""
def set_UpdateExisting(self, value):
InputSet._set_input(self, 'UpdateExisting', value)
"""
A ResultSet with methods tailored to the values returned by the ListSubscribe choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class ListSubscribeResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. (The response from Mailchimp. Returns the string "true" for success and an error description for failures.)
"""
def get_Response(self):
return self._output.get('Response', None)
class ListSubscribeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListSubscribeResultSet(response, path)
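# Usage sketch (credentials hypothetical; follows the standard Temboo SDK flow):
#   from temboo.core.session import TembooSession
#   session = TembooSession("ACCOUNT", "APP_KEY_NAME", "APP_KEY_VALUE")
#   choreo = ListSubscribe(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey("mailchimp-api-key")
#   inputs.set_ListId("list-id")
#   inputs.set_EmailAddress("subscriber@example.com")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())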
|
[
"miriam@famulus"
] |
miriam@famulus
|
445c6ff875319e02bf6c664717e3b20fcc1eeef2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_peters.py
|
5c5535b7cf2fe2459518556a2fea0f4e76f710a9
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
# class header
class _PETERS():
    def __init__(self,):
        self.name = "PETERS"
        self.definitions = ['peter']  # assumption: list form, matching self.basic below
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['peter']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
05dfdbb7f1fe32835a1c9b65abe2e372a8d9fad3
|
3eff0ac549dd24fbade02d63c3a541ab88db1e5b
|
/ultimate_python/pythonrefresh.py
|
36dd68b468c98ac1e89ac271b58821745d51e6d4
|
[] |
no_license
|
lisaolson/udemy
|
618410fb548db864b7878de5a2231e8293daa2ad
|
f40f947f6f79d692748f3efba02176fb360f0c4e
|
refs/heads/master
| 2020-03-28T20:14:23.028759 | 2018-09-18T19:45:32 | 2018-09-18T19:45:32 | 149,051,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,845 |
py
|
# VARIABLE
age = 21
name="Lisa"
# print("Hello my name is {} and I am {} years old".format(name, age))
if age < 25:
print('You are young')
def hello(name, age):
return 'Hello {} you are {} years old'.format(name, age)
sentence = hello('Lisa', 21)
print(sentence)
# LIST
dognames = ["Fido", "Sean", "Sally", "Mark"]
dognames.insert(0, "Jane")
print(dognames)
print(dognames[2])
print(len(dognames))
dognames[1] = "Lilo"
print(dognames)
# FOR LOOP
for dog in dognames:
print(dog)
for x in range(1,10):
print(x)
age = 0
# WHILE LOOP
while age < 18:
print(age)
age += 1
numbers = [76, 83, 16, 69, 52, 78, 10, 77, 45, 52, 32, 17, 58, 54, 79, 72, 55, 50, 81, 74, 45, 33, 38, 10, 40, 44, 70, 81, 79, 28, 83, 41, 14, 16, 27, 38, 20, 84, 24, 50, 59, 71, 1, 13, 56, 91, 29, 54, 65, 23, 60, 57, 13, 39, 58, 94, 94, 42, 46, 58, 59, 29, 69, 60, 83, 9, 83, 5, 64, 70, 55, 89, 67, 89, 70, 8, 90, 17, 48, 17, 94, 18, 98, 72, 96, 26, 13, 7, 58, 67, 38, 48, 43, 98, 65, 8, 74, 44, 92]
for number in numbers:
if number > 90:
print(number)
# DICTIONARY
dogs = {"Fido":8, "Sally":17, "Sean":2}
print(dogs["Sally"])
dogs["Sarah"] = 6
print(dogs)
# CLASS
class Dog:
dogInfo = "Hey dogs are cool!"
def bark(self, str): # self as the first parameter then you can add something for the second parameter
print('BARK!' + str)
mydog = Dog()
mydog.bark("bark bark bark bark")
mydog.name = "Fido"
mydog.age = 16
print(mydog.name)
print(mydog.age)
Dog.dogInfo = "Hey there"
print(Dog.dogInfo)
class Horse:
def __init__(self, name, age, furcolor): # you can call 'self' anything as long as it's the same everywhere
self.name = name
self.age = age
self.furcolor = furcolor
def neigh(self):
print("NEIGH!")
myhorse = Horse("Joker", 7, "Brown")
print(myhorse.age)
|
[
"olson.lisa94@gmail.com"
] |
olson.lisa94@gmail.com
|
a6bec2e6e27892e9e71ddf65399e880ac78e4293
|
f00ad57c98e554470a72511dda7a7bfd160aca19
|
/linear_structure/stack/infix_expression_ui.py
|
5a43f72bf018cb2935ef1500165bb647398f3437
|
[] |
no_license
|
fanzhangg/algorithm-problems
|
d60115210aaaffcd094b34b9db5b46dadf93fe9e
|
43b111ad625f197ba0905abceab9ee4484284e08
|
refs/heads/master
| 2021-07-12T20:24:46.265700 | 2020-07-06T17:58:31 | 2020-07-06T17:58:31 | 171,220,135 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 516 |
py
|
from infix_expression import *
def eval_infix_input():
while True:
infix = input(">>>")
if not infix:
print("Session ended. Thanks for using!")
break
try:
postfix = infix_to_postfix(infix)
answer = eval_postfix(postfix)
if int(answer) == answer:
answer = int(answer)
print(answer)
except SyntaxError:
print("Invalid syntax!")
if __name__ == "__main__":
eval_infix_input()
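# Example session (sketch; results depend on the infix_expression module):
#   >>>2 + 3 * 4
#   14
#   >>>(1 + 2) / 4
#   0.75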
|
[
"vanadiumzhang@gmail.com"
] |
vanadiumzhang@gmail.com
|
7ed072fa1524c95c0ada3f899e91a7dcbcfd91de
|
9897061cfd34babf80616ff21a20c30db0212970
|
/server/account/models.py
|
a01557b1bd74e7b11b8ff7b13401a7a631636ebe
|
[
"MIT"
] |
permissive
|
Samhaina/mahjong-portal
|
f310553c5df13e122f3e89d05a9867d0f122d4f1
|
4cdbd8bd61655584c25a437b3d5cab053507b2f4
|
refs/heads/master
| 2020-03-16T22:10:20.864718 | 2018-10-11T00:45:22 | 2018-10-11T00:45:22 | 133,029,373 | 0 | 0 | null | 2018-05-11T11:05:41 | 2018-05-11T11:05:41 | null |
UTF-8
|
Python
| false | false | 287 |
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from tournament.models import Tournament
class User(AbstractUser):
is_tournament_manager = models.BooleanField(default=False)
managed_tournaments = models.ManyToManyField(Tournament, blank=True)
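# Note (sketch): a custom user model like this must also be declared in the
# project settings before the first migration, e.g.:
#   AUTH_USER_MODEL = "account.User"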
|
[
"lisikhin@gmail.com"
] |
lisikhin@gmail.com
|
6631cd057d686d0a0d7c910975132247c9c16828
|
4e30c855c253cc1d972d29e83edb9d5ef662d30a
|
/approval/models/returns.py
|
fc4920552b9ab0a32ad1d864ac946c3732809dab
|
[
"MIT"
] |
permissive
|
rajeshr188/django-onex
|
8b531fc2f519d004d1da64f87b10ffacbd0f2719
|
0a190ca9bcf96cf44f7773686205f2c1f83f3769
|
refs/heads/master
| 2023-08-21T22:36:43.898564 | 2023-08-15T12:08:24 | 2023-08-15T12:08:24 | 163,012,755 | 2 | 0 |
NOASSERTION
| 2023-07-22T09:47:28 | 2018-12-24T17:46:35 |
Python
|
UTF-8
|
Python
| false | false | 3,919 |
py
|
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models, transaction
from django.db.models import Sum
from django.urls import reverse
from approval.models import ApprovalLine
from contact.models import Customer
from dea.models import Journal, JournalTypes
from product.models import StockLot
"""
When an approval voucher is created, the stock items that are being approved for release to a contact should be recorded in the database or inventory management system, along with the contact's information.
When the approved stock items are released to the contact, they should be recorded as being moved out of the approval area and into the possession of the contact.
If the contact returns some or all of the approved stock items, those items should be recorded as being returned to the approval area.
When the approval is complete and all approved stock items have been returned, the approval should be closed.
If any stock items were approved for release but not returned, those items should be flagged for invoicing.
When the invoice is created, the stock items that were approved but not returned should be included on the invoice, along with the appropriate billing information.
If any changes are made to the approval, return, or invoice, those changes should be recorded in the database or inventory management system, along with a timestamp and the user who made the changes.
"""
# Create your models here.
class Return(models.Model):
created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
created_by = models.ForeignKey(
"users.CustomUser", on_delete=models.CASCADE, null=True, blank=True
)
contact = models.ForeignKey(
Customer, related_name="approval_returns", on_delete=models.CASCADE
)
total_wt = models.DecimalField(max_digits=10, decimal_places=3, default=0)
total_qty = models.IntegerField(default=0)
posted = models.BooleanField(default=False)
def __str__(self):
return f"Return #{self.id}"
def get_absolute_url(self):
return reverse("approval:approval_return_detail", args=(self.pk,))
def get_total_qty(self):
return self.returnitem_set.aggregate(t=Sum("quantity"))["t"]
def get_total_wt(self):
return self.returnitem_set.aggregate(t=Sum("weight"))["t"]
class ReturnItem(models.Model):
return_obj = models.ForeignKey(Return, on_delete=models.CASCADE)
line_item = models.ForeignKey(
ApprovalLine, on_delete=models.CASCADE, related_name="return_items"
)
quantity = models.IntegerField(default=0)
weight = models.DecimalField(max_digits=10, decimal_places=3, default=0.0)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
journal = GenericRelation(Journal, related_query_name="approval_returnitem")
def __str__(self):
return f"{self.quantity} x {self.line_item.product}"
def get_absolute_url(self):
return reverse("approval:approval_returnitem_detail", args=(self.pk,))
def get_hx_edit_url(self):
kwargs = {"return_pk": self.return_obj.id, "pk": self.pk}
return reverse("approval:approval_returnitem_update", kwargs=kwargs)
def create_journal(self):
return Journal.objects.create(
journal_type=JournalTypes.SJ,
desc="Approval Return",
content_object=self,
)
def get_journal(self):
return self.journal.first()
@transaction.atomic
def post(self, journal):
self.line_item.product.transact(self.weight, self.quantity, journal, "AR")
self.line_item.update_status()
@transaction.atomic
def unpost(self, journal):
self.line_item.product.transact(self.weight, self.quantity, journal, "A")
self.line_item.update_status()
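# Usage sketch (hypothetical objects; mirrors the post() flow above):
#   item = ReturnItem.objects.get(pk=1)
#   journal = item.create_journal()
#   item.post(journal)  # records the "AR" stock movement and updates the line status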
|
[
"rajeshrathodh@gmail.com"
] |
rajeshrathodh@gmail.com
|
b4e8b03b8387462c961ea36f580a145007ada11a
|
38b68b2202726bcdea32271448fea22554db6121
|
/BOJ/Silver/1992.py
|
3b0a539d575b9951914cdb95f3dbd52b1b69e1cd
|
[] |
no_license
|
Soohee410/Algorithm-in-Python
|
42c4f02342dc922e44ee07e3a0e1d6c0a559e0bb
|
fbc859c092d86174387fe3dc11f16b616e6fdfab
|
refs/heads/master
| 2023-05-06T13:07:19.179143 | 2021-05-14T14:32:44 | 2021-05-14T14:32:44 | 336,232,129 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 495 |
py
|
def QuadTree(n, cp, x, y):
if n == 1:
return cp[x][y]
cp1 = QuadTree(n // 2, cp, x, y)
cp2 = QuadTree(n // 2, cp, x, y + n // 2)
cp3 = QuadTree(n // 2, cp, x + n // 2, y)
cp4 = QuadTree(n // 2, cp, x + n // 2, y + n // 2)
if cp1 == cp2 == cp3 == cp4 and len(cp1) == 1:
return cp1
return '('+cp1+cp2+cp3+cp4+')'
if __name__ == "__main__":
n = int(input())
arr = [list(input().rstrip()) for _ in range(n)]
print(QuadTree(n, arr, 0, 0))
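# Worked example (sketch): for the input
#   2
#   00
#   01
# the four quadrant pixels are '0', '0', '0', '1', so the output is (0001).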
|
[
"ggohee0410@gmail.com"
] |
ggohee0410@gmail.com
|
0f3cc4a2087d8125cc761a1644c51c12e6c814d4
|
d838bed08a00114c92b73982a74d96c15166a49e
|
/docs/data/learn/Bioinformatics/output/ch6_code/src/Stepik.6.9.CodeChallenge.2BreakDistance.py
|
a9ce5254b6d1201e2c2202e7b13a59eeda40ae42
|
[] |
no_license
|
offbynull/offbynull.github.io
|
4911f53d77f6c59e7a453ee271b1e04e613862bc
|
754a85f43159738b89dd2bde1ad6ba0d75f34b98
|
refs/heads/master
| 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 575 |
py
|
from BreakpointGraph import BreakpointGraph
with open('/home/user/Downloads/dataset_240324_4.txt', mode='r', encoding='utf-8') as f:
data = f.read()
lines = data.split('\n')
p_list1 = [[int(x) for x in s.split(' ')] for s in lines[0][1:-1].split(')(')]
p_list2 = [[int(x) for x in s.split(' ')] for s in lines[1][1:-1].split(')(')]
bg = BreakpointGraph(p_list1, p_list2)
cycles = bg.get_red_blue_cycles()
block_count = len(bg.node_to_blue_edges) // 2 # number of synteny blocks is number of nodes / 2
cycle_count = len(cycles)
print(f'{block_count - cycle_count}')
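# The printed value is the 2-break distance given by the cycle theorem:
#   d(P, Q) = blocks(P, Q) - cycles(P, Q)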
|
[
"offbynull@gmail.com"
] |
offbynull@gmail.com
|
35c792e078f9037cf38a3a3bd992d3b7bee00e0d
|
de17634e6b149d5828c1c78f7f5f5e1f6c17c4d0
|
/nnvm/amalgamation/amalgamation.py
|
310daa9d68e0e2cd33876364a3e4533f23cc45b5
|
[
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
starimpact/mxnet_v1.0.0
|
e135cc9e4c2711314d03cf1281a72b755f53144e
|
fcd6f7398ef811c3f8b01e7c9c16fb25c8d202bd
|
refs/heads/bv1.0.0
| 2022-11-10T09:09:11.966942 | 2018-07-13T04:59:30 | 2018-07-13T04:59:30 | 120,399,107 | 8 | 4 |
Apache-2.0
| 2022-11-02T20:24:32 | 2018-02-06T03:54:35 |
C++
|
UTF-8
|
Python
| false | false | 2,628 |
py
|
import sys
import os.path, re, StringIO
blacklist = [
'Windows.h',
'mach/clock.h', 'mach/mach.h',
'malloc.h',
'glog/logging.h', 'io/azure_filesys.h', 'io/hdfs_filesys.h', 'io/s3_filesys.h',
'sys/stat.h', 'sys/types.h',
'omp.h', 'execinfo.h', 'packet/sse-inl.h'
]
def get_sources(def_file):
sources = []
files = []
visited = set()
mxnet_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
for line in open(def_file):
files = files + line.strip().split(' ')
for f in files:
f = f.strip()
if not f or f.endswith('.o:') or f == '\\': continue
fn = os.path.relpath(f)
if os.path.abspath(f).startswith(mxnet_path) and fn not in visited:
sources.append(fn)
visited.add(fn)
return sources
sources = get_sources(sys.argv[1])
def find_source(name, start):
candidates = []
for x in sources:
if x == name or x.endswith('/' + name): candidates.append(x)
if not candidates: return ''
if len(candidates) == 1: return candidates[0]
for x in candidates:
if x.split('/')[1] == start.split('/')[1]: return x
return ''
re1 = re.compile('<([./a-zA-Z0-9_-]*)>')
re2 = re.compile('"([./a-zA-Z0-9_-]*)"')
sysheaders = []
history = set([])
out = StringIO.StringIO()
def expand(x, pending):
if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes
return
if x in pending:
#print 'loop found: %s in ' % x, pending
return
print >>out, "//===== EXPANDING: %s =====\n" %x
for line in open(x):
if line.find('#include') < 0:
out.write(line)
continue
if line.strip().find('#include') > 0:
print line
continue
m = re1.search(line)
if not m: m = re2.search(line)
if not m:
print line + ' not found'
continue
h = m.groups()[0].strip('./')
source = find_source(h, x)
if not source:
if (h not in blacklist and
h not in sysheaders and
'mkl' not in h and
'nnpack' not in h): sysheaders.append(h)
else:
expand(source, pending + [x])
print >>out, "//===== EXPANDED: %s =====\n" %x
history.add(x)
expand(sys.argv[2], [])
f = open(sys.argv[3], 'wb')
for k in sorted(sysheaders):
print >>f, "#include <%s>" % k
print >>f, ''
print >>f, out.getvalue()
for x in sources:
if x not in history and not x.endswith('.o'):
print 'Not processed:', x
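# Example invocation (sketch; Python 2, file names hypothetical):
#   python2 amalgamation.py deps.d src/nnvm_main.cc nnvm_amalgamation.cc
# argv[1] is a make-style dependency listing used to collect sources, argv[2]
# is the root file to expand, and argv[3] is the amalgamated output.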
|
[
"mingzhang@deepglint.com"
] |
mingzhang@deepglint.com
|