Dataset columns, in the order the cells appear in each record below:

| Column | dtype | Range / values | Nulls |
|---|---|---|---|
| hexsha | string | length 40 | |
| size | int64 | 3 to 1.03M | |
| ext | string | 10 distinct values | |
| lang | string | 1 distinct value | |
| max_stars_repo_path | string | length 3 to 972 | |
| max_stars_repo_name | string | length 6 to 130 | |
| max_stars_repo_head_hexsha | string | length 40 to 78 | |
| max_stars_repo_licenses | sequence | length 1 to 10 | |
| max_stars_count | int64 | 1 to 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 | ⌀ |
| max_issues_repo_path | string | length 3 to 972 | |
| max_issues_repo_name | string | length 6 to 130 | |
| max_issues_repo_head_hexsha | string | length 40 to 78 | |
| max_issues_repo_licenses | sequence | length 1 to 10 | |
| max_issues_count | int64 | 1 to 116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 | ⌀ |
| max_forks_repo_path | string | length 3 to 972 | |
| max_forks_repo_name | string | length 6 to 130 | |
| max_forks_repo_head_hexsha | string | length 40 to 78 | |
| max_forks_repo_licenses | sequence | length 1 to 10 | |
| max_forks_count | int64 | 1 to 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 | ⌀ |
| content | string | length 3 to 1.03M | |
| avg_line_length | float64 | 1.13 to 941k | |
| max_line_length | int64 | 2 to 941k | |
| alphanum_fraction | float64 | 0 to 1 | |
f1eb050f5f78a88cdcb901e8404776450cbd1dd6 | 1,131 | py | Python | app_venv/Lib/site-packages/phonenumbers/data/region_QA.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | ["MIT"] | null | null | null | app_venv/Lib/site-packages/phonenumbers/data/region_QA.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | ["MIT"] | null | null | null | app_venv/Lib/site-packages/phonenumbers/data/region_QA.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | ["MIT"] | null | null | null |
"""Auto-generated file, do not edit by hand. QA metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_QA = PhoneMetadata(id='QA', country_code=974, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-7]\\d{7}|800\\d{4}(?:\\d{2})?|2\\d{6}', possible_length=(7, 8, 9)),
fixed_line=PhoneNumberDesc(national_number_pattern='4141\\d{4}|(?:23|4[04])\\d{6}', example_number='44123456', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:2[89]|[35-7]\\d)\\d{6}', example_number='33123456', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{4}(?:\\d{2})?', example_number='8001234', possible_length=(7, 9)),
pager=PhoneNumberDesc(national_number_pattern='2(?:[12]\\d|61)\\d{4}', example_number='2123456', possible_length=(7,)),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['2[126]|8']),
NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[2-7]'])],
mobile_number_portable_region=True)
| 87 | 137 | 0.695844 |
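A minimal usage sketch for the generated metadata in the row above, assuming the `phonenumbers` package (which ships `region_QA.py`) is installed; the sample inputs are the `example_number` values taken from the metadata itself.

```python
# Hedged sketch: exercises PHONE_METADATA_QA indirectly through the public
# phonenumbers API. The sample numbers come from the example_number fields above.
import phonenumbers

for national in ("44123456", "33123456", "8001234"):
    num = phonenumbers.parse(national, "QA")      # region "QA" selects the metadata in this row
    print(national,
          phonenumbers.is_valid_number(num),      # validated against the patterns above
          phonenumbers.number_type(num))          # fixed-line / mobile / toll-free
```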
62b3398585bb7fef71d09b4a2e397e97e51ecc0e | 344 | py | Python | slider.py | RyderTheCoder/gui-learning | d92636fe4c31bdfd8752ccdd3a3f1dd3e9edb2cd | ["Unlicense"] | null | null | null | slider.py | RyderTheCoder/gui-learning | d92636fe4c31bdfd8752ccdd3a3f1dd3e9edb2cd | ["Unlicense"] | null | null | null | slider.py | RyderTheCoder/gui-learning | d92636fe4c31bdfd8752ccdd3a3f1dd3e9edb2cd | ["Unlicense"] | null | null | null |
from Tkinter import *
def sel():
selection = "Value = " + str(var.get())
label.config(text = selection)
root = Tk()
var = DoubleVar()
scale = Scale( root, variable = var )
scale.pack(anchor=CENTER)
button = Button(root, text="Get Scale Value", command=sel)
button.pack(anchor=CENTER)
label = Label(root)
label.pack()
root.mainloop()
| 18.105263 | 58 | 0.680233 |
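The `slider.py` row above is written for Python 2 (`Tkinter`); a sketch of the same slider under Python 3, assuming nothing changes beyond the renamed lowercase `tkinter` module.

```python
# Python 3 adaptation of the slider example above.
from tkinter import Tk, DoubleVar, Scale, Button, Label, CENTER

root = Tk()
var = DoubleVar()
label = Label(root)

def sel():
    label.config(text="Value = " + str(var.get()))  # show the current slider value

Scale(root, variable=var).pack(anchor=CENTER)
Button(root, text="Get Scale Value", command=sel).pack(anchor=CENTER)
label.pack()
root.mainloop()
```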
096f60818786251bd1817e13a1958f2915d6b642 | 87 | py | Python | conf.py | ziozzang/clair-generic-scanner | f7ea4c67cfa13af751bb79d241a5ae2b19edcc50 | ["BSD-2-Clause"] | null | null | null | conf.py | ziozzang/clair-generic-scanner | f7ea4c67cfa13af751bb79d241a5ae2b19edcc50 | ["BSD-2-Clause"] | null | null | null | conf.py | ziozzang/clair-generic-scanner | f7ea4c67cfa13af751bb79d241a5ae2b19edcc50 | ["BSD-2-Clause"] | null | null | null |
DB_IP='pgsql'
DB_PORT='5432'
DB_ID='postgres'
DB_PW=''
BIND_ADDR="0.0.0.0"
DEBUG=False
| 12.428571 | 19 | 0.712644 |
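A hedged sketch of how a settings module like `conf.py` above is typically consumed; the `psycopg2` driver and the `postgres` database name are assumptions for illustration, not something the row itself specifies.

```python
# Illustrative only: connect to the PostgreSQL instance described by conf.py.
# The psycopg2 driver and the target database name ("postgres") are assumed.
import psycopg2
import conf

conn = psycopg2.connect(
    host=conf.DB_IP,
    port=conf.DB_PORT,
    user=conf.DB_ID,
    password=conf.DB_PW,
    dbname="postgres",
)
print(conn.status)
conn.close()
```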
57d772f0eadb4e3818bfc3378535fb2ec2401aeb | 7,056 | py | Python | gtfs_data/database_test.py | seanrees/gtfs-upcoming | 76ebff4bbe1bd49bc598dfe0840792dfad500ae5 | ["MIT"] | 5 | 2020-09-08T20:38:46.000Z | 2022-03-31T18:24:31.000Z | gtfs_data/database_test.py | seanrees/gtfs-upcoming | 76ebff4bbe1bd49bc598dfe0840792dfad500ae5 | ["MIT"] | null | null | null | gtfs_data/database_test.py | seanrees/gtfs-upcoming | 76ebff4bbe1bd49bc598dfe0840792dfad500ae5 | ["MIT"] | 1 | 2022-03-31T18:24:32.000Z | 2022-03-31T18:24:32.000Z |
import gtfs_data.database
import datetime
import unittest
TEST_FILE = 'gtfs_data/testdata/agency.txt'
GTFS_DATA = 'gtfs_data/testdata'
INTERESTING_STOPS = ['8220DB000490']
class TestDatabase(unittest.TestCase):
def setUp(self):
self.database = gtfs_data.database.Database(GTFS_DATA, INTERESTING_STOPS)
def test_Load(self):
data = self.database._Load('agency.txt')
self.assertEqual(len(data), 4)
def test_Collect(self):
data = [
{'a': 'one', 'b': 200},
{'a': 'one', 'b': 300},
{'a': 'two', 'b': 400},
]
c = self.database._Collect(data, 'a')
self.assertEqual(c, {
'one': {'a': 'one', 'b': 300},
'two': {'a': 'two', 'b': 400}})
c = self.database._Collect(data, 'a', multi=True)
self.assertEqual(c, {
'one': [{'a': 'one', 'b': 200}, {'a': 'one', 'b': 300}],
'two': [{'a': 'two', 'b': 400}]})
def testGetTrip(self):
self.database.Load()
found = self.database.GetTrip('1167')
self.assertIsNotNone(found)
self.assertEqual(found.trip_headsign, 'Loughlinstown Wood Estate - Mountjoy Square Nth')
notfound = self.database.GetTrip('1168')
self.assertIsNone(notfound)
def testLoad(self):
self.database.Load()
trips = {
'1167': {
'direction_id': '1',
'trip_headsign': 'Loughlinstown Wood Estate - Mountjoy Square Nth',
'route_short_name': '7A',
'num_stop_times': 64,
},
'1169': {
'direction_id': '1',
'trip_headsign': 'Bride\'s Glen Bus Stop - Mountjoy Square Nth',
'route_short_name': '7',
'num_stop_times': 56
}
}
self.assertEqual(len(self.database._trip_db.keys()), 2)
for unused, t in self.database._trip_db.items():
self.assertIn(t.trip_id, trips.keys())
data = trips[t.trip_id]
self.assertEqual(t.direction_id, data['direction_id'])
self.assertEqual(t.trip_headsign, data['trip_headsign'])
self.assertIsNotNone(t.route)
self.assertEqual(t.route['route_short_name'], data['route_short_name'])
self.assertIsNotNone(t.stop_times)
self.assertEqual(len(t.stop_times), data['num_stop_times'])
def testLoadAll(self):
database = gtfs_data.database.Database(GTFS_DATA, [])
database.Load()
self.assertEqual(database._trip_db.keys(), set(['1167', '1168', '1169', 'ONIGHT']))
def testGetScheduledFor(self):
database = gtfs_data.database.Database(GTFS_DATA, [])
database.Load()
stop_id = INTERESTING_STOPS[0]
start = datetime.datetime(2020, 11, 19, 7, 30, 00)
stop = datetime.datetime(2020, 11, 19, 8, 30, 00)
resp = database.GetScheduledFor(stop_id, start, stop)
# Note: GetScheduledFor sorts on arrival time; so the order here is
# predictable.
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].trip_id, '1167')
self.assertEqual(resp[1].trip_id, '1169')
# This trip's schedule has no exceptions; ensure we don't error
# out loading it. Note: the stop id below is not in INTERESTING_STOPS
# so we don't get it by default from setUp().
stop_id = '8220DB000819'
start = datetime.datetime(2020, 11, 19, 20, 00, 00)
stop = datetime.datetime(2020, 11, 19, 21, 00, 00)
resp = database.GetScheduledFor(stop_id, start, stop)
self.assertEqual(len(resp), 1)
self.assertEqual(resp[0].trip_id, '1168')
def testGetScheduledForOvernightRoutes(self):
"""Test schedule generation for routes that span days"""
database = gtfs_data.database.Database(GTFS_DATA, [])
database.Load()
stop_id = 'ONIGHT-STOP2'
start = datetime.datetime(2020, 11, 19, 23, 00, 00)
stop = datetime.datetime(2020, 11, 20, 2, 00, 00)
resp = database.GetScheduledFor(stop_id, start, stop)
self.assertEqual(len(resp), 1)
self.assertEqual(resp[0].trip_id, 'ONIGHT')
start = datetime.datetime(2020, 11, 20, 0, 00, 00)
stop = datetime.datetime(2020, 11, 20, 2, 00, 00)
resp = database.GetScheduledFor(stop_id, start, stop)
self.assertEqual(len(resp), 1)
self.assertEqual(resp[0].trip_id, 'ONIGHT')
start = datetime.datetime(2020, 11, 18, 23, 00, 00)
stop = datetime.datetime(2020, 11, 20, 2, 00, 00)
resp = database.GetScheduledFor(stop_id, start, stop)
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].trip_id, 'ONIGHT')
self.assertEqual(resp[0].trip_id, 'ONIGHT')
def testGetScheduledForInvalids(self):
self.database.Load()
start = datetime.datetime(2020, 11, 20, 0, 00, 00)
stop = datetime.datetime(2020, 11, 20, 2, 00, 00)
# Invalid stop.
resp = self.database.GetScheduledFor("foo", start, stop)
self.assertEqual(len(resp), 0)
# Invalid times.
self.assertRaises(ValueError, self.database.GetScheduledFor,
INTERESTING_STOPS[0], stop, start)
def testGetScheduledForExceptions(self):
self.database.Load()
# We have an exception for this date ("no service").
stop_id = INTERESTING_STOPS[0]
start = datetime.datetime(2020, 11, 26, 7, 30, 00)
stop = datetime.datetime(2020, 11, 26, 8, 30, 00)
resp = self.database.GetScheduledFor(stop_id, start, stop)
self.assertEqual(len(resp), 0)
# We have an exception for this date ("added service").
stop_id = INTERESTING_STOPS[0]
start = datetime.datetime(2020, 11, 27, 7, 30, 00)
stop = datetime.datetime(2020, 11, 27, 8, 30, 00)
resp = self.database.GetScheduledFor(stop_id, start, stop)
self.assertEqual(len(resp), 2)
def testIsValidServiceDay(self):
database = gtfs_data.database.Database(GTFS_DATA, [])
database.Load()
# The exceptions only apply to trips 1167 and 1169. Trip 1168 has no exceptions
# but we should check to make sure it still behaves normally.
removed_service_date = datetime.date(2020, 11, 26)
self.assertFalse(database._IsValidServiceDay(removed_service_date, '1167'))
self.assertFalse(database._IsValidServiceDay(removed_service_date, '1169'))
self.assertTrue(database._IsValidServiceDay(removed_service_date, '1168'))
added_service_date = datetime.date(2020, 11, 27)
self.assertTrue(database._IsValidServiceDay(added_service_date, '1167'))
self.assertTrue(database._IsValidServiceDay(added_service_date, '1169'))
self.assertTrue(database._IsValidServiceDay(added_service_date, '1168'))
normal_service_date = datetime.date(2020, 11, 19)
self.assertTrue(database._IsValidServiceDay(normal_service_date, '1167'))
self.assertTrue(database._IsValidServiceDay(normal_service_date, '1169'))
self.assertTrue(database._IsValidServiceDay(normal_service_date, '1168'))
normal_no_service_date = datetime.date(2020, 11, 28)
self.assertFalse(database._IsValidServiceDay(normal_no_service_date, '1167'))
self.assertFalse(database._IsValidServiceDay(normal_no_service_date, '1169'))
self.assertFalse(database._IsValidServiceDay(normal_no_service_date, '1168'))
def testNumberOfDays(self):
self.assertEqual(len(gtfs_data.database.CALENDAR_DAYS), 7)
if __name__ == '__main__':
unittest.main()
| 36.371134 | 92 | 0.68013 |
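A usage sketch distilled from the tests in the row above; it only calls what the tests themselves exercise (`Database(...)`, `Load()`, `GetScheduledFor(...)`) and assumes the same `gtfs_data/testdata` fixtures are on disk.

```python
# Minimal end-to-end use of gtfs_data.database, mirroring the tests above.
import datetime
import gtfs_data.database

db = gtfs_data.database.Database('gtfs_data/testdata', ['8220DB000490'])
db.Load()
upcoming = db.GetScheduledFor(
    '8220DB000490',
    datetime.datetime(2020, 11, 19, 7, 30),
    datetime.datetime(2020, 11, 19, 8, 30))
print([trip.trip_id for trip in upcoming])   # the tests expect trips '1167' and '1169'
```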
4dfb786d497ae53ffc6e76a24bd7f6b32676cdf0 | 12,565 | py | Python | pysnmp/Wellfleet-QOS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/Wellfleet-QOS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/Wellfleet-QOS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z |
#
# PySNMP MIB module Wellfleet-QOS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-QOS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:34:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, Counter32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, TimeTicks, iso, Unsigned32, Gauge32, Bits, IpAddress, ObjectIdentity, Counter64, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Counter32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "TimeTicks", "iso", "Unsigned32", "Gauge32", "Bits", "IpAddress", "ObjectIdentity", "Counter64", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
wfServicePkgGroup, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfServicePkgGroup")
wfQosServPkgTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 1), )
if mibBuilder.loadTexts: wfQosServPkgTable.setStatus('mandatory')
wfQosServPkgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 1, 1), ).setIndexNames((0, "Wellfleet-QOS-MIB", "wfQosServPkgIndex"))
if mibBuilder.loadTexts: wfQosServPkgEntry.setStatus('mandatory')
wfQosServPkgDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgDelete.setStatus('mandatory')
wfQosServPkgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQosServPkgIndex.setStatus('mandatory')
wfQosServPkgServiceName = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 1, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgServiceName.setStatus('mandatory')
wfQosServPkgScheduling = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("round-robin", 1), ("strict-priority", 2))).clone('round-robin')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgScheduling.setStatus('mandatory')
wfQosServPkgNumQueues = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQosServPkgNumQueues.setStatus('mandatory')
wfQosServPkgNumLines = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQosServPkgNumLines.setStatus('mandatory')
wfQosServPkgQueCfgTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2), )
if mibBuilder.loadTexts: wfQosServPkgQueCfgTable.setStatus('mandatory')
wfQosServPkgQueCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1), ).setIndexNames((0, "Wellfleet-QOS-MIB", "wfQosServPkgQueCfgServiceIndex"), (0, "Wellfleet-QOS-MIB", "wfQosServPkgQueCfgQueueIndex"))
if mibBuilder.loadTexts: wfQosServPkgQueCfgEntry.setStatus('mandatory')
wfQosServPkgQueCfgDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgDelete.setStatus('mandatory')
wfQosServPkgQueCfgServiceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQosServPkgQueCfgServiceIndex.setStatus('mandatory')
wfQosServPkgQueCfgQueueIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQosServPkgQueCfgQueueIndex.setStatus('mandatory')
wfQosServPkgQueCfgQueueName = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgQueueName.setStatus('mandatory')
wfQosServPkgQueCfgState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("waitPkg", 2), ("misCfg", 3))).clone('waitPkg')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQosServPkgQueCfgState.setStatus('mandatory')
wfQosServPkgQueCfgClass = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7)).clone(7)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgClass.setStatus('mandatory')
wfQosServPkgQueCfgAcctRule = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgAcctRule.setStatus('mandatory')
wfQosServPkgQueCfgRxCommitInfoRate = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1536))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgRxCommitInfoRate.setStatus('mandatory')
wfQosServPkgQueCfgRxBurstRate = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1536))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgRxBurstRate.setStatus('mandatory')
wfQosServPkgQueCfgRxBurstSize = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 10), Integer32().clone(8000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgRxBurstSize.setStatus('mandatory')
wfQosServPkgQueCfgRxBurstAction = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("downgrade", 2), ("mark", 3), ("mark-downgrade", 4))).clone('none')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgRxBurstAction.setStatus('mandatory')
wfQosServPkgQueCfgTxDropThresh = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("percent-83", 2), ("percent-66", 3), ("percent-50", 4))).clone('none')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgTxDropThresh.setStatus('mandatory')
wfQosServPkgQueCfgTxWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 13), Integer32().clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfQosServPkgQueCfgTxWeight.setStatus('mandatory')
wfQosServPkgQueCfgTxActualWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 2, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQosServPkgQueCfgTxActualWeight.setStatus('mandatory')
wfQueueStatTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3), )
if mibBuilder.loadTexts: wfQueueStatTable.setStatus('mandatory')
wfQueueStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1), ).setIndexNames((0, "Wellfleet-QOS-MIB", "wfQueueStatPortLineNumber"), (0, "Wellfleet-QOS-MIB", "wfQueueStatLineIndex"), (0, "Wellfleet-QOS-MIB", "wfQueueStatQueueIndex"))
if mibBuilder.loadTexts: wfQueueStatEntry.setStatus('mandatory')
wfQueueStatPortLineNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatPortLineNumber.setStatus('mandatory')
wfQueueStatLineIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatLineIndex.setStatus('mandatory')
wfQueueStatQueueIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatQueueIndex.setStatus('mandatory')
wfQueueStatTxOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatTxOctets.setStatus('mandatory')
wfQueueStatTxPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatTxPackets.setStatus('mandatory')
wfQueueStatTxDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatTxDrops.setStatus('mandatory')
wfQueueStatRxBelowCirOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatRxBelowCirOctets.setStatus('mandatory')
wfQueueStatRxBelowCirPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatRxBelowCirPackets.setStatus('mandatory')
wfQueueStatRxAboveCirOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatRxAboveCirOctets.setStatus('mandatory')
wfQueueStatRxAboveCirPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatRxAboveCirPackets.setStatus('mandatory')
wfQueueStatRxAboveBrOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatRxAboveBrOctets.setStatus('mandatory')
wfQueueStatRxAboveBrPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 3, 2, 23, 1, 3, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfQueueStatRxAboveBrPackets.setStatus('mandatory')
mibBuilder.exportSymbols("Wellfleet-QOS-MIB", wfQosServPkgQueCfgTable=wfQosServPkgQueCfgTable, wfQueueStatTxDrops=wfQueueStatTxDrops, wfQosServPkgNumQueues=wfQosServPkgNumQueues, wfQosServPkgQueCfgRxBurstSize=wfQosServPkgQueCfgRxBurstSize, wfQosServPkgDelete=wfQosServPkgDelete, wfQueueStatRxAboveCirPackets=wfQueueStatRxAboveCirPackets, wfQosServPkgQueCfgRxCommitInfoRate=wfQosServPkgQueCfgRxCommitInfoRate, wfQueueStatPortLineNumber=wfQueueStatPortLineNumber, wfQosServPkgTable=wfQosServPkgTable, wfQosServPkgQueCfgServiceIndex=wfQosServPkgQueCfgServiceIndex, wfQueueStatTxPackets=wfQueueStatTxPackets, wfQosServPkgQueCfgDelete=wfQosServPkgQueCfgDelete, wfQosServPkgNumLines=wfQosServPkgNumLines, wfQueueStatRxAboveBrOctets=wfQueueStatRxAboveBrOctets, wfQosServPkgQueCfgClass=wfQosServPkgQueCfgClass, wfQosServPkgEntry=wfQosServPkgEntry, wfQosServPkgQueCfgQueueName=wfQosServPkgQueCfgQueueName, wfQueueStatRxAboveCirOctets=wfQueueStatRxAboveCirOctets, wfQosServPkgScheduling=wfQosServPkgScheduling, wfQosServPkgQueCfgAcctRule=wfQosServPkgQueCfgAcctRule, wfQosServPkgQueCfgTxWeight=wfQosServPkgQueCfgTxWeight, wfQosServPkgServiceName=wfQosServPkgServiceName, wfQosServPkgQueCfgEntry=wfQosServPkgQueCfgEntry, wfQosServPkgQueCfgQueueIndex=wfQosServPkgQueCfgQueueIndex, wfQueueStatQueueIndex=wfQueueStatQueueIndex, wfQueueStatRxBelowCirOctets=wfQueueStatRxBelowCirOctets, wfQosServPkgQueCfgRxBurstAction=wfQosServPkgQueCfgRxBurstAction, wfQosServPkgQueCfgTxDropThresh=wfQosServPkgQueCfgTxDropThresh, wfQueueStatRxBelowCirPackets=wfQueueStatRxBelowCirPackets, wfQueueStatTable=wfQueueStatTable, wfQosServPkgIndex=wfQosServPkgIndex, wfQueueStatTxOctets=wfQueueStatTxOctets, wfQueueStatLineIndex=wfQueueStatLineIndex, wfQosServPkgQueCfgRxBurstRate=wfQosServPkgQueCfgRxBurstRate, wfQueueStatEntry=wfQueueStatEntry, wfQueueStatRxAboveBrPackets=wfQueueStatRxAboveBrPackets, wfQosServPkgQueCfgState=wfQosServPkgQueCfgState, wfQosServPkgQueCfgTxActualWeight=wfQosServPkgQueCfgTxActualWeight)
| 136.576087 | 1,981 | 0.771269 |
b4ed4c572c624bcede20af8b2a24eff016a690b2 | 1,372 | py | Python | 9-6.py | Holaplace/path_to_python | 8fae2aca8d6da04c39a67514948fdf50e883750a | ["MIT"] | 1 | 2019-02-06T01:49:18.000Z | 2019-02-06T01:49:18.000Z | 9-6.py | Holaplace/path_to_python | 8fae2aca8d6da04c39a67514948fdf50e883750a | ["MIT"] | null | null | null | 9-6.py | Holaplace/path_to_python | 8fae2aca8d6da04c39a67514948fdf50e883750a | ["MIT"] | null | null | null |
class Restaurant():
def __init__(self, restaurant_name, cuisine_type):
self.restaurant_name = restaurant_name
self.cuisine_type = cuisine_type
self.count = 10
def describe_restaurant(self):
print('The restaurant name is '
+ self.restaurant_name.title()
+ ', and the cuisine type is '
+ self.cuisine_type.title())
def open_restaurant(self):
print(self.restaurant_name.title() + ' is opening.')
def number_served(self):
print("This restaurant has " + str(self.count) + " people been here.")
def set_number_served(self,num):
self.count = num
print("This restaurant has " + str(self.count) + " people been here.")
def increment_number_served(self,num_increment):
self.count += num_increment
print("This restaurant has " + str(self.count) + " people (new) been here.")
class IceCreamStand(Restaurant):
def __init__(self, restaurant_name, cuisine_type):
super().__init__(restaurant_name, cuisine_type)
self.flavors = ['me', 'you', 'them']
def flavor(self):
for flavor in self.flavors:
print('We have these kinds: ' + flavor)
res = IceCreamStand('me', 'chinese')
res.describe_restaurant()
res.increment_number_served(80)
res.flavor()
| 31.181818 | 85 | 0.620262 |
a9f1d66e9ad3970c857e9174aa855a587096b24c | 17,586 | py | Python | scipy/signal/tests/test_spectral.py | jiffyclub/scipy | e346aa55c0416b915148c35cc200a0ed74f85c0a | ["BSD-3-Clause"] | 1 | 2019-04-27T16:04:14.000Z | 2019-04-27T16:04:14.000Z | scipy/signal/tests/test_spectral.py | joferkington/scipy | 6a7327e8bb8248b2ea165180bc602edf1ab33dda | ["BSD-3-Clause"] | 5 | 2021-03-19T08:36:48.000Z | 2022-01-13T01:52:34.000Z | scipy/signal/tests/test_spectral.py | joferkington/scipy | 6a7327e8bb8248b2ea165180bc602edf1ab33dda | ["BSD-3-Clause"] | 1 | 2019-08-13T21:23:57.000Z | 2019-08-13T21:23:57.000Z |
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_approx_equal, \
assert_, run_module_suite, TestCase,\
assert_allclose, assert_array_equal,\
assert_array_almost_equal_nulp, dec
from scipy import signal, fftpack
from scipy.lib._version import NumpyVersion
from scipy.signal import periodogram, welch, lombscargle
class TestPeriodogram(TestCase):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_odd(self):
x = np.zeros(15)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q[-1] /= 2.0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, scaling='spectrum')
g, q = periodogram(x, scaling='density')
assert_allclose(f, np.linspace(0, 0.5, 9))
assert_allclose(p, q/16.0)
def test_integer_even(self):
x = np.zeros(16, dtype=np.int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_integer_odd(self):
x = np.zeros(15, dtype=np.int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q[-1] /= 2.0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_integer_twosided(self):
x = np.zeros(16, dtype=np.int)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
f, p = periodogram(x)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = 5.0*np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_unk_scaling(self):
assert_raises(ValueError, periodogram, np.zeros(4, np.complex128),
scaling='foo')
def test_nd_axis_m1(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((2,1,10))
x[:,:,0] = 1.0
f, p = periodogram(x)
assert_array_equal(p.shape, (2, 1, 6))
assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60)
f0, p0 = periodogram(x[0,0,:])
assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60)
def test_nd_axis_0(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((10,2,1))
x[0,:,:] = 1.0
f, p = periodogram(x, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60)
f0, p0 = periodogram(x[:,0,0])
assert_array_almost_equal_nulp(p0, p[:,1,0])
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, 10, 'hanning')
win = signal.get_window('hanning', 16)
fe, pe = periodogram(x, 10, win)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
def test_padded_fft(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
fp, pp = periodogram(x, nfft=32)
assert_allclose(f, fp[::2])
assert_allclose(p, pp[::2])
assert_array_equal(pp.shape, (17,))
def test_empty_input(self):
f, p = periodogram([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_nfft(self):
x = np.zeros(18)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_nfft_is_xshape(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9, 'f')
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(15, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8, 'f')
q[0] = 0
q[-1] /= 2.0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-7)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16, 'f')/16.0
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
f, p = periodogram(x)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = 5.0*np.ones(16, 'f')/16.0
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
class TestWelch(TestCase):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
assert_allclose(p, np.array([0.08333333, 0.15277778, 0.22222222,
0.22222222, 0.11111111]))
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
assert_allclose(p, np.array([0.15958226, 0.24193954, 0.24145223,
0.24100919, 0.12188675]))
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
assert_allclose(p, np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889]))
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
assert_allclose(p, np.array([0.015625, 0.028645833333333332,
0.041666666666666664, 0.041666666666666664, 0.020833333333333332]))
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=np.int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
assert_allclose(p, np.array([0.08333333, 0.15277778, 0.22222222,
0.22222222, 0.11111111]))
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=np.int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
assert_allclose(p, np.array([0.15958226, 0.24193954, 0.24145223,
0.24100919, 0.12188675]))
def test_integer_twosided(self):
x = np.zeros(16, dtype=np.int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
assert_allclose(p, np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889]))
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
assert_allclose(p, np.array([0.41666667, 0.38194444, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.55555556, 0.38194444]))
def test_unk_scaling(self):
assert_raises(ValueError, welch, np.zeros(4, np.complex128),
scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = welch(x, nperseg=10, detrend=False)
f2, p2 = welch(x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = welch(x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = welch(x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = welch(x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, 10, 'hanning', 8)
win = signal.get_window('hanning', 8)
fe, pe = welch(x, 10, win, 8)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
def test_empty_input(self):
f, p = welch([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = welch(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
f, p = welch(x)
f1, p1 = welch(x, nperseg=8)
assert_allclose(f, f1)
assert_allclose(p, p1)
def test_window_long_or_nd(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1]))
assert_raises(ValueError, welch, np.zeros(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = welch(x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., 1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, welch, np.zeros(4), 1, 'hanning', 2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.15958226, 0.24193954, 0.24145223, 0.24100919,
0.12188675], 'f')
assert_allclose(p, q, atol=1e-7)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889], 'f')
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.55555556, 0.38194444], 'f')
assert_allclose(p, q)
assert_(p.dtype == q.dtype, 'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
class TestLombscargle:
def test_frequency(self):
"""Test if frequency location of peak corresponds to frequency of
generated input signal.
"""
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
P = lombscargle(t, x, f)
# Check if difference between found frequency maximum and input
# frequency is less than accuracy
delta = f[1] - f[0]
assert_(w - f[np.argmax(P)] < (delta/2.))
def test_amplitude(self):
"""Test if height of peak in normalized Lomb-Scargle periodogram
corresponds to amplitude of the generated input signal.
"""
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
# Plot a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f)
# Normalize
pgram = np.sqrt(4 * pgram / t.shape[0])
# Check if difference between found frequency maximum and input
# frequency is less than accuracy
assert_approx_equal(np.max(pgram), ampl, significant=2)
def test_wrong_shape(self):
t = np.linspace(0, 1, 1)
x = np.linspace(0, 1, 2)
f = np.linspace(0, 1, 3)
assert_raises(ValueError, lombscargle, t, x, f)
def test_zero_division(self):
t = np.zeros(1)
x = np.zeros(1)
f = np.zeros(1)
assert_raises(ZeroDivisionError, lombscargle, t, x, f)
def test_lombscargle_atan_vs_atan2(self):
# https://github.com/scipy/scipy/issues/3787
# This raised a ZeroDivisionError.
t = np.linspace(0, 10, 1000, endpoint=False)
x = np.sin(4*t)
f = np.linspace(0, 50, 500, endpoint=False) + 0.1
q = lombscargle(t, x, f*2*np.pi)
if __name__ == "__main__":
run_module_suite()
| 33.243856 | 83 | 0.551234 |
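The scipy test file above pins down the behaviour of `scipy.signal.welch` and `periodogram`; a small self-contained sketch of the API being tested (the signal parameters are illustrative).

```python
# Estimate the power spectral density of a noisy 50 Hz tone with Welch's method.
import numpy as np
from scipy.signal import welch

fs = 1000.0                                   # sampling rate in Hz
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 50 * t) + 0.1 * np.random.randn(t.size)

f, pxx = welch(x, fs=fs, nperseg=256)         # one-sided PSD estimate
print(f[np.argmax(pxx)])                      # peak frequency should land near 50 Hz
```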
e65ca3bac939e4c922da5142173bf5d766ecc7da | 3,871 | py | Python | cnn2d_model.py | wumch/text-classification-cnn-rnn | f2b8395442d8d5e373990a6672fc665a07c3c655 | ["MIT"] | null | null | null | cnn2d_model.py | wumch/text-classification-cnn-rnn | f2b8395442d8d5e373990a6672fc665a07c3c655 | ["MIT"] | null | null | null | cnn2d_model.py | wumch/text-classification-cnn-rnn | f2b8395442d8d5e373990a6672fc665a07c3c655 | ["MIT"] | null | null | null |
import tensorflow as tf
import word2vec
import numpy as np
class TextCNN(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
"""
def __init__(
self, sequence_length, num_classes, embedding_model: word2vec.WordVectors, filter_sizes, num_filters, l2_reg_lambda=0.0):
vocab_size, embedding_size = embedding_model.vectors.shape
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
with tf.device('/cpu:0'), tf.name_scope("embedding"):
self.W = tf.Variable(
tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
name="W")
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape=[num_filters_total, num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# Calculate mean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
| 43.988636 | 129 | 0.594162 |
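A hedged sketch of building the `TextCNN` graph defined in the row above. It assumes TensorFlow 1.x (the class uses `tf.placeholder` and `tf.contrib`, which are gone in 2.x), imports the class from the row's module path, and substitutes a random matrix for the `word2vec` model; all hyper-parameters are illustrative.

```python
# Illustrative wiring of the TextCNN class above under TensorFlow 1.x.
# The fake embedding model only mimics the .vectors attribute that the class reads.
import numpy as np
import tensorflow as tf
from types import SimpleNamespace
from cnn2d_model import TextCNN          # module name taken from the row's path

fake_w2v = SimpleNamespace(vectors=np.random.rand(1000, 128).astype(np.float32))

cnn = TextCNN(sequence_length=56, num_classes=2, embedding_model=fake_w2v,
              filter_sizes=[3, 4, 5], num_filters=64, l2_reg_lambda=0.1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {cnn.input_x: np.random.randint(0, 1000, size=(8, 56)),
            cnn.input_y: np.eye(2, dtype=np.float32)[np.random.randint(0, 2, size=8)],
            cnn.dropout_keep_prob: 0.5}
    print(sess.run([cnn.loss, cnn.accuracy], feed_dict=feed))
```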
74fc343c097e20acb922f5c9ed6bb8d5908726de | 3,736 | py | Python | python/cugraph/community/ktruss_subgraph.py | isVoid/cugraph | a39e5a3063b9ac8ee93f4cc632c59ca2613434c6 | ["Apache-2.0"] | null | null | null | python/cugraph/community/ktruss_subgraph.py | isVoid/cugraph | a39e5a3063b9ac8ee93f4cc632c59ca2613434c6 | ["Apache-2.0"] | null | null | null | python/cugraph/community/ktruss_subgraph.py | isVoid/cugraph | a39e5a3063b9ac8ee93f4cc632c59ca2613434c6 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.community import ktruss_subgraph_wrapper
from cugraph.structure.graph import Graph
def ktruss_subgraph(G, k, use_weights=True):
"""
Returns the K-Truss subgraph of a graph for a specific k.
The k-truss of a graph is a subgraph where each edge is part of at least
(k−2) triangles. K-trusses are used for finding tighlty knit groups of
vertices in a graph. A k-truss is a relaxation of a k-clique in the graph
and was define in [1]. Finding cliques is computationally demanding and
finding the maximal k-clique is known to be NP-Hard.
In contrast, finding a k-truss is computationally tractable as its
key building block, namely triangle counting counting, can be executed
in polnymomial time.Typically, it takes many iterations of triangle
counting to find the k-truss of a graph. Yet these iterations operate
on a weakly monotonically shrinking graph.
Therefore, finding the k-truss of a graph can be done in a fairly
reasonable amount of time. The solution in cuGraph is based on a
GPU algorithm first shown in [2] and uses the triangle counting algorithm
from [3].
[1] Cohen, J.,
"Trusses: Cohesive subgraphs for social network analysis"
National security agency technical report, 2008
[2] O. Green, J. Fox, E. Kim, F. Busato, et al.
“Quickly Finding a Truss in a Haystack”
IEEE High Performance Extreme Computing Conference (HPEC), 2017
https://doi.org/10.1109/HPEC.2017.8091038
[3] O. Green, P. Yalamanchili, L.M. Munguia,
“Fast Triangle Counting on GPU”
Irregular Applications: Architectures and Algorithms (IA3), 2014
Parameters
----------
G : cuGraph.Graph
cuGraph graph descriptor with connectivity information. k-Trusses are
defined for only undirected graphs as they are defined for
undirected triangle in a graph.
k : int
The desired k to be used for extracting the k-truss subgraph.
use_weights : Bool
whether the output should contain the edge weights if G has them
Returns
-------
G_truss : cuGraph.Graph
A cugraph graph descriptor with the k-truss subgraph for the given k.
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(M, source='0', destination='1')
>>> k_subgraph = cugraph.ktruss_subgraph(G, 3)
"""
KTrussSubgraph = Graph()
if type(G) is not Graph:
raise Exception("input graph must be undirected")
subgraph_df = ktruss_subgraph_wrapper.ktruss_subgraph(G, k, use_weights)
if G.renumbered:
subgraph_df = G.unrenumber(subgraph_df, "src")
subgraph_df = G.unrenumber(subgraph_df, "dst")
if G.edgelist.weights:
KTrussSubgraph.from_cudf_edgelist(
subgraph_df, source="src", destination="dst", edge_attr="weight"
)
else:
KTrussSubgraph.from_cudf_edgelist(
subgraph_df, source="src", destination="dst"
)
return KTrussSubgraph
| 38.122449 | 77 | 0.695128 |
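The docstring in the row above explains the k-truss in graph-theoretic terms; for readers without a GPU, a CPU-side illustration with NetworkX is sketched below on the same karate-club graph the docstring loads from CSV. The exact `k` convention can differ slightly between libraries, so treat this as an analogue rather than a drop-in replacement for `cugraph.ktruss_subgraph`.

```python
# CPU analogue of the GPU k-truss above, using NetworkX's k_truss.
import networkx as nx

G = nx.karate_club_graph()        # the karate dataset referenced in the docstring example
T = nx.k_truss(G, 3)              # keep edges embedded in enough triangles for k = 3
print(T.number_of_nodes(), T.number_of_edges())
```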
8790436e34ffe8a9e8e7991bb3ccbe7f2e8a44bc | 2,167 | py | Python | share/qt/extract_strings_qt.py | altecoin-altc/altecoin | 913678a298eb7eb9157cd2be184b927cfbb429a9 | ["MIT"] | null | null | null | share/qt/extract_strings_qt.py | altecoin-altc/altecoin | 913678a298eb7eb9157cd2be184b927cfbb429a9 | ["MIT"] | null | null | null | share/qt/extract_strings_qt.py | altecoin-altc/altecoin | 913678a298eb7eb9157cd2be184b927cfbb429a9 | ["MIT"] | null | null | null |
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/altecoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *altecoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("altecoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 25.797619 | 105 | 0.620212 |
844aa6f754096ded6b93d60cfcc27cbd7f6137a3 | 11,353 | py | Python | cryptol-remote-api/python/tests/cryptol/test_cryptol_api.py | GaloisInc/cryptol | 8b97cf4a8a16b511c52bc0664e4ee2feff3d874a | ["BSD-3-Clause"] | 773 | 2015-01-08T15:43:54.000Z | 2022-03-23T04:26:02.000Z | cryptol-remote-api/python/tests/cryptol/test_cryptol_api.py | GaloisInc/cryptol | 8b97cf4a8a16b511c52bc0664e4ee2feff3d874a | ["BSD-3-Clause"] | 1,050 | 2015-01-02T23:10:55.000Z | 2022-03-30T17:02:34.000Z | cryptol-remote-api/python/tests/cryptol/test_cryptol_api.py | GaloisInc/cryptol | 8b97cf4a8a16b511c52bc0664e4ee2feff3d874a | ["BSD-3-Clause"] | 117 | 2015-01-01T18:45:39.000Z | 2022-03-06T15:40:57.000Z |
import unittest
from pathlib import Path
import os
from pathlib import Path
import subprocess
import time
import unittest
import signal
from distutils.spawn import find_executable
import cryptol
import argo_client.connection as argo
import cryptol.cryptoltypes
from cryptol.single_connection import *
from cryptol import solver
from cryptol.bitvector import BV
from BitVector import * #type: ignore
class CryptolTests(unittest.TestCase):
@classmethod
def setUpClass(self):
connect(verify=False)
load_file(str(Path('tests','cryptol','test-files', 'Foo.cry')))
def test_low_level(self):
x_val = cry_eval("x")
self.assertEqual(cry_eval("Id::id x"), x_val)
self.assertEqual(call('Id::id', bytes.fromhex('ff')), BV(8,255))
self.assertEqual(call('add', b'\0', b'\1'), BV(8,1))
self.assertEqual(call('add', bytes.fromhex('ff'), bytes.fromhex('03')), BV(8,2))
# AMK: importing cryptol bindings into Python temporarily broken due to linear state usage changes
# in argo approx 1 March 2020
# def test_module_import(self):
# c = cryptol.connect()
# cryptol.add_cryptol_module('Foo', c)
# from Foo import add
# self.assertEqual(add(b'\2', 2), BV(8,4))
# self.assertEqual(add(BitVector( intVal = 0, size = 8 ), BitVector( intVal = 1, size = 8 )), BV(8,1))
# self.assertEqual(add(BitVector( intVal = 1, size = 8 ), BitVector( intVal = 2, size = 8 )), BV(8,3))
# self.assertEqual(add(BitVector( intVal = 255, size = 8 ), BitVector( intVal = 1, size = 8 )), BV(8,0))
# self.assertEqual(add(BV(8,0), BV(8,1)), BV(8,1))
# self.assertEqual(add(BV(8,1), BV(8,2)), BV(8,3))
# self.assertEqual(add(BV(8,255), BV(8,1)), BV(8,0))
def test_sat_and_prove(self):
# test a single sat model can be returned
rootsOf9 = sat('isSqrtOf9')
self.assertTrue(rootsOf9)
self.assertEqual(len(rootsOf9.models), 1)
self.assertEqual(len(rootsOf9.models[0]), 1)
self.assertTrue(int(rootsOf9.models[0][0]) ** 2 % 256, 9)
# check we can specify the solver
rootsOf9 = sat('isSqrtOf9', solver = solver.ANY)
self.assertTrue(rootsOf9)
self.assertEqual(len(rootsOf9.models), 1)
self.assertEqual(len(rootsOf9.models[0]), 1)
self.assertTrue(int(rootsOf9.models[0][0]) ** 2 % 256, 9)
# check we can ask for a specific number of results
rootsOf9 = sat('isSqrtOf9', count = 3)
self.assertTrue(rootsOf9)
self.assertEqual(len(rootsOf9.models), 3)
for model in rootsOf9.models:
self.assertEqual(len(model), 1)
self.assertTrue(int(model[0]) ** 2 % 256, 9)
# check we can ask for all results
rootsOf9 = sat('isSqrtOf9', count = None)
self.assertTrue(rootsOf9)
self.assertEqual(len(rootsOf9.models), 4)
for model in rootsOf9.models:
self.assertEqual(len(model), 1)
self.assertTrue(int(model[0]) ** 2 % 256, 9)
# check for an unsat condition
self.assertFalse(sat('\\x -> isSqrtOf9 x && ~(elem x [3,131,125,253])'))
# check for a valid condition
self.assertTrue(prove('\\x -> isSqrtOf9 x ==> elem x [3,131,125,253]'))
self.assertTrue(prove('\\x -> isSqrtOf9 x ==> elem x [3,131,125,253]', solver.Z3))
self.assertTrue(prove('\\x -> isSqrtOf9 x ==> elem x [3,131,125,253]', solver.W4_Z3))
self.assertTrue(prove('\\x -> isSqrtOf9 x ==> elem x [3,131,125,253]', solver.W4_Z3.without_hash_consing()))
self.assertTrue(prove('\\x -> isSqrtOf9 x ==> elem x [3,131,125,253]', solver.SBV_Z3))
self.assertIsInstance(prove('\\(x : [8]) -> x == reverse (reverse x)', solver.OFFLINE), solver.OfflineSmtQuery)
self.assertIsInstance(prove('\\(x : [8]) -> x == reverse (reverse x)', solver.SBV_OFFLINE), solver.OfflineSmtQuery)
self.assertIsInstance(prove('\\(x : [8]) -> x == reverse (reverse x)', solver.W4_OFFLINE), solver.OfflineSmtQuery)
def test_check(self):
res = check("\\x -> x==(x:[8])")
self.assertTrue(res.success)
self.assertEqual(res.tests_run, 100)
self.assertEqual(res.tests_possible, 256)
self.assertFalse(len(res.args), 0)
self.assertEqual(res.error_msg, None)
res = check("\\x -> x==(x:[8])", num_tests=1)
self.assertTrue(res.success)
self.assertEqual(res.tests_run, 1)
self.assertEqual(res.tests_possible, 256)
self.assertEqual(len(res.args), 0)
self.assertEqual(res.error_msg, None)
res = check("\\x -> x==(x:[8])", num_tests=42)
self.assertTrue(res.success)
self.assertEqual(res.tests_run, 42)
self.assertEqual(res.tests_possible, 256)
self.assertEqual(len(res.args), 0)
self.assertEqual(res.error_msg, None)
res = check("\\x -> x==(x:[8])", num_tests=1000)
self.assertTrue(res.success)
self.assertEqual(res.tests_run, 256)
self.assertEqual(res.tests_possible, 256)
self.assertEqual(len(res.args), 0)
self.assertEqual(res.error_msg, None)
res = check("\\x -> x==(x:[8])", num_tests='all')
self.assertTrue(res.success)
self.assertEqual(res.tests_run, 256)
self.assertEqual(res.tests_possible, 256)
self.assertEqual(len(res.args), 0)
self.assertEqual(res.error_msg, None)
res = check("\\x -> x==(x:Integer)", num_tests=1024)
self.assertTrue(res.success)
self.assertEqual(res.tests_run, 1024)
self.assertEqual(res.tests_possible, None)
self.assertEqual(len(res.args), 0)
self.assertEqual(res.error_msg, None)
res = check("\\x -> (x + 1)==(x:[8])")
self.assertFalse(res.success)
self.assertEqual(res.tests_possible, 256)
self.assertEqual(len(res.args), 1)
self.assertEqual(res.error_msg, None)
res = check("\\x -> (x / 0)==(x:[8])")
self.assertFalse(res.success)
self.assertEqual(res.tests_possible, 256)
self.assertEqual(len(res.args), 1)
self.assertIsInstance(res.error_msg, str)
def test_safe(self):
res = safe("\\x -> x==(x:[8])")
self.assertTrue(res)
res = safe("\\x -> x / (x:[8])")
self.assertFalse(res)
self.assertEqual(res.assignments, [BV(size=8, value=0)])
res = safe("\\x -> x / (x:[8])", solver.Z3)
self.assertFalse(res)
self.assertEqual(res.assignments, [BV(size=8, value=0)])
res = safe("\\x -> x / (x:[8])", solver.W4_Z3)
self.assertFalse(res)
self.assertEqual(res.assignments, [BV(size=8, value=0)])
def test_many_usages_one_connection(self):
for i in range(0,100):
x_val1 = cry_eval("x")
x_val2 = cry_eval("Id::id x")
self.assertEqual(x_val1, x_val2)
class HttpMultiConnectionTests(unittest.TestCase):
# Python initiated process running the server (if any)
p = None
# url of HTTP server
url = None
@classmethod
def setUpClass(self):
server = os.getenv('CRYPTOL_SERVER_URL')
if server is not None:
self.url = server
else:
server = os.getenv('CRYPTOL_SERVER')
if server is not None:
server = find_executable(server)
if server is None:
server = find_executable('cryptol-remote-api')
if server is not None:
self.p = subprocess.Popen(
[server, "http", "/", "--port", "8080"],
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.PIPE,
start_new_session=True)
time.sleep(5)
assert(self.p is not None)
poll_result = self.p.poll()
if poll_result is not None:
print(poll_result)
print(self.p.stdout.read())
print(self.p.stderr.read())
assert(poll_result is None)
self.url = "http://localhost:8080/"
else:
raise RuntimeError("NO CRYPTOL SERVER FOUND")
@classmethod
def tearDownClass(self):
if self.p is not None:
os.killpg(os.getpgid(self.p.pid), signal.SIGKILL)
super().tearDownClass()
def test_reset_with_many_usages_many_connections(self):
for i in range(0,100):
time.sleep(.05)
connect(url=self.url, verify=False)
load_file(str(Path('tests','cryptol','test-files', 'Foo.cry')))
x_val1 = cry_eval("x")
x_val2 = cry_eval("Id::id x")
self.assertEqual(x_val1, x_val2)
reset()
def test_server_with_many_usages_many_connections(self):
for i in range(0,100):
time.sleep(.05)
connect(url=self.url, verify=False)
load_file(str(Path('tests','cryptol','test-files', 'Foo.cry')))
x_val1 = cry_eval("x")
x_val2 = cry_eval("Id::id x")
self.assertEqual(x_val1, x_val2)
class TLSConnectionTests(unittest.TestCase):
# Python initiated process running the server (if any)
p = None
# url of HTTP server
url = None
run_tests = True
@classmethod
def setUpClass(self):
os.system('openssl req -nodes -newkey rsa:2048 -keyout server.key -out server.csr'\
+ ' -subj "/C=GB/ST=London/L=London/O=Acme Widgets/OU=IT Department/CN=localhost"')
os.system('openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt')
server = os.getenv('CRYPTOL_SERVER')
if server is not None:
server = find_executable(server)
if server is None:
server = find_executable('cryptol-remote-api')
if server is not None:
self.p = subprocess.Popen(
[server, "http", "/", "--port", "8081", "--tls"],
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.PIPE,
start_new_session=True)
time.sleep(5)
assert(self.p is not None)
poll_result = self.p.poll()
if poll_result is not None:
print(poll_result)
print(self.p.stdout.read())
print(self.p.stderr.read())
assert(poll_result is None)
self.url = "https://localhost:8081/"
else:
print("WARNING: TLS tests not being run because no cryptol server executable was found")
print(" (Note that this is expected behavior, however, for some CI tests)")
self.run_tests = False
@classmethod
def tearDownClass(self):
if self.p is not None:
os.killpg(os.getpgid(self.p.pid), signal.SIGKILL)
super().tearDownClass()
def test_tls_connection(self):
if self.run_tests:
connect(url=self.url, verify=False)
load_file(str(Path('tests','cryptol','test-files', 'Foo.cry')))
x_val1 = cry_eval("x")
x_val2 = cry_eval("Id::id x")
self.assertEqual(x_val1, x_val2)
if __name__ == "__main__":
unittest.main()
| 39.013746 | 123 | 0.588303 |
184ba6896750f2e80355b84ff3d94e42c21840f4 | 694 | py | Python | google/ads/googleads/v6/services/services/custom_interest_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/services/services/custom_interest_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/services/services/custom_interest_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import CustomInterestServiceClient
__all__ = ("CustomInterestServiceClient",)
| 33.047619 | 74 | 0.756484 |
d1ed98ef77ac7262de7be0164f3180ff3867c196 | 4,910 | py | Python | docs/conf.py | yoyonel/pyDarknetServer | 51c573971a55b1b565ffab9530a4955799a81b06 | [
"MIT"
] | null | null | null | docs/conf.py | yoyonel/pyDarknetServer | 51c573971a55b1b565ffab9530a4955799a81b06 | [
"MIT"
] | 234 | 2019-07-24T05:39:34.000Z | 2022-03-28T11:38:20.000Z | docs/conf.py | yoyonel/pyDarknetServer | 51c573971a55b1b565ffab9530a4955799a81b06 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pydarknetserver documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import pydarknetserver
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyDarknetServer'
copyright = u"2019, Lionel Atty"
author = u"Lionel Atty"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pydarknetserver.__version__
# The full version, including alpha/beta/rc tags.
release = pydarknetserver.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydarknetserverdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pydarknetserver.tex',
u'pyDarknetServer Documentation',
u'Lionel Atty', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pydarknetserver',
u'pyDarknetServer Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pydarknetserver',
u'pyDarknetServer Documentation',
author,
'pydarknetserver',
'One line description of project.',
'Miscellaneous'),
]
| 29.939024 | 77 | 0.689613 |
de9b155fe6f9c512330c0a4b86f24ead42e10845 | 116 | py | Python | homeworks/alexei_rakhmanko/hw05/level02.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | null | null | null | homeworks/alexei_rakhmanko/hw05/level02.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | 8 | 2019-11-15T18:15:56.000Z | 2020-02-03T18:05:05.000Z | homeworks/alexei_rakhmanko/hw05/level02.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | null | null | null | """Уровень 2"""
def hello(name):
"""Функция"""
if name:
return f"Hello, {name}!"
return "Hi!"
| 12.888889 | 32 | 0.491379 |
efdf31ceebf701a22daa1e7ba81b729ad1709c35 | 8,732 | py | Python | sdk/python/pulumi_azure_native/storagesync/v20190301/get_cloud_endpoint.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storagesync/v20190301/get_cloud_endpoint.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storagesync/v20190301/get_cloud_endpoint.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetCloudEndpointResult',
'AwaitableGetCloudEndpointResult',
'get_cloud_endpoint',
]
@pulumi.output_type
class GetCloudEndpointResult:
"""
Cloud Endpoint object.
"""
def __init__(__self__, azure_file_share_name=None, backup_enabled=None, friendly_name=None, id=None, last_operation_name=None, last_workflow_id=None, name=None, partnership_id=None, provisioning_state=None, storage_account_resource_id=None, storage_account_tenant_id=None, type=None):
if azure_file_share_name and not isinstance(azure_file_share_name, str):
raise TypeError("Expected argument 'azure_file_share_name' to be a str")
pulumi.set(__self__, "azure_file_share_name", azure_file_share_name)
if backup_enabled and not isinstance(backup_enabled, str):
raise TypeError("Expected argument 'backup_enabled' to be a str")
pulumi.set(__self__, "backup_enabled", backup_enabled)
if friendly_name and not isinstance(friendly_name, str):
raise TypeError("Expected argument 'friendly_name' to be a str")
pulumi.set(__self__, "friendly_name", friendly_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_operation_name and not isinstance(last_operation_name, str):
raise TypeError("Expected argument 'last_operation_name' to be a str")
pulumi.set(__self__, "last_operation_name", last_operation_name)
if last_workflow_id and not isinstance(last_workflow_id, str):
raise TypeError("Expected argument 'last_workflow_id' to be a str")
pulumi.set(__self__, "last_workflow_id", last_workflow_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if partnership_id and not isinstance(partnership_id, str):
raise TypeError("Expected argument 'partnership_id' to be a str")
pulumi.set(__self__, "partnership_id", partnership_id)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if storage_account_resource_id and not isinstance(storage_account_resource_id, str):
raise TypeError("Expected argument 'storage_account_resource_id' to be a str")
pulumi.set(__self__, "storage_account_resource_id", storage_account_resource_id)
if storage_account_tenant_id and not isinstance(storage_account_tenant_id, str):
raise TypeError("Expected argument 'storage_account_tenant_id' to be a str")
pulumi.set(__self__, "storage_account_tenant_id", storage_account_tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="azureFileShareName")
def azure_file_share_name(self) -> Optional[str]:
"""
Azure file share name
"""
return pulumi.get(self, "azure_file_share_name")
@property
@pulumi.getter(name="backupEnabled")
def backup_enabled(self) -> str:
"""
Backup Enabled
"""
return pulumi.get(self, "backup_enabled")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
"""
Friendly Name
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> Optional[str]:
"""
Resource Last Operation Name
"""
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> Optional[str]:
"""
CloudEndpoint lastWorkflowId
"""
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnershipId")
def partnership_id(self) -> Optional[str]:
"""
Partnership Id
"""
return pulumi.get(self, "partnership_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
CloudEndpoint Provisioning State
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="storageAccountResourceId")
def storage_account_resource_id(self) -> Optional[str]:
"""
Storage Account Resource Id
"""
return pulumi.get(self, "storage_account_resource_id")
@property
@pulumi.getter(name="storageAccountTenantId")
def storage_account_tenant_id(self) -> Optional[str]:
"""
Storage Account Tenant Id
"""
return pulumi.get(self, "storage_account_tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetCloudEndpointResult(GetCloudEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCloudEndpointResult(
azure_file_share_name=self.azure_file_share_name,
backup_enabled=self.backup_enabled,
friendly_name=self.friendly_name,
id=self.id,
last_operation_name=self.last_operation_name,
last_workflow_id=self.last_workflow_id,
name=self.name,
partnership_id=self.partnership_id,
provisioning_state=self.provisioning_state,
storage_account_resource_id=self.storage_account_resource_id,
storage_account_tenant_id=self.storage_account_tenant_id,
type=self.type)
def get_cloud_endpoint(cloud_endpoint_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
storage_sync_service_name: Optional[str] = None,
sync_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCloudEndpointResult:
"""
Cloud Endpoint object.
:param str cloud_endpoint_name: Name of Cloud Endpoint object.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str storage_sync_service_name: Name of Storage Sync Service resource.
:param str sync_group_name: Name of Sync Group resource.
"""
__args__ = dict()
__args__['cloudEndpointName'] = cloud_endpoint_name
__args__['resourceGroupName'] = resource_group_name
__args__['storageSyncServiceName'] = storage_sync_service_name
__args__['syncGroupName'] = sync_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:storagesync/v20190301:getCloudEndpoint', __args__, opts=opts, typ=GetCloudEndpointResult).value
return AwaitableGetCloudEndpointResult(
azure_file_share_name=__ret__.azure_file_share_name,
backup_enabled=__ret__.backup_enabled,
friendly_name=__ret__.friendly_name,
id=__ret__.id,
last_operation_name=__ret__.last_operation_name,
last_workflow_id=__ret__.last_workflow_id,
name=__ret__.name,
partnership_id=__ret__.partnership_id,
provisioning_state=__ret__.provisioning_state,
storage_account_resource_id=__ret__.storage_account_resource_id,
storage_account_tenant_id=__ret__.storage_account_tenant_id,
type=__ret__.type)
| 40.613953 | 288 | 0.682432 |
149108dd41dc2b2f850caca642970715ebf1f00a | 16,599 | py | Python | isofit/common.py | lewismc/isofit | 87fb45f76fc06e5212e0c8cdf830b996efcb2950 | [
"Apache-2.0"
] | 1 | 2020-11-05T18:11:25.000Z | 2020-11-05T18:11:25.000Z | isofit/common.py | kylepeterson777/isofit | af428c8f141141e165df66554156f9a60d9d509c | [
"Apache-2.0"
] | null | null | null | isofit/common.py | kylepeterson777/isofit | af428c8f141141e165df66554156f9a60d9d509c | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
#
# Copyright 2018 California Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ISOFIT: Imaging Spectrometer Optimal FITting
# Author: David R Thompson, david.r.thompson@jpl.nasa.gov
#
import os
import json
import xxhash
import scipy as s
from collections import OrderedDict
from scipy.interpolate import RegularGridInterpolator
from os.path import expandvars, split, abspath
from scipy.linalg import cholesky, inv, det, svd
from numba import jit
# Maximum size of our hash tables
max_table_size = 500
binary_table = [s.array([[]]),
s.array([[0], [1]]),
s.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
s.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]),
s.array([[0, 0, 0, 0], [0, 0, 0, 1],
[0, 0, 1, 0], [0, 0, 1, 1],
[0, 1, 0, 0], [0, 1, 0, 1],
[0, 1, 1, 0], [0, 1, 1, 1],
[1, 0, 0, 0], [1, 0, 0, 1],
[1, 0, 1, 0], [1, 0, 1, 1],
[1, 1, 0, 0], [1, 1, 0, 1],
[1, 1, 1, 0], [1, 1, 1, 1]]),
s.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 1],
[0, 0, 0, 1, 0], [0, 0, 0, 1, 1],
[0, 0, 1, 0, 0], [0, 0, 1, 0, 1],
[0, 0, 1, 1, 0], [0, 0, 1, 1, 1],
[0, 1, 0, 0, 0], [0, 1, 0, 0, 1],
[0, 1, 0, 1, 0], [0, 1, 0, 1, 1],
[0, 1, 1, 0, 0], [0, 1, 1, 0, 1],
[0, 1, 1, 1, 0], [0, 1, 1, 1, 1],
[1, 0, 0, 0, 0], [1, 0, 0, 0, 1],
[1, 0, 0, 1, 0], [1, 0, 0, 1, 1],
[1, 0, 1, 0, 0], [1, 0, 1, 0, 1],
[1, 0, 1, 1, 0], [1, 0, 1, 1, 1],
[1, 1, 0, 0, 0], [1, 1, 0, 0, 1],
[1, 1, 0, 1, 0], [1, 1, 0, 1, 1],
[1, 1, 1, 0, 0], [1, 1, 1, 0, 1],
[1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]),
s.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1],
[0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 1],
[0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1],
[0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1],
[0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1],
[0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1],
[0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1],
[1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1],
[1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1],
[1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1],
[1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1],
[1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1],
[1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1],
[1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1],
[1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1],
[1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1],
[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])]
eps = 1e-5 # small value used in finite difference derivatives
def emissive_radiance_old(emissivity, T, wl):
"""Radiance of a surface due to emission"""
h = 6.62607004e-34 # m2 kg s-1
c = 299792458 # m s-1
numerator = 2.0*h*(c**2) # m4 kg s-1
wl_m = wl*1e-9
numerator_per_lam5 = numerator * pow(wl_m, -5) # kg s-1 m-1
    k = 1.38064852e-23  # Boltzmann constant, m2 kg s-2 K-1
denom = s.exp(h*c/(k*wl_m*T))-1.0 # dimensionless
L = numerator_per_lam5 / denom # Watts per m3
cm2_per_m2, nm_per_m, uW_per_W = 10000, 1e9, 1e6
conversion = cm2_per_m2 * nm_per_m * uW_per_W / s.pi # -> uW nm-1 cm-2 sr-1
L = L * conversion
ddenom_dT = s.exp(h*c/(k*wl_m*T)) * h*c*(-1.0)/(pow(k*wl_m*T, 2)) * k*wl_m
dL_dT = -numerator_per_lam5 / pow(denom, 2.0) * ddenom_dT * conversion
L = L * emissivity
dL_dT = dL_dT * emissivity
L[s.logical_not(s.isfinite(L))] = 0
dL_dT[s.logical_not(s.isfinite(dL_dT))] = 0
return L, dL_dT
def load_wavelen(wavelength_file):
"""Load a wavelength file, and convert to nanometers if needed"""
q = s.loadtxt(wavelength_file)
if q.shape[1] > 2:
q = q[:, 1:3]
if q[0, 0] < 100:
q = q * 1000.0
wl, fwhm = q.T
return wl, fwhm
def emissive_radiance(emissivity, T, wl):
"""Radiance of a surface due to emission"""
c_1 = 1.88365e32/s.pi
c_2 = 14387690
J_per_eV = 1.60218e-19
wl_um = wl / 1000.0
ph_per_sec_cm2_sr_nm = c_1/(wl**4)/(s.exp(c_2/wl/T)-1.0) * emissivity
# photon energy in eV
eV_per_sec_cm2_sr_nm = 1.2398 * ph_per_sec_cm2_sr_nm/wl_um
W_per_cm2_sr_nm = J_per_eV * eV_per_sec_cm2_sr_nm
uW_per_cm2_sr_nm = W_per_cm2_sr_nm*1e6
dRdn_dT = c_1/(wl**4)*(-pow(s.exp(c_2/wl/T)-1.0, -2.0)) *\
s.exp(c_2/wl/T)*(-pow(T, -2)*c_2/wl) *\
emissivity/wl_um*1.2398*J_per_eV*1e6
return uW_per_cm2_sr_nm, dRdn_dT
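# Usage sketch (illustrative only): evaluates emissive_radiance for a graybody in the
# thermal infrared; the emissivity, temperature and wavelength grid below are placeholders.
def _demo_emissive_radiance():
    wl = s.arange(8000.0, 12000.0, 100.0)           # wavelengths in nm
    emis = s.ones(len(wl)) * 0.98                   # near-unity graybody emissivity
    L, dL_dT = emissive_radiance(emis, 300.0, wl)   # radiance (uW nm-1 cm-2 sr-1) and dL/dT
    return L, dL_dT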
@jit
def chol_inv(C):
"""Fast stable inverse for Hermetian positive definite matrices"""
R = cholesky(C, lower=False)
S = inv(R)
return S.dot(S.T)
@jit
def svd_inv(C, mineig=0, hashtable=None):
"""Fast stable inverse using SVD. This can handle near-singular matrices"""
return svd_inv_sqrt(C, mineig, hashtable)[0]
@jit
def svd_inv_sqrt(C, mineig=0, hashtable=None):
"""Fast stable inverse using SVD. This can handle near-singular matrices.
    Also returns the square root of the inverse."""
# If we have a hash table, look for the precalculated solution
h = None
if hashtable is not None:
h = xxhash.xxh64_digest(C)
if h in hashtable:
return hashtable[h]
U, V, D = svd(C)
ignore = s.where(V < mineig)[0]
Vi = 1.0 / V
Vi[ignore] = 0
Visqrt = s.sqrt(Vi)
Cinv = (D.T).dot(s.diag(Vi)).dot(U.T)
Cinv_sqrt = (D.T).dot(s.diag(Visqrt)).dot(U.T)
# If there is a hash table, cache our solution. Bound the total cache
# size by removing any extra items in FIFO order.
if hashtable is not None:
hashtable[h] = (Cinv, Cinv_sqrt)
while len(hashtable) > max_table_size:
hashtable.popitem(last=False)
return Cinv, Cinv_sqrt
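# Usage sketch (illustrative only): the optional hashtable argument caches inverses of
# repeated matrices (FIFO-bounded by max_table_size); the 3x3 matrix below is a placeholder
# and OrderedDict is already imported at the top of this module.
def _demo_svd_inv_cache():
    C = s.diag([1.0, 2.0, 3.0]) + 0.1       # small symmetric positive definite matrix
    cache = OrderedDict()
    Ci_first = svd_inv(C, 0, cache)         # computed via SVD, stored under the xxhash digest of C
    Ci_again = svd_inv(C, 0, cache)         # served from the cache on the repeat call
    return Ci_first, Ci_again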
def expand_path(directory, subpath):
"""Expand a path variable to an absolute path, if it is not one already"""
if subpath.startswith('/'):
return subpath
return os.path.join(directory, subpath)
def recursive_replace(obj, key, val):
"""Find and replace a vector in a nested structure"""
if isinstance(obj, dict):
if key in obj:
obj[key] = val
for item in obj.values():
recursive_replace(item, key, val)
elif any(isinstance(obj, t) for t in (list, tuple)):
for item in obj:
recursive_replace(item, key, val)
def get_absorption(wl, absfile):
'''Calculate water and ice absorption coefficients using indices of
refraction, and interpolate them to new wavelengths (user specifies nm)'''
# read the indices of refraction
q = s.loadtxt(absfile, delimiter=',')
wl_orig_nm = q[:, 0]
wl_orig_cm = wl_orig_nm/1e9*1e2
water_imag = q[:, 2]
ice_imag = q[:, 4]
# calculate absorption coefficients in cm^-1
water_abscf = water_imag*s.pi*4.0/wl_orig_cm
ice_abscf = ice_imag*s.pi*4.0/wl_orig_cm
# interpolate to new wavelengths (user provides nm)
water_abscf_intrp = s.interp(wl, wl_orig_nm, water_abscf)
ice_abscf_intrp = s.interp(wl, wl_orig_nm, ice_abscf)
return water_abscf_intrp, ice_abscf_intrp
def json_load_ascii(filename, shell_replace=True):
"""Load a hierarchical structure, convert all unicode to ASCII and
expand environment variables"""
def recursive_reincode(j):
if isinstance(j, dict):
for key, value in j.items():
j[key] = recursive_reincode(value)
return j
elif isinstance(j, list):
for i, k in enumerate(j):
j[i] = recursive_reincode(k)
return j
elif isinstance(j, tuple):
return tuple([recursive_reincode(k) for k in j])
else:
if shell_replace and type(j) is str:
try:
j = expandvars(j)
except IndexError:
pass
return j
with open(filename, 'r') as fin:
j = json.load(fin)
return recursive_reincode(j)
def load_config(config_file):
"""Configuration files are typically .json, with relative paths"""
config = json.load(open(config_file, 'r'))
configdir, f = split(abspath(config_file))
return expand_all_paths(config, configdir)
def expand_all_paths(config, configdir):
"""Expand any config entry containing the string 'file' into
an absolute path, if needed"""
def recursive_expand(j):
if isinstance(j, dict):
for key, value in j.items():
if isinstance(key, str) and \
('file' in key or 'directory' in key or 'path' in key) and \
isinstance(value, str):
j[key] = expand_path(configdir, value)
else:
j[key] = recursive_expand(value)
return j
elif isinstance(j, list):
for i, k in enumerate(j):
j[i] = recursive_expand(k)
return j
elif isinstance(j, tuple):
            return tuple([recursive_expand(k) for k in j])
return j
return recursive_expand(config)
def find_header(imgfile):
"""Return the header associated with an image file"""
if os.path.exists(imgfile+'.hdr'):
return imgfile+'.hdr'
ind = imgfile.rfind('.raw')
if ind >= 0:
return imgfile[0:ind]+'.hdr'
ind = imgfile.rfind('.img')
if ind >= 0:
return imgfile[0:ind]+'.hdr'
raise IOError('No header found for file {0}'.format(imgfile))
def expand_path(directory, subpath):
"""Turn a subpath into an absolute path if it is not absolute already"""
if subpath.startswith('/'):
return subpath
return os.path.join(directory, subpath)
def rdn_translate(wvn, rdn_wvn):
"""Translate radiance out of wavenumber space"""
dwvn = wvn[1:]-wvn[:-1]
dwl = 10000.0/wvn[1:] - 10000.0/wvn[:-1]
return rdn_wvn*(dwl/dwvn)
def resample_spectrum(x, wl, wl2, fwhm2, fill=False):
"""Resample a spectrum to a new wavelength / FWHM.
I assume Gaussian SRFs"""
H = s.array([srf(wl, wi, fwhmi/2.355)
for wi, fwhmi in zip(wl2, fwhm2)])
if fill is False:
return s.dot(H, x[:, s.newaxis]).ravel()
else:
xnew = s.dot(H, x[:, s.newaxis]).ravel()
good = s.isfinite(xnew)
for i, xi in enumerate(xnew):
if not good[i]:
nearest_good_ind = s.argmin(abs(wl2[good]-wl2[i]))
xnew[i] = xnew[nearest_good_ind]
return xnew
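# Usage sketch (illustrative only): resample a synthetic spectrum from a 1 nm grid onto a
# coarser instrument grid with 10 nm FWHM Gaussian response functions; all grids are placeholders.
def _demo_resample_spectrum():
    wl = s.arange(400.0, 700.0, 1.0)        # native wavelength grid, nm
    x = s.sin(wl / 50.0) + 2.0              # synthetic spectrum
    wl2 = s.arange(410.0, 690.0, 10.0)      # target channel centers
    fwhm2 = s.ones(len(wl2)) * 10.0         # target channel FWHMs
    return resample_spectrum(x, wl, wl2, fwhm2)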
def load_spectrum(init):
"""Load a single spectrum from a text file with initial columns giving
wavelength and magnitude respectively"""
x = s.loadtxt(init)
if x.ndim > 1:
x = x[:, :2]
wl, x = x.T
if wl[0] < 100:
wl = wl*1000.0 # convert microns -> nm if needed
return x, wl
else:
return x, None
def srf(x, mu, sigma):
"""Spectral Response Function """
u = (x-mu)/abs(sigma)
y = (1.0/(s.sqrt(2.0*s.pi)*abs(sigma)))*s.exp(-u*u/2.0)
return y/y.sum()
class VectorInterpolator:
def __init__(self, grid, data):
self.n = data.shape[-1]
grid_aug = grid + [s.arange(data.shape[-1])]
self.itp = RegularGridInterpolator(grid_aug, data)
def __call__(self, points):
res = []
for v in s.arange(self.n):
p_aug = s.concatenate((points, s.array([v])), axis=0)
res.append(self.itp(p_aug))
return res
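# Usage sketch (illustrative only): interpolate a small 2-D lookup table whose trailing
# axis holds the output channels, as VectorInterpolator expects; grid and data are placeholders.
def _demo_vector_interpolator():
    grid = [s.array([0.0, 1.0]), s.array([0.0, 1.0])]   # input grid per dimension
    data = s.arange(8.0).reshape((2, 2, 2))             # last axis: two output channels
    itp = VectorInterpolator(grid, data)
    return itp(s.array([0.5, 0.5]))                     # one interpolated value per channel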
class VectorInterpolatorJIT:
def __init__(self, grid, data):
"""By convention, the final dimensionn of "data" is the wavelength.
"grid" contains a list of arrays, each representing the input grid
points in the ith dimension of the table."""
self.in_d = len(data.shape)-1
self.out_d = data.shape[-1]
self.grid = [i.copy() for i in grid]
self.data = data.copy()
@jit
def __call__(self, point):
return jitinterp(self.in_d, self.out_d, self.grid, self.data, point)
@jit
def jitinterp(s_in_d, s_out_d, s_grid, s_data, point):
# we find the bottom index along each input dimension
lo_inds = s.zeros(s_in_d)
lo_fracs = s.zeros(s_in_d)
stride = []
for i in s.arange(s_in_d):
stride.append(s.prod(s_data.shape[(i+1):]))
for d in s.arange(s_in_d):
n_gridpoints = len(s_grid[d])
for j in s.arange(n_gridpoints-1):
if j == 0 and s_grid[d][j] >= point[d]:
lo_inds[d] = 0
lo_fracs[d] = 1.0
break
if j == n_gridpoints-2 and s_grid[d][-1] <= point[d]:
lo_inds[d] = n_gridpoints-2
lo_fracs[d] = 0.0
break
if s_grid[d][j] < point[d] and s_grid[d][j+1] >= point[d]:
lo_inds[d] = j
denom = (s_grid[d][j+1]-s_grid[d][j])
lo_fracs[d] = 1.0 - (point[d]-s_grid[d][j])/denom
# Now we form a list of all points on the hypercube
# and the associated fractions of each
hypercube_bin = binary_table[s_in_d].copy()
n_hypercube = len(hypercube_bin)
hypercube_weights = s.ones((n_hypercube))
hypercube_flat_inds = s.zeros((n_hypercube))
# simple version
for i in range(n_hypercube):
for j in range(s_in_d):
if hypercube_bin[i, j]:
hypercube_weights[i] = hypercube_weights[i] * lo_fracs[j]
hypercube_flat_inds[i] = \
hypercube_flat_inds[i] + (lo_inds[j]) * stride[j]
else:
hypercube_weights[i] = hypercube_weights[i] * (1.0-lo_fracs[j])
hypercube_flat_inds[i] = \
hypercube_flat_inds[i] + (lo_inds[j]+1) * stride[j]
# once per output datapoint
res = s.zeros(s_out_d)
for oi in s.arange(s_out_d):
val = 0
for i in s.arange(n_hypercube):
ind = int(hypercube_flat_inds[i]+oi)
res[oi] = res[oi] + s_data.flat[ind] * hypercube_weights[i]
return s.array(res)
def combos(inds):
'''Return all combinations of indices in a list of index sublists
For example, for the input [[1,2],[3,4,5]] it would return:
[[1,3],[1,4],[1,5],[2,3],[2,4],[2,5]]
This is used for interpolation in the high-dimensional LUT'''
n = len(inds)
cases = s.prod([len(i) for i in inds])
return s.array(s.meshgrid(*inds)).reshape((n, cases)).T
| 35.696774 | 80 | 0.514368 |
2f3ce262ac7697f026fb33b731ef2fac10326ed0 | 1,437 | py | Python | mmedit/datasets/generation_paired_dataset.py | Yshuo-Li/mmediting-test | ff8349a183b3d266495a53be0c8ad8e342e8b461 | [
"Apache-2.0"
] | 2 | 2021-04-20T11:31:37.000Z | 2021-05-27T13:04:40.000Z | mmedit/datasets/generation_paired_dataset.py | Yshuo-Li/mmediting-test | ff8349a183b3d266495a53be0c8ad8e342e8b461 | [
"Apache-2.0"
] | 1 | 2021-08-05T16:20:39.000Z | 2021-08-05T16:20:39.000Z | mmedit/datasets/generation_paired_dataset.py | Yshuo-Li/mmediting-test | ff8349a183b3d266495a53be0c8ad8e342e8b461 | [
"Apache-2.0"
] | 2 | 2021-04-22T12:10:14.000Z | 2021-05-19T02:09:48.000Z | import os.path as osp
from .base_generation_dataset import BaseGenerationDataset
from .registry import DATASETS
@DATASETS.register_module()
class GenerationPairedDataset(BaseGenerationDataset):
"""General paired image folder dataset for image generation.
    It assumes that the training images live under '/path/to/data/train' and the
    test images under '/path/to/data/test', where '/path/to/data' is given by the
    'dataroot' argument. Each sample is a pair of images concatenated along the
    width dimension (A|B).
Args:
dataroot (str | :obj:`Path`): Path to the folder root of paired images.
pipeline (List[dict | callable]): A sequence of data transformations.
test_mode (bool): Store `True` when building test dataset.
Default: `False`.
"""
def __init__(self, dataroot, pipeline, test_mode=False):
super().__init__(pipeline, test_mode)
phase = 'test' if test_mode else 'train'
self.dataroot = osp.join(str(dataroot), phase)
self.data_infos = self.load_annotations()
def load_annotations(self):
"""Load paired image paths.
Returns:
list[dict]: List that contains paired image paths.
"""
data_infos = []
pair_paths = sorted(self.scan_folder(self.dataroot))
for pair_path in pair_paths:
data_infos.append(dict(pair_path=pair_path))
return data_infos
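# Usage sketch (illustrative only): a typical config entry for the dataset registered above;
# the dataroot is a placeholder and must contain 'train' and 'test' subfolders of A|B pairs.
def _example_paired_dataset_cfg():
    return dict(
        type='GenerationPairedDataset',
        dataroot='data/paired',   # placeholder; resolves to data/paired/train or data/paired/test
        pipeline=[],              # fill with the project's transform configs
        test_mode=False)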
| 35.04878 | 79 | 0.675017 |
ca6919f85e37d6b4876ff73f68873802ca08b052 | 746 | py | Python | example.py | maxir143/GPEmu | 7ce724b65f4e1f298cc4d0e942892154d252e208 | [
"MIT"
] | null | null | null | example.py | maxir143/GPEmu | 7ce724b65f4e1f298cc4d0e942892154d252e208 | [
"MIT"
] | null | null | null | example.py | maxir143/GPEmu | 7ce724b65f4e1f298cc4d0e942892154d252e208 | [
"MIT"
] | null | null | null | from GPEmu import GamePad
def main():
# create game pad
gamepad = GamePad()
# start game pad
gamepad.connect()
# check if 'RT' is a trigger
if 'RT' in gamepad.triggers:
        # set the trigger <'RT'> to <0.5> with update=<True>, so there is no need to call GamePad.update() afterwards
gamepad.set_trigger('RT', .5, True)
# Press button <'A'> and <'B'>
gamepad.press_button('A')
gamepad.press_button('B')
# Update all the changes made that hasn't been updated to the controller at one
gamepad.update()
# set button <'B'> press to <False> and update the controller
gamepad.button('B', False, True)
# Gamepad disconnect
gamepad.disconnect()
if __name__ == '__main__':
main() | 25.724138 | 110 | 0.630027 |
9cac023edd8a8231906a54ad0ff12c0dd521a695 | 3,823 | py | Python | watchman/integration/test_wm_wait.py | kabat87/watchman | 6cab7e98f70722e9d635086596d543c0e1875e28 | [
"MIT"
] | null | null | null | watchman/integration/test_wm_wait.py | kabat87/watchman | 6cab7e98f70722e9d635086596d543c0e1875e28 | [
"MIT"
] | null | null | null | watchman/integration/test_wm_wait.py | kabat87/watchman | 6cab7e98f70722e9d635086596d543c0e1875e28 | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from watchman.integration.lib import WatchmanInstance
from watchman.integration.lib import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestWatchmanWait(WatchmanTestCase.WatchmanTestCase):
def requiresPersistentSession(self) -> bool:
return True
def spawnWatchmanWait(self, cmdArgs):
wait_script = os.environ.get("WATCHMAN_WAIT_PATH")
if wait_script:
args = [wait_script]
else:
args = [
sys.executable,
os.path.join(os.environ["WATCHMAN_PYTHON_BIN"], "watchman-wait"),
]
args.extend(cmdArgs)
env = os.environ.copy()
sock_path = WatchmanInstance.getSharedInstance().getSockPath()
env["WATCHMAN_SOCK"] = sock_path.legacy_sockpath()
pywatchman_path = env.get("PYWATCHMAN_PATH")
if pywatchman_path:
env["PYTHONPATH"] = pywatchman_path
return subprocess.Popen(
args, env=env, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
def assertWaitedFileList(self, stdout, expected) -> None:
stdout = stdout.decode("utf-8").rstrip()
files = [f.rstrip() for f in stdout.split("\n")]
self.assertFileListContains(files, expected)
def assertWaitForWmWaitWatch(self, root) -> None:
"""Wait for the specified root to appear in the watch list;
watchman-wait will initiate that asynchronously and we have
to wait for that before proceeding.
Then wait for the watch to be ready to query, otherwise the
test expectations will not be reliably met."""
# wait for the watch to appear
self.assertWaitFor(
lambda: self.rootIsWatched(root),
message="%s was not watched by watchman-wait" % root,
)
# now wait for it to be ready to query. The easiest way
# to do this is to ask for the watch ourselves, as that
# will block us until it is ready
self.watchmanCommand("watch", root)
def test_wait(self) -> None:
root = self.mkdtemp()
self.touchRelative(root, "foo")
a_dir = os.path.join(root, "a")
os.mkdir(a_dir)
self.touchRelative(a_dir, "foo")
wmwait = self.spawnWatchmanWait(
["--relative", root, "--max-events", "8", "-t", "3", root]
)
self.assertWaitForWmWaitWatch(root)
self.touchRelative(root, "bar")
self.removeRelative(root, "foo")
self.touchRelative(a_dir, "bar")
self.removeRelative(a_dir, "foo")
b_dir = os.path.join(root, "b")
os.mkdir(b_dir)
self.touchRelative(b_dir, "foo")
(stdout, stderr) = wmwait.communicate()
self.assertWaitedFileList(stdout, ["a/bar", "a/foo", "b/foo", "bar", "foo"])
def test_rel_root(self) -> None:
root = self.mkdtemp()
a_dir = os.path.join(root, "a")
os.mkdir(a_dir)
b_dir = os.path.join(root, "b")
os.mkdir(b_dir)
wmwait = self.spawnWatchmanWait(
["--relative", b_dir, "--max-events", "8", "-t", "6", a_dir, b_dir]
)
self.assertWaitForWmWaitWatch(b_dir)
self.assertWaitForWmWaitWatch(a_dir)
self.touchRelative(a_dir, "afoo")
self.touchRelative(b_dir, "bfoo")
a_sub_dir = os.path.join(a_dir, "asub")
os.mkdir(a_sub_dir)
b_sub_dir = os.path.join(b_dir, "bsub")
os.mkdir(b_sub_dir)
(stdout, stderr) = wmwait.communicate()
self.assertWaitedFileList(stdout, ["../a/afoo", "../a/asub", "bfoo", "bsub"])
| 33.831858 | 85 | 0.618886 |
64524685594673bfe5f52bfb1a755d6b1ab20e00 | 5,040 | py | Python | mapping/star/discretized_bath/asymmetric_mean.py | fhoeb/py-mapping | daf37f522f8acb6af2285d44f39cab31f34b01a4 | [
"BSD-3-Clause"
] | 1 | 2021-01-18T00:02:40.000Z | 2021-01-18T00:02:40.000Z | mapping/star/discretized_bath/asymmetric_mean.py | fhoeb/py-mapping | daf37f522f8acb6af2285d44f39cab31f34b01a4 | [
"BSD-3-Clause"
] | null | null | null | mapping/star/discretized_bath/asymmetric_mean.py | fhoeb/py-mapping | daf37f522f8acb6af2285d44f39cab31f34b01a4 | [
"BSD-3-Clause"
] | 3 | 2021-01-18T00:12:41.000Z | 2021-07-05T15:28:33.000Z | """
Discretized bath for the generation of direct asymmetric discretization coefficients, where the integrals for
the couplings and energies are evaluated using a heuristic method called mean discretization.
Introduced in: de Vega et al., Phys. Rev. B 92, 155126 (2015)
"""
import numpy as np
from scipy.integrate import quad
from mapping.star.discretized_bath.base.asymmetric import BaseDiscretizedAsymmetricBath
from mapping.utils.integration_defaults import default_epsabs, default_epsrel, default_limit
from mapping.star.discretized_bath.stopcoeff import StopCoefficients
class MeanDiscretizedAsymmetricBath(BaseDiscretizedAsymmetricBath):
def __init__(self, J, domain, max_nof_coefficients=100, **kwargs):
"""
Generates direct discretization coefficients from a spectral density J, by
mean discretization (see de Vega et al., Phys. Rev. B 92, 155126 (2015) for details on the method)
Computes max_nof_coefficients coefficients directly!
:param J: Spectral density. A function defined on 'domain', must be >0 in the inner part of domain
:param domain: List/tuple of two elements for the left and right boundary of the domain of J
:param max_nof_coefficients: Size of the buffers which hold gamma and xi coefficients (maximum number of
these coefficients that can be calculated)
:param kwargs: may contain 'ignore_zeros' If one gamma_i is numerically 0, the corresponding xi_i is also set 0,
default is False
'epsabs': absolute tolerance for the scipy integrations, default is 1e-11
'epsrel': relative tolerance for the scipy integrations, default is 1e-11
'limit': limit parameter for the scipy quad function, default is 100
"""
assert not np.isinf(domain[1])
try:
self.ignore_zeros = kwargs['ignore_zeros']
except KeyError:
self.ignore_zeros = False
try:
self.epsabs = kwargs['epsabs']
except KeyError:
self.epsabs = default_epsabs
try:
self.epsrel = kwargs['epsrel']
except KeyError:
self.epsrel = default_epsrel
try:
self.limit = kwargs['limit']
except KeyError:
self.limit = default_limit
self.J = J
self.Jx = lambda x: J(x) * x
self.domain = domain
super().__init__(self.compute_coefficients, max_nof_coefficients=max_nof_coefficients)
try:
self.gamma_buf[:], self.xi_buf[:] = self.get_mean_coefficients(max_nof_coefficients)
except ZeroDivisionError:
print('Cannot calculate ' + str(max_nof_coefficients) + ' coefficients. Encountered div/0')
raise
self._set_next_n(max_nof_coefficients)
def get_interval_avg(self, a, b):
"""
        Returns the J-weighted mean of x over the interval [a, b]
"""
return quad(self.Jx, a=a, b=b, epsabs=self.epsabs, epsrel=self.epsrel, limit=self.limit)[0] / \
quad(self.J, a=a, b=b, epsabs=self.epsabs, epsrel=self.epsrel, limit=self.limit)[0]
def get_mean_coefficients(self, nof_coefficients):
"""
Calculates the mean discretization coefficients
"""
interval_points = np.empty(nof_coefficients+2)
# include the endpoints of the interval
interval_points[0] = self.domain[0]
interval_points[-1] = self.domain[1]
x0 = self.get_interval_avg(self.domain[0], self.domain[1])
interval_points[1] = x0
        # iteratively determine the points that divide J into intervals of equal weight
for n in range(2, nof_coefficients+1):
last_points = np.empty(n+1)
last_points[0] = self.domain[0]
last_points[-1] = self.domain[1]
last_points[1:-1] = interval_points[1:n]
for pt_idx in range(n):
interval_points[pt_idx+1] = self.get_interval_avg(last_points[pt_idx], last_points[pt_idx+1])
# Calculate the couplings in the above determined intervals
couplings = np.empty(nof_coefficients)
for pt_idx in range(1, nof_coefficients+1):
a = (interval_points[pt_idx-1] + interval_points[pt_idx])/2 if pt_idx > 1 else interval_points[0]
b = (interval_points[pt_idx] + interval_points[pt_idx+1])/2 if pt_idx < nof_coefficients else \
interval_points[nof_coefficients+1]
couplings[pt_idx-1] = np.sqrt(quad(self.J, a=a, b=b, epsabs=self.epsabs, epsrel=self.epsrel,
limit=self.limit)[0])
return couplings, interval_points[1:-1]
def compute_coefficients(self, stop_n):
"""
Immediately raises a StopCoefficients exception, because everything is already calculated in the constructor
"""
raise StopCoefficients
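# Usage sketch (illustrative only): builds the mean-discretized bath for a simple exponentially
# damped spectral density on a finite domain; the cutoff, coefficient count and the
# gamma_buf/xi_buf buffers (provided by the base class) are assumptions of this example.
def _demo_mean_discretization():
    J = lambda x: x * np.exp(-x)        # illustrative spectral density, > 0 inside (0, 10)
    bath = MeanDiscretizedAsymmetricBath(J, domain=[0.0, 10.0], max_nof_coefficients=8)
    return bath.gamma_buf, bath.xi_buf  # star couplings and energies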
| 51.958763 | 120 | 0.63869 |
457818dee737a765581ca4fcc7566217fbdae1d8 | 1,505 | py | Python | app/version1/users/validator.py | SimonAwiti/sentIT-endpoints | 914b5cba4b82442cc7a9a01adaee321948d6abfa | [
"MIT"
] | null | null | null | app/version1/users/validator.py | SimonAwiti/sentIT-endpoints | 914b5cba4b82442cc7a9a01adaee321948d6abfa | [
"MIT"
] | null | null | null | app/version1/users/validator.py | SimonAwiti/sentIT-endpoints | 914b5cba4b82442cc7a9a01adaee321948d6abfa | [
"MIT"
] | null | null | null | def validate_data_signup(data):
"""validate user details"""
try:
# check if password has spaces
if " " in data["password"]:
return "password should be one word, no spaces"
# check if password is empty
elif data["password"] == "":
return "password required"
# check if username is empty
elif data["name"] == "":
return "username required"
# check if email has spaces
elif " " in data["email"]:
return "email should be one word, no spaces"
# check if email empty
elif data["email"] == "":
return "email required"
# check if Role is empty
elif data["role"] == "":
return "user Role required"
else:
return "valid"
except Exception as error:
return "please provide all the fields, missing " + str(error)
def validate_data_login(data):
"""validate user details"""
try:
        # check that the username is at least 3 characters long
        if len(data['name'].strip()) < 3:
            return "username must be at least 3 characters"
# check if password is empty
elif data["password"] == "":
return "password required"
# check if username is empty
elif data["name"] == "":
return "username required"
else:
return "valid"
except Exception as error:
return "please provide all the fields, missing " + str(error) | 35 | 69 | 0.55814 |
5b264e2d65085ec561b2ca89e1ce74aacdb5e4e3 | 45,337 | py | Python | video_3D_alg/video_draft.py | landdafku11/CogAlg | b33d706b25f63d5a2a4bbf9bb6a5d1fad5b9b5eb | [
"MIT"
] | 102 | 2016-10-09T01:33:00.000Z | 2022-01-28T01:03:23.000Z | video_3D_alg/video_draft.py | Risingabhi/CogAlg | a95ea498af3104893f92028f466a56ef3a211f10 | [
"MIT"
] | 41 | 2017-06-04T16:09:43.000Z | 2022-01-20T21:11:42.000Z | video_3D_alg/video_draft.py | Risingabhi/CogAlg | a95ea498af3104893f92028f466a56ef3a211f10 | [
"MIT"
] | 50 | 2017-05-10T06:25:36.000Z | 2021-08-02T20:28:54.000Z | import os
import cv2
import argparse
import numpy as np
from scipy import misc
from time import time
from collections import deque
''' Temporal blob composition over a sequence of frames in a video:
pixels are compared to rng adjacent pixels over lateral x, vertical y, temporal t coordinates,
then resulting 3D tuples are combined into incremental-dimensionality patterns: 1D Ps ) 2D blobs ) 3D tblobs.
tblobs will then be evaluated for recursive intra-tblob search.
'''
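# Illustrative sketch (not from the original algorithm): the minimal pixel-to-pixel comparison
# that yields the fuzzy difference d and match m accumulated below; ave_filter stands in for
# the module-level ave filter defined elsewhere in this file.
def _demo_fuzzy_comparison(p, pri_p, ave_filter=15):
    d = p - pri_p               # difference between current and prior pixel
    m = ave_filter - abs(d)     # match: filter minus absolute difference
    return d, m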
# ************ REUSABLE CLASSES *****************************************************************************************
class frame_of_patterns(object):
def __init__(self, typ):
self.core = typ[0]
self.dimension = typ[1]
self.level = typ[2:]
self.xD = 0
self.abs_xD = 0
self.e_ = []
if self.level == 'frame':
self.Ly = 0
else:
self.Lt = 0
self.yD = 0
self.abs_yD = 0
if self.core == 'm':
self.L = 0
self.I = 0
self.Dx = 0; self.Dy = 0; self.Dt = 0
self.Mx = 0; self.My = 0; self.Mt = 0
def accum_params(self, params):
" add lower-composition param to higher-composition param "
self.L += params[0]
self.I += params[1]
self.Dx += params[2]
self.Dy += params[3]
self.Dt += params[4]
self.Mx += params[5]
self.My += params[6]
self.Mt += params[7]
class pattern(object):
def __init__(self, typ, x_coord=(9999999, -1), y_coord=(9999999, -1), t_coord=(9999999, -1), sign=-1):
" initialize P, segment or blob "
self.core = typ[0]
self.dimension = typ[1]
self.level = typ[2:]
self.sign = sign
self.L = 0 # length/area of a pattern
self.I = 0 # summed input
self.Dx = 0; self.Dy = 0; self.Dt = 0 # lateral, vertical, temporal D
self.Mx = 0; self.My = 0; self.Mt = 0 # lateral, vertical, temporal M
self.Alt0 = 0; self.Alt1 = 0; self.Alt2 = 0; self.Alt3 = 0; self.Alt4 = 0 # indicate value of overlapping alt-core blobs
self.min_x, self.max_x = x_coord
self.e_ = []
self.terminated = False
# Pattern level-specific init:
if self.level != 'P':
self.min_y, self.max_y = y_coord
if not self.level in ['segment', 'blob']:
self.min_t, self.max_t = t_coord
def type(self):
return self.core + self.dimension + self.level
def params(self):
" params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4] "
return [self.L, self.I, self.Dx, self.Dy, self.Dt, self.Mx, self.My, self.Mt,
self.Alt0, self.Alt1, self.Alt2, self.Alt3, self.Alt4]
def coords(self):
" coords = [min_x, max_x, min_y, max_y] "
if self.level == 'P':
return [self.min_x, self.max_x]
elif self.level in ['segment', 'blob']:
return [self.min_x, self.max_x, self.min_y, self.max_y]
else:
return [self.min_x, self.max_x, self.min_y, self.max_y, self.min_t, self.max_t]
def accum_params(self, params):
" add lower-composition param to higher-composition param "
self.L += params[0]
self.I += params[1]
self.Dx += params[2]
self.Dy += params[3]
self.Dt += params[4]
self.Mx += params[5]
self.My += params[6]
self.Mt += params[7]
self.Alt0 += params[8]
self.Alt1 += params[9]
self.Alt2 += params[10]
self.Alt3 += params[11]
self.Alt4 += params[12]
def extend_coords(self, coords):
" replace min/max coords with min/max that includes min/max of input coords "
self.min_x = min(self.min_x, coords[0])
self.max_x = max(self.max_x, coords[1])
if len(coords) > 2:
self.min_y = min(self.min_y, coords[2])
self.max_y = max(self.max_y, coords[3])
if len(coords) > 4:
self.min_t = min(self.min_t, coords[4])
self.max_t = max(self.max_t, coords[5])
def rename(self, list):
" rename multiple internal attributes at once "
for old_name, new_name in list:
self.__dict__[new_name] = self.__dict__.pop(old_name)
def set(self, dict):
" create new or set values of multiple attributes at once "
        for key, value in dict.items():
self.__dict__[key] = value
# ************ REUSABLE CLASSES ENDS ************************************************************************************
# ************ MISCELLANEOUS FUNCTIONS **********************************************************************************
# Includes:
# -rebuild_segment()
# -rebuild_blob()
# -rebuild_frame()
# -fetch_frame()
# ***********************************************************************************************************************
def rebuild_segment(dir, index, seg, blob_img, frame_img, print_separate_blobs=0, print_separate_segs=1):
if print_separate_segs: seg_img = np.array([[[127] * 4] * X] * Y)
y = seg.min_y # min_y
for P in seg.e_:
x = P.min_x
for i in range(P.L):
frame_img[y, x, : 3] = [255, 255, 255] if P.sign else [0, 0, 0]
if print_separate_blobs: blob_img[y, x, : 3] = [255, 255, 255] if P.sign else [0, 0, 0]
if print_separate_segs: seg_img[y, x, : 3] = [255, 255, 255] if P.sign else [0, 0, 0]
x += 1
y += 1
if print_separate_segs:
min_x, max_x, min_y, max_y = seg.coords()
cv2.rectangle(seg_img, (min_x - 1, min_y - 1), (max_x + 1, max_y + 1), (0, 255, 255), 1)
cv2.imwrite(dir + 'seg%d.jpg' % (index), seg_img)
return blob_img
# ---------- rebuild_segment() end ----------------------------------------------------------------------------------
def rebuild_blob(dir, index, blob, frame_img, print_separate_blobs=1, print_separate_segs=0):
" Rebuilt data of a blob into an image "
if print_separate_blobs:
blob_img = np.array([[[127] * 4] * X] * Y)
else:
blob_img = None
for iidx, idx in enumerate(blob.sorted_min_x_idx_): # Iterate through segments' sorted id
blob_img = rebuild_segment(dir + 'blob%d' % (index), iidx, blob.e_[idx], blob_img, frame_img, print_separate_blobs, print_separate_segs)
if print_separate_blobs:
min_x, max_x, min_y, max_y = blob.coords()
cv2.rectangle(blob_img, (min_x - 1, min_y - 1), (max_x + 1, max_y + 1), (0, 255, 255), 1)
cv2.imwrite(dir + 'blob%d.jpg' % (index), blob_img)
return frame_img
# ---------- rebuild_blob() end -------------------------------------------------------------------------------------
def rebuild_frame(dir, frame, print_separate_blobs=0, print_separate_segs=0):
" Rebuilt data of a frame into an image "
frame_img = np.array([[[127] * 4] * X] * Y)
if (print_separate_blobs or print_separate_segs) and not os.path.exists(dir):
os.mkdir(dir)
for iindex, index in enumerate(frame.sorted_min_x_idx_): # Iterate through blobs' sorted indices
frame_img = rebuild_blob(dir + '/', iindex, frame.e_[index], frame_img, print_separate_blobs, print_separate_segs)
cv2.imwrite(dir + '.jpg', frame_img)
# ---------- rebuild_frame() end ------------------------------------------------------------------------------------
def fetch_frame(video):
" Short call to read a gray-scale frame"
_, frame = video.read()
return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype('int32')
# ************ MISCELLANEOUS FUNCTIONS END ******************************************************************************
# ************ MAIN FUNCTIONS *******************************************************************************************
# Includes:
# -lateral_comp()
# -vertical_comp()
# -temporal_comp()
# -form_P()
# -scan_P_()
# -form_segment()
# -term_segment_()
# -form_blob()
# -sort_coords()
# -scan_blob_()
# -scan_segment_()
# -find_overlaps()
# -find_olp_index()
# -form_tsegment()
# -video_to_tblobs()
# ***********************************************************************************************************************
def lateral_comp(pixel_):
# Comparison over x coordinate, within rng of consecutive pixels on each line
ders1_ = [] # tuples of complete 1D derivatives: summation range = rng
rng_ders1_ = deque(maxlen=rng) # incomplete ders1s, within rng from input pixel: summation range < rng
rng_ders1_.append((0, 0, 0))
max_index = rng - 1 # max index of rng_ders1_
for x, p in enumerate(pixel_): # pixel p is compared to rng of prior pixels within horizontal line, summing d and m per prior pixel
back_fd, back_fm = 0, 0 # fuzzy derivatives from rng of backward comps per pri_p
for index, (pri_p, fd, fm) in enumerate(rng_ders1_):
d = p - pri_p
m = ave - abs(d)
fd += d # bilateral fuzzy d: running sum of differences between pixel and all prior and subsequent pixels within rng
fm += m # bilateral fuzzy m: running sum of matches between pixel and all prior and subsequent pixels within rng
back_fd += d # running sum of d between pixel and all prior pixels within rng
back_fm += m # running sum of m between pixel and all prior pixels within rng
if index < max_index:
rng_ders1_[index] = (pri_p, fd, fm)
elif x > min_coord: # after pri_p comp over full bilateral rng
ders1_.append((pri_p, fd, fm)) # completed bilateral tuple is transferred from rng_ders_ to ders_
rng_ders1_.appendleft((p, back_fd, back_fm)) # new tuple with initialized d and m, maxlen displaces completed tuple
# last incomplete rng_ders1_ in line are discarded, vs. ders1_ += reversed(rng_ders1_)
return ders1_
# ---------- lateral_comp() end -------------------------------------------------------------------------------------
def vertical_comp(ders1_, rng_ders2__):
# Comparison to bilateral rng of vertically consecutive pixels forms ders2: pixel + lateral and vertical derivatives
ders2_ = [] # line of tuples with complete 2D derivatives: summation range = rng
new_rng_ders2__ = deque() # 2D array: line of ders2_s buffered for next-line comp
max_index = rng - 1 # max ders2_ index
x = rng # lateral coordinate of pixel in input ders1
for (p, dx, mx), rng_ders2_ in zip(ders1_, rng_ders2__): # pixel comp to rng _pixels in rng_ders2_, summing dy and my
index = 0
back_dy, back_my = 0, 0
for (_p, _dx, fdy, _mx, fmy) in rng_ders2_: # vertical derivatives are incomplete; prefix '_' denotes higher-line variable
dy = p - _p
my = ave - abs(dy)
fdy += dy # running sum of differences between pixel _p and all higher and lower pixels within rng
fmy += my # running sum of matches between pixel _p and all higher and lower pixels within rng
back_dy += dy # running sum of dy between pixel p and all higher pixels within rng
back_my += my # running sum of my between pixel p and all higher pixels within rng
if index < max_index:
rng_ders2_[index] = (_p, _dx, fdy, _mx, fmy)
elif y > min_coord:
ders2_.append((_p, _dx, fdy, _mx, fmy)) # completed bilateral tuple is transferred from rng_ders2_ to ders2_
index += 1
rng_ders2_.appendleft((p, dx, back_dy, mx, back_my)) # new ders2 displaces completed one in vertical ders2_ via maxlen
new_rng_ders2__.append(rng_ders2_) # 2D array of vertically-incomplete 2D tuples, converted to rng_ders2__, for next line
x += 1
return ders2_, new_rng_ders2__
# ---------- vertical_comp() end ------------------------------------------------------------------------------------
def temporal_comp(ders2_, rng_ders3___, _xP_, _yP_, _tP_, frame, _frame):
# ders2_: input line of complete 2D ders
# rng_ders3___: prior frame of incomplete 3D tuple buffers, sliced into lines
# comparison between t_rng temporally consecutive pixels, forming ders3: 3D tuple of derivatives per pixel
# each of the following contains 2 types, per core variables m and d:
xP = [pattern('mxP', (rng, -1)), pattern('dxP', (rng, -1))] # initialize with min_x = rng, max_x = -1
yP = [pattern('myP', (rng, -1)), pattern('dyP', (rng, -1))]
tP = [pattern('mtP', (rng, -1)), pattern('dtP', (rng, -1))]
xP_ = [deque(), deque()]
yP_ = [deque(), deque()] # line y - rng
tP_ = [deque(), deque()]
xbuff_ = [deque(), deque()]
ybuff_ = [deque(), deque()] # line y - rng - 1: _Ps buffered by previous run of scan_P_
tbuff_ = [deque(), deque()]
rng_ders3__ = rng_ders3___.pop(0)
new_rng_ders3__ = deque() # 2D array: line of rng_ders3_s buffered for next-frame comp
max_index = t_rng - 1 # max rng_ders3_ index
x = rng # lateral coordinate of pixel
for (p, dx, dy, mx, my), rng_ders3_ in zip(ders2_, rng_ders3__): # pixel comp to rng _pixels in rng_ders3_, summing dt and mt
index = 0
back_dt, back_mt = 0, 0
for (_p, _dx, _dy, fdt, _mx, _my, fmt) in rng_ders3_: # temporal derivatives are incomplete; prefix '_' denotes previous-frame variable
dt = p - _p
mt = ave - abs(dt)
fdt += dt # running sum of differences between pixel _p and all previous and subsequent pixels within t_rng
fmt += mt # running sum of matches between pixel _p and all previous and subsequent pixels within t_rng
back_dt += dt # running sum of dt between pixel p and all previous pixels within t_rng
back_mt += mt # running sum of mt between pixel p and all previous pixels within t_rng
if index < max_index:
rng_ders3_[index] = (_p, _dx, _dy, fdt, _mx, _my, fmt)
elif t > t_min_coord:
ders = _p, _dx, _dy, fdt, _mx, _my, fmt
xP, xP_, xbuff_, _xP_, frame, _frame = form_P(ders, x, X - rng - 1, xP, xP_, xbuff_, _xP_, frame, _frame, 0) # mxP: typ = 0
yP, yP_, ybuff_, _yP_, frame, _frame = form_P(ders, x, X - rng - 1, yP, yP_, ybuff_, _yP_, frame, _frame, 1) # myP: typ = 1
tP, tP_, tbuff_, _tP_, frame, _frame = form_P(ders, x, X - rng - 1, tP, tP_, tbuff_, _tP_, frame, _frame, 2) # mtP: typ = 2
index += 1
rng_ders3_.appendleft((p, dx, dy, back_dt, mx, my, back_mt)) # new ders3 displaces completed one in temporal rng_ders3_ via maxlen
new_rng_ders3__.append(rng_ders3_) # rng_ders3__: line of incomplete ders3 buffers, to be added to next-frame rng_ders3___
x += 1
# terminate last higher line dP (typ = 3 -> 5) within neg mPs
for typ in range(dim, dim * 2):
if typ == 3: buff_ = xbuff_[1]; hP_ = _xP_[1]
if typ == 4: buff_ = ybuff_[1]; hP_ = _yP_[1]
if typ == 5: buff_ = tbuff_[1]; hP_ = _tP_[1]
while buff_:
hP = buff_.popleft()
if hP.roots != 1: # no roots
frame, _frame = form_blob(hP, frame, _frame, typ)
hP_, frame, _frame = term_segment_(hP_, frame, _frame, typ)
rng_ders3___.append(new_rng_ders3__) # rng_ders3___ for next frame
return rng_ders3___, xP_, yP_, tP_, frame, _frame
# ---------- temporal_comp() end ------------------------------------------------------------------------------------
def form_P(ders, x, term_x, P, P_, buff_, hP_, frame, _frame, typ, is_dP=0):
# Initializes and accumulates 1D pattern
# is_dP = bool(typ // dim), computed directly for speed and clarity:
p, dx, dy, dt, mx, my, mt = ders # 3D tuple of derivatives per pixel, "x" for lateral, "y" for vertical, "t" for temporal
if typ == 0: core = mx; alt0 = dx; alt1 = my; alt2 = mt; alt3 = dy; alt4 = dt
elif typ == 1: core = my; alt0 = dy; alt1 = mx; alt2 = mt; alt3 = dx; alt4 = dt
elif typ == 2: core = mt; alt0 = dt; alt1 = mx; alt2 = my; alt3 = dx; alt4 = dy
elif typ == 3: core = dx; alt0 = mx; alt1 = dy; alt2 = dt; alt3 = my; alt4 = mt
elif typ == 4: core = dy; alt0 = my; alt1 = dx; alt2 = dt; alt3 = mx; alt4 = mt
else: core = dt; alt0 = mt; alt1 = dx; alt2 = dy; alt3 = mx; alt4 = my
s = 1 if core > 0 else 0
if not (s == P[is_dP].sign or x == P[is_dP].min_x): # P is terminated. P[0] is mP, P[1] is dP
P, P_, buff_, hP_, frame, _frame = term_P(s, x, P, P_, buff_, hP_, frame, _frame, typ, is_dP)
# Continued or initialized input and derivatives are accumulated:
P[is_dP].accum_params([1, p, dx, dy, dt, mx, my, mt, abs(alt0), abs(alt1), abs(alt2), abs(alt3), abs(alt4)])
# params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
P[is_dP].e_.append(ders)
if P[is_dP].sign == -1: P[is_dP].sign = s
if x == term_x: # P is terminated
P, P_, buff_, hP_, frame, _frame = term_P(s, x + 1, P, P_, buff_, hP_, frame, _frame, typ, is_dP)
return P, P_, buff_, hP_, frame, _frame # accumulated within line, P_ is a buffer for conversion to _P_
# ---------- form_P() end -------------------------------------------------------------------------------------------
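# Illustrative sign check (added comment): for typ = 0 the core variable is mx, so a P keeps extending while the
# summed lateral match stays positive (s = 1); it is terminated by term_P() as soon as core drops to <= 0 (s = 0),
# or when x reaches term_x at the end of the line.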
def term_P(s, x, P, P_, buff_, hP_, frame, _frame, typ, is_dP):
# Terminates 1D pattern if sign change or P_ end
if not is_dP and P[is_dP].sign == 0:
x0, L, ders_ = P[0].min_x, P[0].L, P[0].e_
P[1] = pattern(typ_str[typ + dim] + 'P', (x0, -1)) # dPs (P[1]) formed inside of negative mP (P[0])
for i in range(L):
P, P_, buff_, _P_, frame, _frame = form_P(ders_[i], x0 + i, x - 1, P, P_, buff_, hP_, frame, _frame, typ + dim, True) # is_dP = 1
P[is_dP].max_x = x - 1
P[is_dP].terminated = True
if y == rng * 2: # 1st line P_ is converted to init hP_; scan_P_(), form_segment(), form_blob() use one type of Ps, hPs, buffs
P_[is_dP].append((P[is_dP], [])) # P, _fork_, no root yet
else:
P_[is_dP], buff_[is_dP], hP_[is_dP], frame, _frame \
= scan_P_(x - 1, P[is_dP], P_[is_dP], buff_[is_dP], hP_[is_dP], frame, _frame, typ) # P scans hP_
P[is_dP] = pattern(typ_str[typ] + 'P', (x, -1), sign=s) # new P initialization at x0 = x
return P, P_, buff_, hP_, frame, _frame
# ---------- term_P() end -------------------------------------------------------------------------------------------
def scan_P_(x, P, P_, _buff_, hP_, frame, _frame, typ):
# P scans shared-x-coordinate hPs in higher P_, combines overlapping Ps into blobs
buff_ = deque() # new buffer for displaced hPs (higher-line P tuples), for scan_P_(next P)
fork_ = [] # refs to hPs connected to input P
_x0 = 0 # to start while loop
x0 = P.min_x
while _x0 <= x: # while x values overlap between P and _P
if _buff_:
hP = _buff_.popleft() # hP was extended to segment and buffered in prior scan_P_
elif hP_:
hP, frame, _frame = form_segment(hP_.popleft(), frame, _frame, typ)
else:
break # higher line ends, all hPs are converted to segments
roots = hP.roots
_x0 = hP.e_[-1].min_x # hP.e_[-1] is _P
_x = hP.e_[-1].max_x
if P.sign == hP.sign and not _x < x0 and not x < _x0: # P comb -> blob if s == _s, _last_x >= first_x and last_x >= _first_x
roots += 1
hP.roots = roots
fork_.append(hP) # P-connected hPs will be converted to segments at each _fork
if _x > x: # x overlap between hP and next P: hP is buffered for next scan_P_, else hP included in a blob segment
buff_.append(hP)
elif roots != 1:
frame, _frame = form_blob(hP, frame, _frame, typ) # segment is terminated and packed into its blob
_x0 = _x + 1 # = first x of next _P
buff_ += _buff_ # _buff_ is likely empty
P_.append([P, fork_]) # P with no overlap to next _P is extended to hP and buffered for next-line scan_P_
return P_, buff_, hP_, frame, _frame # hP_ and buff_ contain only remaining _Ps, with _x => next x
# ---------- scan_P_() end ------------------------------------------------------------------------------------------
def form_segment(hP, frame, _frame, typ):
# Add hP to higher-line segment or convert it into new segment; merge blobs
_P, fork_ = hP
ave_x = (_P.L - 1) // 2 # extra-x L = L-1 (1x in L)
if len(fork_) == 1 and fork_[0].roots == 1: # hP has one fork: hP.fork_[0], and that fork has one root: hP
fork = fork_[0]
# hP is merged into higher-line blob segment (Pars, roots, ave_x, xD, Py_, coords) at hP.fork_[0]:
fork.accum_params(_P.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
fork.roots = 0 # roots
xd = ave_x - fork.ave_x
fork.ave_x = ave_x # ave_x
fork.xD += xd # xD for seg normalization and orientation, or += |dx| for curved yL?
fork.abs_xD += abs(xd)
fork.xd_.append(xd)
fork.e_.append(_P) # Py_: vertical buffer of Ps merged into seg
fork.extend_coords(_P.coords()) # min_x, max_x
hP = fork # replace segment with including fork's segment
else: # new segment is initialized:
hP = pattern(typ_str[typ] + 'segment', (_P.min_x, _P.max_x), (y - rng - 1, -1), sign=_P.sign) # new instance of pattern class
hP.accum_params(_P.params()) # initialize params with _P's params, etc.
hP.roots = 0 # init roots
hP.fork_ = fork_ # init fork_
hP.ave_x = ave_x # ave_x
hP.xD = 0 # xD
hP.abs_xD = 0
hP.xd_ = [0] # xd_ of corresponding Py_
hP.e_.append(_P) # Py_
if not fork_: # if no fork_: initialize blob
blob = pattern(typ_str[typ] + 'blob', (_P.min_x, _P.max_x), (y - rng - 1, -1), sign=hP.sign)
blob.xD = 0
blob.abs_xD = 0
blob.Ly = 0
blob.remaining_roots = 1
else: # else merge into fork's blob
blob = fork_[0].blob
hP.blob = blob # merge hP into blob
blob.e_.append(hP) # segment is buffered into blob's root_
if len(fork_) > 1: # merge blobs of all forks
if fork_[0].roots == 1: # if roots == 1
frame, _frame = form_blob(fork_[0], frame, _frame, typ, 1) # terminate seg of 1st fork
for fork in fork_[1:len(fork_)]: # merge blobs of other forks into blob of 1st fork
if fork.roots == 1:
frame, _frame = form_blob(fork, frame, _frame, typ, 1)
if not fork.blob is blob: # if not already merged/same
blobs = fork.blob
blob.accum_params(blobs.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
blob.extend_coords(blobs.coords()) # coord = [min_x, max_x, min_y, max_y]
blob.xD += blobs.xD
blob.abs_xD += blobs.abs_xD
blob.Ly += blobs.Ly
blob.remaining_roots += blobs.remaining_roots
for seg in blobs.e_:
if not seg is fork:
seg.blob = blob # blobs in other forks are references to blob in the first fork
blob.e_.append(seg) # buffer of merged root segments
fork.blob = blob
blob.e_.append(fork)
blob.remaining_roots -= 1
return hP, frame, _frame
# ---------- form_segment() end -----------------------------------------------------------------------------------------
def term_segment_(hP_, frame, _frame, typ):
# merge segments of last line into their blobs
while hP_:
hP, frame, _frame = form_segment(hP_.popleft(), frame, _frame, typ)
frame, _frame = form_blob(hP, frame, _frame, typ)
return hP_, frame, _frame
# ---------- term_segment_() end ----------------------------------------------------------------------------------------
def form_blob(term_seg, frame, _frame, typ, y_carry=0):
# Terminated segment is merged into continued or initialized blob (all connected segments)
blob = term_seg.blob
term_seg.max_y = y - rng - 1 - y_carry # set max_y <- current y; y_carry: min elevation of term_seg over current hP
blob.accum_params(term_seg.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
blob.extend_coords(term_seg.coords()) # coords = [min_x, max_x, min_y, max_y]
blob.xD += term_seg.xD # ave_x angle, to evaluate blob for re-orientation
blob.abs_xD += term_seg.abs_xD
blob.Ly += len(term_seg.e_) # Ly = number of slices in segment
blob.remaining_roots += term_seg.roots - 1 # reference to term_seg is already in blob[9]
term_seg.terminated = True
if blob.remaining_roots == 0: # if remaining_roots == 0: blob is terminated and packed in frame
# sort indices of blob' segments by their min and max coordinates
blob.sorted_min_x_idx_, blob.sorted_max_x_idx_, blob.sorted_min_y_idx_, blob.sorted_max_y_idx_, \
blob.sorted_min_x_, blob.sorted_max_x_, blob.sorted_min_y_, blob.sorted_max_y_ = sort_segments(blob.e_)
# terminated blob is packed into frame
if term_seg.core == 'm' and term_seg.sign == 0: # is negative mblob
frame[typ].accum_params(term_seg.params())
frame[typ].xD += blob.xD # ave_x angle, to evaluate frame for re-orientation
frame[typ].abs_xD += blob.abs_xD
frame[typ].Ly += blob.Ly # +Ly
delattr(blob, 'remaining_roots')
blob.terminated = True
frame[typ].e_.append(blob)
# initialize tsegment with terminated blob:
blob.fork_ = []
if t > t_rng * 2:
frame[typ], _frame[typ] = scan_blob_(blob, frame[typ], _frame[typ])
return frame, _frame
# ---------- form_blob() end ----------------------------------------------------------------------------------------
def sort_segments(e_):
" sort indices by min|max coords of segments"
sorted_idx_min_x_ = sorted(range(len(e_)), key=lambda i: e_[i].min_x) # segment indices sorted by min_x
sorted_idx_max_x_ = sorted(range(len(e_)), key=lambda i: e_[i].max_x) # segment indices sorted by max_x
sorted_idx_min_y_ = sorted(range(len(e_)), key=lambda i: e_[i].min_y) # segment indices sorted by min_y
sorted_idx_max_y_ = sorted(range(len(e_)), key=lambda i: e_[i].max_y) # segment indices sorted by max_y
# the following lists are for zoning olp segs
return sorted_idx_min_x_, sorted_idx_max_x_, sorted_idx_min_y_, sorted_idx_max_y_, \
[e_[sorted_idx_min_x_[i]].min_x for i in range(len(e_))], \
[e_[sorted_idx_max_x_[i]].max_x for i in range(len(e_))], \
[e_[sorted_idx_min_y_[i]].min_y for i in range(len(e_))], \
[e_[sorted_idx_max_y_[i]].max_y for i in range(len(e_))]
# ---------- sort_segments() end ------------------------------------------------------------------------------------
def scan_blob_(blob, frame, _frame):
# blob scans pri_blobs in higher frame, combines overlapping blobs into tblobs
# Select only overlapping pri_blobs in _frame for speed?
debug_idx_ = []
olp_idx_ = find_overlaps(_frame, blob.coords())
if len(olp_idx_) != 0:
pri_tseg_ = _frame.e_ # list of same type pri_tsegs
for olp_idx in olp_idx_:
pri_tseg = pri_tseg_[olp_idx]
pri_blob = pri_tseg.e_[-1]
if pri_blob.sign == blob.sign: # Check sign
olp_min_x = max(pri_blob.min_x, blob.min_x)
olp_max_x = min(pri_blob.max_x, blob.max_x)
olp_min_y = max(pri_blob.min_y, blob.min_y)
olp_max_y = min(pri_blob.max_y, blob.max_y)
if scan_segment_(blob, pri_blob, [olp_min_x, olp_max_x, olp_min_y, olp_max_y]):
pri_tseg.roots += 1
blob.fork_.append(pri_tseg)
debug_idx_.append(olp_idx)
# For Debugging --------------------------------------------------------------
    # Print selected blob formed in frame at t > t_rng * 2 and all its overlapping blobs in the previous frame
global olp_debug, debug_case
if olp_debug and t > t_rng * 2 and len(debug_idx_) != 0:
if debug_case == output_at_case:
filtered_pri_frame = np.array([[[127] * 4] * X] * Y)
rebuild_blob('./images/', 0, blob, filtered_pri_frame, 1)
for i in debug_idx_:
rebuild_blob('./images/olp_', i, _frame.e_[i].e_[-1], filtered_pri_frame, 1)
olp_debug = False
else:
debug_case += 1
# ----------------------------------------------------------------------------
return frame, _frame
# ---------- scan_blob_() end ---------------------------------------------------------------------------------------
def scan_segment_(blob, pri_blob, bounding_box):
# scans segments for overlap
# choose only segments inside olp to check:
idx = find_overlaps(blob, bounding_box)
pri_idx = find_overlaps(pri_blob, bounding_box)
for i in idx:
seg = blob.e_[i]
olp_idx_ = np.intersect1d(find_overlaps(pri_blob, seg.coords()), pri_idx)
if len(olp_idx_) != 0:
pri_seg_ = pri_blob.e_
for olp_idx in olp_idx_:
pri_seg = pri_seg_[olp_idx]
olp_min_y = max(pri_seg.min_y, seg.min_y) # olp_min/max_y indicates
olp_max_y = min(pri_seg.max_y, seg.max_y) # potentially overlapping Ps
olp_P_idx_stop = olp_max_y - seg.min_y + 1
olp_P_idx = olp_min_y - seg.min_y
olp_pri_P_idx = olp_min_y - pri_seg.min_y
while olp_P_idx < olp_P_idx_stop:
P = seg.e_[olp_P_idx]
pri_P = pri_seg.e_[olp_pri_P_idx]
if P.min_x <= pri_P.max_x and P.max_x >= pri_P.min_x:
return True
olp_P_idx += 1
olp_pri_P_idx += 1
return False
# ---------- scan_segment_() end ------------------------------------------------------------------------------------
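# Illustrative overlap check (added comment): 1D spans (min_x=3, max_x=7) and (min_x=6, max_x=10) overlap because
# 3 <= 10 and 7 >= 6, which is the same per-P test used above: P.min_x <= pri_P.max_x and P.max_x >= pri_P.min_x.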
def find_overlaps(obj, bounding_box):
# Search for boundaries of sorted pri_blobs that overlap boundaries of input blob
N = len(obj.e_)
min_x, max_x, min_y, max_y = bounding_box
# find_olp_index(a_, first_index, last_index, target, right_olp):
_min_x_idx = find_olp_index(obj.sorted_min_x_, 0, N, max_x, 1)
_max_x_idx = find_olp_index(obj.sorted_max_x_, 0, N, min_x, 0)
_min_y_idx = find_olp_index(obj.sorted_min_y_, 0, N, max_y, 1)
_max_y_idx = find_olp_index(obj.sorted_max_y_, 0, N, min_y, 0)
_min_x_less_or_equal_max_x_indices = obj.sorted_min_x_idx_[:_min_x_idx] # overlap prerequisite: _min_x <= max_x
_min_y_less_or_equal_max_y_indices = obj.sorted_min_y_idx_[:_min_y_idx] # overlap prerequisite: _min_y <= max_y
    _max_x_greater_or_equal_min_x_indices = obj.sorted_max_x_idx_[_max_x_idx:] # overlap prerequisite: _max_x >= min_x
    _max_y_greater_or_equal_min_y_indices = obj.sorted_max_y_idx_[_max_y_idx:] # overlap prerequisite: _max_y >= min_y
# e_ overlap is a common subset of the above 4 sets
return np.intersect1d(np.intersect1d(_min_x_less_or_equal_max_x_indices, _max_x_greater_or_equal_min_x_indices),
np.intersect1d(_min_y_less_or_equal_max_y_indices, _max_y_greater_or_equal_min_y_indices))
# ---------- find_overlaps() end ------------------------------------------------------------------------------------
def find_olp_index(a_, i0, i, target, right_olp=0):
# a binary search module
if target + right_olp <= a_[i0]:
return i0
elif a_[i - 1] < target + right_olp:
return i
else:
im = (i0 + i) // 2
if a_[im] < target + right_olp:
return find_olp_index(a_, im, i, target, right_olp)
else:
return find_olp_index(a_, i0, im, target, right_olp)
# ---------- find_olp_index() end -----------------------------------------------------------------------------------
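# Illustrative trace (added comment): find_olp_index([1, 3, 5, 7], 0, 4, 4) returns 2, the first index whose value
# is >= the target 4; find_overlaps() slices the sorted coordinate index lists at such positions so that only
# elements that can still overlap the bounding box remain as candidates.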
def form_tsegment(blob, videoo, typ):
# Add blob to previous-frame tsegment or convert it into new tsegment; merge tblobs
ave_x = (blob.max_x - blob.min_x) // 2
ave_y = (blob.max_y - blob.min_y) // 2
fork_ = blob.fork_
delattr(blob, 'fork_')
    if len(fork_) == 1 and fork_[0].roots == 1: # blob has one fork: fork_[0], and that fork has one root: blob
fork = fork_[0]
        # blob is merged into the previous-frame tsegment (params, roots, ave_x, ave_y, xD, yD, blob_, coords) at fork_[0]:
fork.accum_params(blob.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
fork.roots = 0 # roots
xd = ave_x - fork.ave_x
yd = ave_y - fork.ave_y
fork.ave_x = ave_x # ave_x
fork.ave_y = ave_y # ave_y
fork.xD += xd # xD for seg normalization and orientation
fork.yD += yd
fork.abs_xD += abs(xd)
fork.abs_yD += abs(yd)
fork.xyd_.append((xd, yd))
fork.e_.append(blob) # blob_
fork.extend_coords(blob.coords()) # min_x, max_x, min_y, max_y
return fork, videoo # replace blob with including fork's tsegment
else: # new segment is initialized:
tsegment = pattern(typ_str[typ] + 'tsegment', (blob.min_x, blob.max_x), (blob.min_y, blob.max_y), (t - t_rng, -1), sign=blob.sign)
# new instance of pattern class
tsegment.accum_params(blob.params()) # init params with blob's params
tsegment.roots = 0 # init roots
tsegment.fork_ = fork_ # init fork_
tsegment.ave_x = ave_x # ave_x
tsegment.ave_y = ave_y
tsegment.xD = 0 # xD
tsegment.yD = 0 # yD
tsegment.abs_xD = 0
tsegment.abs_yD = 0
tsegment.xyd_ = [(0, 0)] # xyd_ of blob_
tsegment.e_.append(blob) # blob_
if not fork_: # if no forks: initialize tblob
tblob = pattern(typ_str[typ] + 'tblob', (blob.min_x, blob.max_x), (blob.min_y, blob.max_y), (t - t_rng, -1), sign=tsegment.sign)
tblob.xD = 0
tblob.yD = 0
tblob.abs_xD = 0
tblob.abs_yD = 0
tblob.Lt = 0
tblob.remaining_roots = 1
else: # else merge into fork's tblob
tblob = fork_[0].tblob
tsegment.tblob = tblob # merge tsegment into tblob
tblob.e_.append(tsegment) # tsegment is buffered into tblob's root_
if len(fork_) > 1: # merge tblobs of all forks
if fork_[0].roots == 1: # if roots == 1
videoo = form_tblob(fork_[0], videoo, typ, 1) # terminate tsegment of 1st fork
for fork in fork_[1:len(fork_)]: # merge blobs of other forks into blob of 1st fork
if fork.roots == 1:
videoo = form_tblob(fork, videoo, typ, 1)
if not fork.tblob is tblob: # if not already merged/same
tblobs = fork.tblob
tblob.accum_params(tblobs.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
tblob.extend_coords(tblobs.coords()) # coord = [min_x, max_x, min_y, max_y, min_t, max_t]
tblob.xD += tblobs.xD
tblob.yD += tblobs.yD
tblob.abs_xD += tblobs.abs_xD
tblob.abs_yD += tblobs.abs_yD
tblob.Lt += tblobs.Lt
tblob.remaining_roots += tblobs.remaining_roots
for tseg in tblobs.e_:
if not tseg is fork:
tseg.tblob = tblob # tblobs in other forks are references to tblob in the first fork
tblob.e_.append(tseg) # buffer of merged root tsegments
fork.tblob = tblob
tblob.e_.append(fork)
tblob.remaining_roots -= 1
return tsegment, videoo
# ---------- form_tsegment() end ------------------------------------------------------------------------------------
def form_tblob(term_tseg, videoo, typ, t_carry = 0):
# Terminated tsegment is merged into continued or initialized tblob (all connected tsegments)
tblob = term_tseg.tblob
term_tseg.max_t = t - t_rng - 1 - t_carry # set max_t <- current t; t_carry: min elevation of term_tseg over current pri_blob
tblob.accum_params(term_tseg.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
tblob.extend_coords(term_tseg.coords()) # coords = [min_x, max_x, min_y, max_y, min_t, max_t]
tblob.xD += term_tseg.xD # ave_x angle, to evaluate tblob for re-orientation
tblob.yD += term_tseg.yD
tblob.abs_xD += term_tseg.abs_xD
tblob.abs_yD += term_tseg.abs_yD
tblob.Lt += len(term_tseg.e_) # Lt = number of slices in tsegment
tblob.remaining_roots += term_tseg.roots - 1 # reference to term_tseg is already in tblob
term_tseg.terminated = True
if tblob.remaining_roots == 0: # if remaining_roots == 0: tblob is terminated and packed in videoo
if term_tseg.core == 'm' and term_tseg.sign == 0: # is negative mblob
videoo[typ].accum_params(term_tseg.params())
videoo[typ].xD += tblob.xD # ave_x angle, to evaluate frame for re-orientation
videoo[typ].yD += tblob.yD
videoo[typ].abs_xD += tblob.abs_xD
videoo[typ].abs_yD += tblob.abs_yD
videoo[typ].Lt += tblob.Lt
delattr(tblob, 'remaining_roots')
tblob.terminated = True
videoo[typ].e_.append(tblob)
return videoo
# ---------- form_tblob() end ---------------------------------------------------------------------------------------
def video_to_tblobs(video):
# Main body of the operation,
# postfix '_' denotes array vs. element,
# prefix '_' denotes prior- pixel, line, or frame variable
# higher-line same- d-, m-, dy-, my- sign 1D patterns
_xP_ = [deque(), deque()]
_yP_ = [deque(), deque()]
_tP_ = [deque(), deque()]
_frame = [frame_of_patterns(typ_str[i] + 'frame') for i in range(2 * dim)]
    # Main output: [[Lf, If, Dxf, Dyf, Dtf, Mxf, Myf, Mtf], tblob_]
videoo = [frame_of_patterns(typ_str[i] + 'videoo') for i in range(2 * dim)]
global t, y
# Initialization:
t = 0 # temporal coordinate of current frame
rng_ders2__ = [] # horizontal line of vertical buffers: array of 2D tuples
rng_ders3___ = [] # temporal buffer per pixel of a frame: 3D tuples in 3D -> 2D array
y = 0 # initial line
line_ = fetch_frame(video) # first frame of lines?
pixel_ = line_[0, :] # first line of pixels
ders1_ = lateral_comp(pixel_) # after partial comp, while x < rng?
for (p, d, m) in ders1_:
ders2 = p, d, 0, m, 0 # dy, my initialized at 0
rng_ders2_ = deque(maxlen=rng) # vertical buffer of incomplete derivatives tuples, for fuzzy ycomp
rng_ders2_.append(ders2) # only one tuple in first-line rng_ders2_
rng_ders2__.append(rng_ders2_)
for y in range(1, Y): # or Y-1: default term_blob in scan_P_ at y = Y?
pixel_ = line_[y, :] # vertical coordinate y is index of new line p_
ders1_ = lateral_comp(pixel_) # lateral pixel comparison
ders2_, rng_ders2__ = vertical_comp(ders1_, rng_ders2__) # vertical pixel comparison, ders2_ is array of complete der2s on y line
# incomplete ders2_ s are discarded
if y > min_coord:
# Transfer complete list of tuples of ders2 into line y of ders3___
rng_ders3__ = []
for (p, dx, dy, mx, my) in ders2_:
ders3 = p, dx, dy, 0, mx, my, 0 # dt, mt initialized at 0
rng_ders3_ = deque(maxlen=t_rng) # temporal buffer of incomplete derivatives tuples, for fuzzy ycomp
rng_ders3_.append(ders3) # only one tuple in first-frame rng_ders3_
rng_ders3__.append(rng_ders3_)
rng_ders3___.append(rng_ders3__) # 1st frame, last vertical rng of incomplete ders2__ is discarded
# Main operations
for t in range(1, T):
if not video.isOpened(): # Terminate at the end of video
break
frame = [frame_of_patterns(typ_str[i] + 'frame') for i in range(2 * dim)]
line_ = fetch_frame(video)
for y in range(0, Y):
pixel_ = line_[y, :]
ders1_ = lateral_comp(pixel_) # lateral pixel comparison
ders2_, rng_ders2__ = vertical_comp(ders1_, rng_ders2__) # vertical pixel comparison
if y > min_coord:
# temporal pixel comparison:
rng_ders3___, _xP_, _yP_, _tP_, frame, _frame = temporal_comp(ders2_, rng_ders3___, _xP_, _yP_, _tP_, frame, _frame)
# merge segs of last line into their blobs:
if t > t_min_coord:
y = Y
for typ in range(6):
is_dP = typ // dim
dimension = typ % dim
if dimension == 0: hP_ = _xP_[is_dP]
if dimension == 1: hP_ = _yP_[is_dP]
if dimension == 2: hP_ = _tP_[is_dP]
hP_, frame, _frame = term_segment_(hP_, frame, _frame, typ)
# Sort blobs' indices based on min_x, max_x, min_y, max_y:
frame[typ].sorted_min_x_idx_, frame[typ].sorted_max_x_idx_, frame[typ].sorted_min_y_idx_, frame[typ].sorted_max_y_idx_, \
frame[typ].sorted_min_x_, frame[typ].sorted_max_x_, frame[typ].sorted_min_y_, frame[typ].sorted_max_y_ = sort_segments(frame[typ].e_)
# tsegments, tblobs operations:
for tsegment in _frame[typ].e_:
if tsegment.roots != 1:
videoo = form_tblob(tsegment, videoo, typ)
for i in range(len(frame[typ].e_)):
frame[typ].e_[i], videoo = form_tsegment(frame[typ].e_[i], videoo, typ)
if record and t == frame_output_at: # change these in program body
rebuild_frame('./images/mblobs_horizontal', frame[0], record_blobs, record_segs)
rebuild_frame('./images/mblobs_vertical', frame[1], record_blobs, record_segs)
rebuild_frame('./images/mblobs_temporal', frame[2], record_blobs, record_segs)
rebuild_frame('./images/dblobs_horizontal', frame[3], record_blobs, record_segs)
rebuild_frame('./images/dblobs_vertical', frame[4], record_blobs, record_segs)
rebuild_frame('./images/dblobs_temporal', frame[5], record_blobs, record_segs)
_frame = frame
# sequence ends, incomplete ders3__ discarded, but vertically incomplete blobs are still inputted in scan_blob_?
cv2.destroyAllWindows() # Part of video read
return videoo # frame of 2D patterns is outputted to level 2
# ---------- video_to_tblobs() end ----------------------------------------------------------------------------------
# ************ MAIN FUNCTIONS END ***************************************************************************************
# ************ PROGRAM BODY *********************************************************************************************
# Pattern filters ----------------------------------------------------------------
# eventually updated by higher-level feedback, initialized here as constants:
t_rng = 3 # Number of pixels compared to each pixel in time D
rng = 2 # Number of pixels compared to each pixel in four directions
min_coord = rng * 2 - 1 # min x and y for form_P input: ders2 from comp over rng*2 (bidirectional: before and after pixel p)
t_min_coord = t_rng * 2 - 1 # min t for form_P input: ders3 from comp over t_rng*2 (bidirectional: before and after pixel p)
ave = 15 # |d| value that coincides with average match: mP filter
dim = 3 # Number of dimensions: x, y and t
typ_str = ('mx', 'my', 'mt', 'dx', 'dy', 'dt')
der_str = ('m', 'd') # derivatives string
dim_str = ('x', 'y', 't') # dimensions string
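# Derived from the defaults above (added comment): min_coord = 2 * 2 - 1 = 3 and t_min_coord = 3 * 2 - 1 = 5,
# so complete bilateral ders are only formed once x, y > 3 within a line/frame and t > 5 across frames.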
# For outputs:
record = bool(0) # Set to True to yield file outputs
record_blobs = bool(0)
record_segs = bool(0)
frame_output_at = t_rng * 2 # first frame that computes 2D blobs
global olp_debug, debug_case
olp_debug = bool(0)
debug_case = 0
output_at_case = 0
# Load inputs --------------------------------------------------------------------
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('-v', '--video', help='path to video file', default='./videos/Test01.avi')
arguments = vars(argument_parser.parse_args())
video = cv2.VideoCapture(arguments['video'], 0)
line_ = fetch_frame(video)
Y, X = line_.shape # image height and width
T = 8 # limit on the number of frames read
# Main ---------------------------------------------------------------------------
start_time = time()
videoo = video_to_tblobs(video)
end_time = time() - start_time
print(end_time)
# ************ PROGRAM BODY END ****************************************************************************************** | 50.318535 | 149 | 0.56852 |
48e89d800b4d9c51db8ae2e94a990b68fedcdec8 | 286 | py | Python | Ejemplo7.py | batzzinga/Curso-Python | b9abd5d0d2625e5f64a4ea3f8c7540a9f42d2b63 | [
"MIT"
] | null | null | null | Ejemplo7.py | batzzinga/Curso-Python | b9abd5d0d2625e5f64a4ea3f8c7540a9f42d2b63 | [
"MIT"
] | null | null | null | Ejemplo7.py | batzzinga/Curso-Python | b9abd5d0d2625e5f64a4ea3f8c7540a9f42d2b63 | [
"MIT"
] | null | null | null | #! /usr/bin/python
# -*- coding: iso-8859-15 -*-
# Roll dice and print the results until a 3 comes up
from random import randint
n = int(input("Ingrese la cantidad de lanzamientos: "))
x = 0
for i in range(n):
x = randint(1,6)
print (x)
if x == 3:
break | 26 | 61 | 0.622378 |
1790571a81eb1297ac04a00459a856f1ffcc5f6e | 9,651 | py | Python | docs/conf.py | davidemoro/pytest-pypom-navigation | 3af2e264d2b52aa234f4962a903ea7b860a70404 | [
"Apache-2.0"
] | 2 | 2017-06-01T20:18:20.000Z | 2017-06-13T19:18:19.000Z | docs/conf.py | tierratelematics/pytest-pypom-navigation | 3af2e264d2b52aa234f4962a903ea7b860a70404 | [
"Apache-2.0"
] | 13 | 2017-05-30T08:02:58.000Z | 2018-04-01T21:16:31.000Z | docs/conf.py | davidemoro/pytest-pypom-navigation | 3af2e264d2b52aa234f4962a903ea7b860a70404 | [
"Apache-2.0"
] | 1 | 2017-10-10T11:04:35.000Z | 2017-10-10T11:04:35.000Z | # -*- coding: utf-8 -*-
#
# pytest-pypom-navigation documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 1 00:43:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pytest-pypom-navigation'
copyright = u'2015, Tierra QA team'
author = u'Tierra QA team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytest-pypom-navigationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc,
     'pytest-pypom-navigation.tex',
     u'pytest-pypom-navigation Documentation',
     u'Tierra QA team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc,
     'pytest-pypom-navigation',
u'pytest-pypom-navigation Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc,
     'pytest-pypom-navigation', u'pytest-pypom-navigation Documentation',
     author, 'pytest-pypom-navigation',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 32.60473 | 79 | 0.711947 |
f9cf7e79d705c7085423242ec77f7a0ab5daa235 | 734 | py | Python | src/mahjong.py | LittleYe233/majhong-connect | 0ba711852ba7e0d5a54f346cfb606da7223f2972 | [
"Apache-2.0"
] | null | null | null | src/mahjong.py | LittleYe233/majhong-connect | 0ba711852ba7e0d5a54f346cfb606da7223f2972 | [
"Apache-2.0"
] | null | null | null | src/mahjong.py | LittleYe233/majhong-connect | 0ba711852ba7e0d5a54f346cfb606da7223f2972 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from src.utils.cardsys import Card, Cardset
from src.profiles import japanese
from src.profiles import competition
# Constants
_MJ_GAME_PROFILE_KEYS = [
'japanese', # Japanese Mahjong, 日本麻将
'competition' # Mahjong Competition Rules, 国标麻将
]
_MJ_GAME_PROFILES = { # TODO: additional rules defined in these files
'japanese': japanese,
'competition': competition
}
# Classes
class MahjongCard(Card):
def __init__(self, rank=None, suit=None, name=None, tags=None):
super().__init__(rank, suit, name)
self.tags = tags or []
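    # Illustrative example (added comment; rank/suit values are hypothetical): MahjongCard(rank=5, suit='dots',
    # name='5-dots', tags=['red']) keeps the given tags list, while omitting tags defaults it to [].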
def __repr__(self):
return (f'<MahjongCard rank={self.rank} suit={self.suit} '
f'name={self.name} tags={self.tags}>') | 25.310345 | 70 | 0.667575 |
91bd8babbb751909dcfacbba1ae9f0bb75659a6a | 5,218 | py | Python | src/garage/experiment/meta_evaluator.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | src/garage/experiment/meta_evaluator.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | src/garage/experiment/meta_evaluator.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | """Evaluator which tests Meta-RL algorithms on test environments."""
from dowel import logger, tabular
from garage import log_multitask_performance, TrajectoryBatch
from garage.experiment.deterministic import get_seed
from garage.sampler import LocalSampler
from garage.sampler.worker import DefaultWorker
from garage.sampler.worker_factory import WorkerFactory
class MetaEvaluator:
"""Evaluates Meta-RL algorithms on test environments.
Args:
test_task_sampler (garage.experiment.TaskSampler): Sampler for test
tasks. To demonstrate the effectiveness of a meta-learning method,
these should be different from the training tasks.
max_path_length (int): Maximum path length used for evaluation
trajectories.
n_test_tasks (int or None): Number of test tasks to sample each time
evaluation is performed. Note that tasks are sampled "without
replacement". If None, is set to `test_task_sampler.n_tasks`.
n_exploration_traj (int): Number of trajectories to gather from the
exploration policy before requesting the meta algorithm to produce
an adapted policy.
n_test_rollouts (int): Number of rollouts to use for each adapted
policy. The adapted policy should forget previous rollouts when
`.reset()` is called.
prefix (str): Prefix to use when logging. Defaults to MetaTest. For
example, this results in logging the key 'MetaTest/SuccessRate'.
If not set to `MetaTest`, it should probably be set to `MetaTrain`.
test_task_names (list[str]): List of task names to test. Should be in
an order consistent with the `task_id` env_info, if that is
present.
worker_class (type): Type of worker the Sampler should use.
worker_args (dict or None): Additional arguments that should be
passed to the worker.
"""
# pylint: disable=too-few-public-methods
def __init__(self,
*,
test_task_sampler,
max_path_length,
n_exploration_traj=10,
n_test_tasks=None,
n_test_rollouts=1,
prefix='MetaTest',
test_task_names=None,
worker_class=DefaultWorker,
worker_args=None):
self._test_task_sampler = test_task_sampler
self._worker_class = worker_class
if worker_args is None:
self._worker_args = {}
else:
self._worker_args = worker_args
if n_test_tasks is None:
n_test_tasks = test_task_sampler.n_tasks
self._n_test_tasks = n_test_tasks
self._n_test_rollouts = n_test_rollouts
self._n_exploration_traj = n_exploration_traj
self._max_path_length = max_path_length
self._eval_itr = 0
self._prefix = prefix
self._test_task_names = test_task_names
self._test_sampler = None
def evaluate(self, algo, test_rollouts_per_task=None):
"""Evaluate the Meta-RL algorithm on the test tasks.
Args:
algo (garage.np.algos.MetaRLAlgorithm): The algorithm to evaluate.
test_rollouts_per_task (int or None): Number of rollouts per task.
"""
if test_rollouts_per_task is None:
test_rollouts_per_task = self._n_test_rollouts
adapted_trajectories = []
        logger.log('Sampling for adaptation and meta-testing...')
if self._test_sampler is None:
self._test_sampler = LocalSampler.from_worker_factory(
WorkerFactory(seed=get_seed(),
max_path_length=self._max_path_length,
n_workers=1,
worker_class=self._worker_class,
worker_args=self._worker_args),
agents=algo.get_exploration_policy(),
envs=self._test_task_sampler.sample(1))
for env_up in self._test_task_sampler.sample(self._n_test_tasks):
policy = algo.get_exploration_policy()
traj = TrajectoryBatch.concatenate(*[
self._test_sampler.obtain_samples(self._eval_itr, 1, policy,
env_up)
for _ in range(self._n_exploration_traj)
])
adapted_policy = algo.adapt_policy(policy, traj)
adapted_traj = self._test_sampler.obtain_samples(
self._eval_itr, test_rollouts_per_task * self._max_path_length,
adapted_policy)
adapted_trajectories.append(adapted_traj)
logger.log('Finished meta-testing...')
if self._test_task_names is not None:
name_map = dict(enumerate(self._test_task_names))
else:
name_map = None
with tabular.prefix(self._prefix + '/' if self._prefix else ''):
log_multitask_performance(
self._eval_itr,
TrajectoryBatch.concatenate(*adapted_trajectories),
getattr(algo, 'discount', 1.0),
name_map=name_map)
self._eval_itr += 1
| 43.848739 | 79 | 0.629743 |
aaa2f0004cfe4fe02a2257b48ce6ded479166259 | 2,281 | py | Python | tests/unit/grains/test_nvme.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 5 | 2017-02-07T05:39:29.000Z | 2020-06-13T02:07:33.000Z | tests/unit/grains/test_nvme.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | tests/unit/grains/test_nvme.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 11 | 2017-01-26T19:36:29.000Z | 2021-12-11T07:54:16.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Simon Dodsley <simon@purestorage.com>`
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import textwrap
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
mock_open,
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.grains.nvme as nvme
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NvmeGrainsTestCase(TestCase):
'''
Test cases for nvme grains
'''
def test_linux_nvme_nqn_grains(self):
_nvme_file = textwrap.dedent('''\
nqn.2014-08.org.nvmexpress:fc_lif:uuid:2cd61a74-17f9-4c22-b350-3020020c458d
''')
with patch('salt.utils.files.fopen', mock_open(read_data=_nvme_file)):
nqn = nvme._linux_nqn()
assert isinstance(nqn, list)
assert len(nqn) == 1
assert nqn == ['nqn.2014-08.org.nvmexpress:fc_lif:uuid:2cd61a74-17f9-4c22-b350-3020020c458d']
@patch('salt.utils.files.fopen', MagicMock(side_effect=IOError(errno.EPERM,
'The cables are not the same length.')))
@patch('salt.grains.nvme.log', MagicMock())
def test_linux_nqn_non_root(self):
'''
        Test that linux_nqn, when running on the salt-master as non-root,
        handles access denial properly.
:return:
'''
assert nvme._linux_nqn() == []
nvme.log.debug.assert_called()
assert 'Error while accessing' in nvme.log.debug.call_args[0][0]
assert 'cables are not the same' in nvme.log.debug.call_args[0][2].strerror
assert nvme.log.debug.call_args[0][2].errno == errno.EPERM
assert nvme.log.debug.call_args[0][1] == '/etc/nvme/hostnqn'
@patch('salt.utils.files.fopen', MagicMock(side_effect=IOError(errno.ENOENT, '')))
@patch('salt.grains.nvme.log', MagicMock())
def test_linux_nqn_no_nvme_initiator(self):
'''
Test if linux_nqn is running on salt-master as root.
        The nvme initiator is either not accessible or not supported.
:return:
'''
assert nvme._linux_nqn() == []
nvme.log.debug.assert_not_called()
| 33.544118 | 107 | 0.642701 |
6e3efd03489a4c9936edd60688e9e7d427607877 | 902 | py | Python | tests/distance_matrix.py | jni/microscopium | b9cddd8ef5f3003a396ace602228651b3020c4a3 | [
"BSD-3-Clause"
] | 53 | 2016-08-30T09:45:12.000Z | 2022-02-03T06:22:50.000Z | tests/distance_matrix.py | jni/microscopium | b9cddd8ef5f3003a396ace602228651b3020c4a3 | [
"BSD-3-Clause"
] | 151 | 2015-01-15T06:16:27.000Z | 2021-03-22T01:01:26.000Z | tests/distance_matrix.py | jni/microscopium | b9cddd8ef5f3003a396ace602228651b3020c4a3 | [
"BSD-3-Clause"
] | 19 | 2015-01-15T06:13:26.000Z | 2021-09-13T13:06:47.000Z | """Generate pre-computed distance matrix for Euclidean distance.
"""
import os
from numpy import sqrt
import pandas as pd
import numpy as np
abspath = os.path.dirname(__file__)
def string2tuple(string_tuple):
    """Convert a string like "(3, 'B04')" into an (int, str) coordinate tuple."""
string_values = string_tuple.split(', ')
coords = (int(string_values[0][1:]), string_values[1][1:-2])
return coords
def euclidean_distance(a, b):
return sqrt(sum((a-b)**2 for a, b in zip(a, b)))
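# Worked examples (added comments; the script's behaviour is unchanged):
#   string2tuple("(3, 'B04')") -> (3, 'B04'), i.e. an (int, str) coordinate pair.
#   euclidean_distance((0, 0), (3, 4)) -> 5.0, since sqrt(3**2 + 4**2) = 5.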
test_data = pd.read_csv(os.path.join(abspath, 'testdata/data_test.csv'),
index_col=0)
distance_matrix = np.zeros((8, 8))
for i in range(0, 8):
for j in range(0, 8):
dist = euclidean_distance(test_data.values[i], test_data.values[j])
distance_matrix[i, j] = dist
distance_pd = pd.DataFrame(distance_matrix)
distance_pd.to_csv(os.path.join(abspath, 'testdata/distance_test.csv'), index=False, header=False)
| 28.1875 | 98 | 0.689579 |
2bb10e4f967f8db94aac12e6a82f5f5523ad56db | 9,045 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20170301/route_table.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20170301/route_table.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20170301/route_table.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['RouteTable']
class RouteTable(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Route table resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: Gets a unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_table_name: The name of the route table.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]] routes: Collection of routes contained within a route table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['etag'] = etag
__props__['id'] = id
__props__['location'] = location
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if route_table_name is None:
raise TypeError("Missing required property 'route_table_name'")
__props__['route_table_name'] = route_table_name
__props__['routes'] = routes
__props__['tags'] = tags
__props__['name'] = None
__props__['subnets'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20150615:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160330:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20160901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20161201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20170901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20171001:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20171101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180701:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20180801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181001:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20181201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190701:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190801:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20190901:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20191101:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20191201:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200301:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200401:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200501:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200601:RouteTable"), pulumi.Alias(type_="azure-nextgen:network/v20200701:RouteTable")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteTable, __self__).__init__(
'azure-nextgen:network/v20170301:RouteTable',
resource_name,
__props__,
opts)
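    # Illustrative usage sketch (added comment; resource names and location are hypothetical):
    #   rt = RouteTable('rt',
    #                   resource_group_name='example-rg',
    #                   route_table_name='example-routes',
    #                   location='westus')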
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteTable':
"""
Get an existing RouteTable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return RouteTable(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def routes(self) -> pulumi.Output[Optional[Sequence['outputs.RouteResponse']]]:
"""
Collection of routes contained within a route table.
"""
return pulumi.get(self, "routes")
@property
@pulumi.getter
def subnets(self) -> pulumi.Output[Sequence['outputs.SubnetResponse']]:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
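# Illustrative usage sketch (not part of the generated SDK): adopting an existing Azure
# route table into a Pulumi program via RouteTable.get(). The resource name and Azure ID
# below are hypothetical placeholders, and the Python module path is assumed from the
# resource type token 'azure-nextgen:network/v20170301:RouteTable'.
#
#     import pulumi
#     import pulumi_azure_nextgen.network.v20170301 as network
#
#     existing = network.RouteTable.get(
#         "existing-route-table",
#         id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Network/routeTables/<name>",
#     )
#     pulumi.export("routes", existing.routes)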
| 52.283237 | 2,301 | 0.674627 |
0aa089b2b4b94b22d0347b5b56b33783c263a4df | 9,344 | py | Python | src/sdk/pynni/nni/nas/pytorch/enas/trainer.py | struss/nni | 2e84b445125aa2365eb5e79c94287d869db3366d | [
"MIT"
] | 2 | 2020-04-19T15:57:46.000Z | 2020-04-28T18:14:19.000Z | src/sdk/pynni/nni/nas/pytorch/enas/trainer.py | arita37/nni | d51d755e2a591a166d2a8669e592da4d2c704ad6 | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/nas/pytorch/enas/trainer.py | arita37/nni | d51d755e2a591a166d2a8669e592da4d2c704ad6 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from itertools import cycle
import torch
import torch.nn as nn
import torch.optim as optim
from nni.nas.pytorch.trainer import Trainer
from nni.nas.pytorch.utils import AverageMeterGroup, to_device
from .mutator import EnasMutator
logger = logging.getLogger(__name__)
class EnasTrainer(Trainer):
"""
ENAS trainer.
Parameters
----------
model : nn.Module
PyTorch model to be trained.
loss : callable
Receives logits and ground truth label, return a loss tensor.
metrics : callable
Receives logits and ground truth label, return a dict of metrics.
reward_function : callable
        Receives logits and ground truth label, return a tensor, which will be fed to the RL controller as reward.
optimizer : Optimizer
The optimizer used for optimizing the model.
num_epochs : int
Number of epochs planned for training.
dataset_train : Dataset
Dataset for training. Will be split for training weights and architecture weights.
dataset_valid : Dataset
Dataset for testing.
mutator : EnasMutator
Use when customizing your own mutator or a mutator with customized parameters.
batch_size : int
Batch size.
workers : int
Workers for data loading.
device : torch.device
``torch.device("cpu")`` or ``torch.device("cuda")``.
log_frequency : int
Step count per logging.
callbacks : list of Callback
list of callbacks to trigger at events.
entropy_weight : float
Weight of sample entropy loss.
skip_weight : float
Weight of skip penalty loss.
baseline_decay : float
Decay factor of baseline. New baseline will be equal to ``baseline_decay * baseline_old + reward * (1 - baseline_decay)``.
child_steps : int
How many mini-batches for model training per epoch.
mutator_lr : float
Learning rate for RL controller.
mutator_steps_aggregate : int
Number of steps that will be aggregated into one mini-batch for RL controller.
mutator_steps : int
Number of mini-batches for each epoch of RL controller learning.
aux_weight : float
Weight of auxiliary head loss. ``aux_weight * aux_loss`` will be added to total loss.
test_arc_per_epoch : int
How many architectures are chosen for direct test after each epoch.
"""
def __init__(self, model, loss, metrics, reward_function,
optimizer, num_epochs, dataset_train, dataset_valid,
mutator=None, batch_size=64, workers=4, device=None, log_frequency=None, callbacks=None,
entropy_weight=0.0001, skip_weight=0.8, baseline_decay=0.999, child_steps=500,
mutator_lr=0.00035, mutator_steps_aggregate=20, mutator_steps=50, aux_weight=0.4,
test_arc_per_epoch=1):
super().__init__(model, mutator if mutator is not None else EnasMutator(model),
loss, metrics, optimizer, num_epochs, dataset_train, dataset_valid,
batch_size, workers, device, log_frequency, callbacks)
self.reward_function = reward_function
self.mutator_optim = optim.Adam(self.mutator.parameters(), lr=mutator_lr)
self.batch_size = batch_size
self.workers = workers
self.entropy_weight = entropy_weight
self.skip_weight = skip_weight
self.baseline_decay = baseline_decay
self.baseline = 0.
self.mutator_steps_aggregate = mutator_steps_aggregate
self.mutator_steps = mutator_steps
self.child_steps = child_steps
self.aux_weight = aux_weight
self.test_arc_per_epoch = test_arc_per_epoch
self.init_dataloader()
def init_dataloader(self):
n_train = len(self.dataset_train)
split = n_train // 10
indices = list(range(n_train))
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:-split])
valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[-split:])
self.train_loader = torch.utils.data.DataLoader(self.dataset_train,
batch_size=self.batch_size,
sampler=train_sampler,
num_workers=self.workers)
self.valid_loader = torch.utils.data.DataLoader(self.dataset_train,
batch_size=self.batch_size,
sampler=valid_sampler,
num_workers=self.workers)
self.test_loader = torch.utils.data.DataLoader(self.dataset_valid,
batch_size=self.batch_size,
num_workers=self.workers)
self.train_loader = cycle(self.train_loader)
self.valid_loader = cycle(self.valid_loader)
def train_one_epoch(self, epoch):
# Sample model and train
self.model.train()
self.mutator.eval()
meters = AverageMeterGroup()
for step in range(1, self.child_steps + 1):
x, y = next(self.train_loader)
x, y = to_device(x, self.device), to_device(y, self.device)
self.optimizer.zero_grad()
with torch.no_grad():
self.mutator.reset()
logits = self.model(x)
if isinstance(logits, tuple):
logits, aux_logits = logits
aux_loss = self.loss(aux_logits, y)
else:
aux_loss = 0.
metrics = self.metrics(logits, y)
loss = self.loss(logits, y)
loss = loss + self.aux_weight * aux_loss
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 5.)
self.optimizer.step()
metrics["loss"] = loss.item()
meters.update(metrics)
if self.log_frequency is not None and step % self.log_frequency == 0:
logger.info("Model Epoch [%d/%d] Step [%d/%d] %s", epoch + 1,
self.num_epochs, step, self.child_steps, meters)
# Train sampler (mutator)
self.model.eval()
self.mutator.train()
meters = AverageMeterGroup()
for mutator_step in range(1, self.mutator_steps + 1):
self.mutator_optim.zero_grad()
for step in range(1, self.mutator_steps_aggregate + 1):
x, y = next(self.valid_loader)
x, y = to_device(x, self.device), to_device(y, self.device)
self.mutator.reset()
with torch.no_grad():
logits = self.model(x)
metrics = self.metrics(logits, y)
reward = self.reward_function(logits, y)
if self.entropy_weight:
reward += self.entropy_weight * self.mutator.sample_entropy.item()
self.baseline = self.baseline * self.baseline_decay + reward * (1 - self.baseline_decay)
loss = self.mutator.sample_log_prob * (reward - self.baseline)
if self.skip_weight:
loss += self.skip_weight * self.mutator.sample_skip_penalty
metrics["reward"] = reward
metrics["loss"] = loss.item()
metrics["ent"] = self.mutator.sample_entropy.item()
metrics["log_prob"] = self.mutator.sample_log_prob.item()
metrics["baseline"] = self.baseline
metrics["skip"] = self.mutator.sample_skip_penalty
loss /= self.mutator_steps_aggregate
loss.backward()
meters.update(metrics)
cur_step = step + (mutator_step - 1) * self.mutator_steps_aggregate
if self.log_frequency is not None and cur_step % self.log_frequency == 0:
logger.info("RL Epoch [%d/%d] Step [%d/%d] [%d/%d] %s", epoch + 1, self.num_epochs,
mutator_step, self.mutator_steps, step, self.mutator_steps_aggregate,
meters)
nn.utils.clip_grad_norm_(self.mutator.parameters(), 5.)
self.mutator_optim.step()
def validate_one_epoch(self, epoch):
with torch.no_grad():
for arc_id in range(self.test_arc_per_epoch):
meters = AverageMeterGroup()
for x, y in self.test_loader:
x, y = to_device(x, self.device), to_device(y, self.device)
self.mutator.reset()
logits = self.model(x)
if isinstance(logits, tuple):
logits, _ = logits
metrics = self.metrics(logits, y)
loss = self.loss(logits, y)
metrics["loss"] = loss.item()
meters.update(metrics)
logger.info("Test Epoch [%d/%d] Arc [%d/%d] Summary %s",
epoch + 1, self.num_epochs, arc_id + 1, self.test_arc_per_epoch,
meters.summary())
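# Illustrative wiring sketch (assumption, not part of this module): constructing the trainer
# roughly as in the NNI ENAS examples. ``Net``, ``dataset_train``/``dataset_valid`` and the
# metric/reward callables are hypothetical placeholders; only the constructor arguments are
# taken from the docstring above, and ``trainer.train()`` assumes the base Trainer exposes it.
#
#     model = Net()
#     trainer = EnasTrainer(
#         model,
#         loss=nn.CrossEntropyLoss(),
#         metrics=lambda logits, y: {"acc": (logits.argmax(1) == y).float().mean().item()},
#         reward_function=lambda logits, y: (logits.argmax(1) == y).float().mean(),
#         optimizer=optim.SGD(model.parameters(), lr=0.05, momentum=0.9),
#         num_epochs=10,
#         dataset_train=dataset_train,
#         dataset_valid=dataset_valid,
#         batch_size=128,
#         log_frequency=10,
#     )
#     trainer.train()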
| 44.923077 | 130 | 0.58658 |
3cd34a21a4810b677af341bee0a6587ba7799423 | 22,773 | py | Python | utils/io_utils.py | buaaxyszgl/AISafety | b5bc52b6b9fc764c3ebb59674d3a7bd77b5646b0 | [
"MIT"
] | 32 | 2020-10-20T06:12:48.000Z | 2022-03-30T03:31:24.000Z | utils/io_utils.py | buaaxyszgl/AISafety | b5bc52b6b9fc764c3ebb59674d3a7bd77b5646b0 | [
"MIT"
] | 2 | 2021-03-24T13:54:50.000Z | 2021-10-11T13:37:31.000Z | utils/io_utils.py | buaaxyszgl/AISafety | b5bc52b6b9fc764c3ebb59674d3a7bd77b5646b0 | [
"MIT"
] | 19 | 2020-10-22T05:42:51.000Z | 2022-02-04T07:07:39.000Z | # -*- coding: utf-8 -*-
import importlib
import numpy as np
from PIL import Image
import os
import sys
import cv2
import random
import json
import string
from .file_utils import inorder_choose_data
import torch
def get_datasets_without_mask(
imgs_dir, label_dir, height, width, channels, train_test="null"
):
Nimgs = len(os.listdir(imgs_dir))
imgs = np.empty((Nimgs, height, width, channels))
# print(imgs.shape)
groundTruth = np.empty((Nimgs, height, width))
for path, subdirs, files in os.walk(
imgs_dir
): # list all files, directories in the path
print("images num", len(files))
for i in range(len(files)):
# original
print("image count", i + 1)
print("original image: " + files[i])
img = Image.open(os.path.join(imgs_dir, files[i]))
img = img.convert(mode="RGB")
# check whether padding is needed
origin_img = np.asarray(img)
print("image shape:" + str(origin_img.shape))
need_padding = False
imgs[i] = np.asarray(img)
# corresponding ground truth
groundTruth_name = "MA_" + files[i].split("_", 1)[-1]
print("ground truth name: " + groundTruth_name)
g_truth = Image.open(os.path.join(label_dir, groundTruth_name))
origin_manual = np.asarray(g_truth)
print("manual shape:" + str(origin_manual.shape))
groundTruth[i] = np.asarray(g_truth)
# print ("imgs max: " +str(np.max(imgs)))
# print ("imgs min: " +str(np.min(imgs)))
# assert(np.max(groundTruth)==255)
# assert(np.min(groundTruth)==0)
print(
"ground truth and border masks are correctly withih pixel value range 0-255 (black-white)"
)
# reshaping for my standard tensors
# imgs = np.transpose(imgs,(0,3,1,2))
assert imgs.shape == (Nimgs, height, width, channels)
# groundTruth = np.reshape(groundTruth,(Nimgs,1,height,width))
# border_masks = np.reshape(border_masks,(Nimgs,1,height,width))
assert groundTruth.shape == (Nimgs, height, width)
return imgs, groundTruth
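# Illustrative call (hypothetical paths and sizes): for an image "<prefix>_<suffix>" the
# matching label file is expected to be named "MA_<suffix>"; the function returns arrays
# shaped (Nimgs, height, width, channels) and (Nimgs, height, width), e.g.
#     imgs, labels = get_datasets_without_mask("data/images", "data/labels", 512, 512, 3)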
def get_datasets(
imgs_dir, label_dir, bordermask_dir, height, width, channels, train_test="null"
):
Nimgs = len(os.listdir(imgs_dir))
imgs = np.empty((Nimgs, height, width, channels))
print(imgs.shape)
groundTruth = np.empty((Nimgs, height, width))
border_masks = np.empty((Nimgs, height, width))
for path, subdirs, files in os.walk(
imgs_dir
): # list all files, directories in the path
for i in range(len(files)):
# original
print("original image: " + files[i])
img = Image.open(os.path.join(imgs_dir, files[i]))
# check whether padding is needed
origin_img = np.asarray(img)
print("image shape:" + str(origin_img.shape))
need_padding = False
if (origin_img.shape[0] == height) and (origin_img.shape[1] == width):
imgs[i] = np.asarray(img)
else:
need_padding = True
print("padding image.......")
origin_img = np.asarray(img)
target_h, target_w, img_h, img_w = (
height,
width,
origin_img.shape[0],
origin_img.shape[1],
)
if len(origin_img.shape) == 3:
d = origin_img.shape[2]
padded_img = np.zeros((target_h, target_w, d))
elif len(origin_img.shape) == 2:
padded_img = np.zeros((target_h, target_w))
padded_img[
(target_h - img_h) // 2 : (target_h - img_h) // 2 + img_h,
(target_w - img_w) // 2 : (target_w - img_w) // 2 + img_w,
...,
] = origin_img
# newImage = Image.fromarray(np.uint8(padded_img))
# newImage.save("/home/rock/devdata/"+files[i])
imgs[i] = padded_img
# corresponding ground truth
groundTruth_name = "manual_" + files[i].split("_")[-1]
print("ground truth name: " + groundTruth_name)
g_truth = Image.open(os.path.join(label_dir, groundTruth_name))
origin_manual = np.asarray(g_truth)
print("manual shape:" + str(origin_manual.shape))
if (origin_manual.shape[0] == height) and (origin_manual.shape[1] == width):
groundTruth[i] = np.asarray(g_truth)
else:
print("padding manual.......")
target_h, target_w, img_h, img_w = (
height,
width,
origin_manual.shape[0],
origin_manual.shape[1],
)
if len(origin_manual.shape) == 3:
d = origin_manual.shape[2]
padded_manual = np.zeros((target_h, target_w, d))
elif len(origin_manual.shape) == 2:
padded_manual = np.zeros((target_h, target_w))
padded_manual[
(target_h - img_h) // 2 : (target_h - img_h) // 2 + img_h,
(target_w - img_w) // 2 : (target_w - img_w) // 2 + img_w,
...,
] = origin_manual
groundTruth[i] = padded_manual
print("imgs max: " + str(np.max(imgs)))
print("imgs min: " + str(np.min(imgs)))
assert np.max(groundTruth) == 255 and np.max(border_masks) == 255
assert np.min(groundTruth) == 0 and np.min(border_masks) == 0
print(
"ground truth and border masks are correctly withih pixel value range 0-255 (black-white)"
)
# reshaping for my standard tensors
# imgs = np.transpose(imgs,(0,3,1,2))
assert imgs.shape == (Nimgs, height, width, channels)
# groundTruth = np.reshape(groundTruth,(Nimgs,1,height,width))
# border_masks = np.reshape(border_masks,(Nimgs,1,height,width))
assert groundTruth.shape == (Nimgs, height, width)
assert border_masks.shape == (Nimgs, height, width)
return imgs, groundTruth, border_masks
def get_cam_list(self, cam_path, sample_index_list):
print("get_cam_list")
cam_list = []
orig_list = []
abspath = os.path.abspath(cam_path)
test_study_list = os.listdir(abspath)
for study in test_study_list:
cam_list.append(
[
os.path.join(abspath, study, "cam_" + index + ".jpg")
for index in sample_index_list
]
)
orig_list.append(
[
os.path.join(abspath, study, "orig_" + index + ".jpg")
for index in sample_index_list
]
)
return cam_list, orig_list
def get_top_k_list(self, top_k_path):
print("get_all_test_data_list")
channels_list = self._config.prepare_data.channels_list
Nchs = len(channels_list)
imgs_list = []
mannual_list = []
abspath = os.path.abspath(self._config.prepare_data.test_data_dir)
test_study_list = os.listdir(abspath)
for study in test_study_list:
imgs_list.append(
[os.path.join(abspath, study, ch + ".jpg") for ch in channels_list]
)
mannual_list.append([os.path.join(abspath, study, "manual.jpg")])
# print(imgs_list)
# print(mannual_list)
return imgs_list, mannual_list
def result_as_html(base_abspath, x_list, predict_y_list, y_list=None):
print("result as html")
# print(x_list)
# print(predict_y_list)
# print(y_list)
assert len(x_list) == len(predict_y_list)
html_content = (
"<html><head><title>priction_title</title>"
'<style type="text/css">.card {float:left;margin: 5px 5px 5px 5px;text-align:center;}'
"ul {list-style-type:none;}h3.hl {margin-top: 2;margin-bottom: 2;text-align: center;}</style></head><body><ul>"
)
for index in range(len(x_list)):
html_content = html_content + "<li>"
imgs = x_list[index]
results = predict_y_list[index]
assert type(imgs) == type([])
assert type(results) == type([])
item_num = len(imgs) + len(results)
if y_list:
assert type(y_list[index]) == type([])
item_num = item_num + len(y_list[index])
html_img_width = 1800 // item_num - 20
html_content = (
html_content
+ '<hr><h3 class="hl">'
+ imgs[0][0 : imgs[0].rfind("/")]
+ "</h3>"
)
# imgs = [imgs]
for i in range(len(imgs)):
html_content = (
html_content
+ '<div class="card"><h3>'
+ imgs[i].split("/")[-1]
+ '</h3><img src="'
+ os.path.relpath(imgs[i], base_abspath)
+ '" width='
+ str(html_img_width)
+ " height="
+ str(html_img_width)
+ " /></div>"
)
if y_list:
groundTruth = y_list[index]
for i in range(len(groundTruth)):
html_content = (
html_content
+ '<div class="card"><h3>'
+ groundTruth[i].split("/")[-1]
+ '</h3><img src="'
+ os.path.relpath(groundTruth[i], base_abspath)
+ '" width='
+ str(html_img_width)
+ " height="
+ str(html_img_width)
+ " /></div>"
)
for i in range(len(results)):
html_content = (
html_content
+ '<div class="card"><h3>'
+ results[i].split("/")[-1]
+ '</h3><img src="'
+ os.path.relpath(results[i], base_abspath)
+ '" width='
+ str(html_img_width)
+ " height="
+ str(html_img_width)
+ " /></div>"
)
html_content = html_content + "</li>"
html_content = html_content + "</ul></body></html>"
return html_content
def get_resized_img_from_dir(
MSI_filename, MSI_image_name, original_row, original_col, resizeratio
):
resize_row = int(original_row / resizeratio)
resize_col = int(original_col / resizeratio)
img_data = np.ndarray((resize_row, resize_col, len(MSI_image_name)), dtype=np.uint8)
print("MSI_filename:", MSI_filename)
if len(MSI_image_name) > 1:
for i in range(len(MSI_image_name)):
MSI_image = cv2.imread(MSI_filename + "/" + MSI_image_name[i] + ".jpg")
if resizeratio > 1 or resizeratio < 1:
MSI_image = cv2.resize(MSI_image, (resize_row, resize_col))
img_data[:, :, i] = MSI_image[:, :, 0]
else:
MSI_image = cv2.imread(MSI_filename + "/" + MSI_image_name[0] + ".jpg")
if resizeratio > 1 or resizeratio < 1:
MSI_image = cv2.resize(MSI_image, (resize_row, resize_col))
img_data = MSI_image
print("img_data:", img_data.shape)
return img_data
def subimage(image, center, theta, width, height):
theta *= np.pi / 180 # convert to rad
v_x = (np.cos(theta), np.sin(theta))
v_y = (-np.sin(theta), np.cos(theta))
s_x = center[0] - v_x[0] * (width / 2) - v_y[0] * (height / 2)
s_y = center[1] - v_x[1] * (width / 2) - v_y[1] * (height / 2)
mapping = np.array([[v_x[0], v_y[0], s_x], [v_x[1], v_y[1], s_y]])
return cv2.warpAffine(
image,
mapping,
(width, height),
flags=cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_REPLICATE,
)
def CreateSave_dir(MSI_image_save_file, PosOrNeg, patch_size, scale_ratio):
patch_name0 = PosOrNeg
save_dir0 = os.path.join(MSI_image_save_file, patch_name0)
if os.path.exists(save_dir0) == False:
os.makedirs(save_dir0)
patch_name1 = str(patch_size)
save_dir1 = os.path.join(MSI_image_save_file, patch_name0, patch_name1)
if os.path.exists(save_dir1) == False:
os.makedirs(save_dir1)
save_dir2 = os.path.join(MSI_image_save_file, patch_name0, patch_name1, "images")
if os.path.exists(save_dir2) == False:
os.makedirs(save_dir2)
save_dir3 = os.path.join(MSI_image_save_file, patch_name0, patch_name1, "masks")
if os.path.exists(save_dir3) == False:
os.makedirs(save_dir3)
if scale_ratio > 0:
patch_name2 = str(int(patch_size * scale_ratio))
save_dir1 = os.path.join(MSI_image_save_file, patch_name0, patch_name2)
if os.path.exists(save_dir1) == False:
os.makedirs(save_dir1)
save_dir2 = os.path.join(
MSI_image_save_file, patch_name0, patch_name2, "images"
)
if os.path.exists(save_dir2) == False:
os.makedirs(save_dir2)
save_dir3 = os.path.join(MSI_image_save_file, patch_name0, patch_name2, "masks")
if os.path.exists(save_dir3) == False:
os.makedirs(save_dir3)
def SaveImageName(File, PosOrNeg, patch_size, scale_ratio):
save_dir_image2 = ""
patch_name1 = str(patch_size)
print("PosOrNeg:", PosOrNeg)
save_dir_image = os.path.join(File, PosOrNeg, patch_name1, "images")
if scale_ratio > 0:
patch_name2 = str(int(patch_size * scale_ratio))
save_dir_image2 = os.path.join(File, PosOrNeg, patch_name2, "images")
return save_dir_image, save_dir_image2
def SaveMaskName(File, PosOrNeg, patch_size, scale_ratio):
save_dir_mask2 = ""
patch_name1 = str(patch_size)
save_dir_mask = os.path.join(File, PosOrNeg, patch_name1, "masks")
if scale_ratio > 0:
patch_name2 = str(int(patch_size * scale_ratio))
save_dir_mask2 = os.path.join(File, PosOrNeg, patch_name2, "masks")
return save_dir_mask, save_dir_mask2
def SaveWithJson(
DIR, save_type, table_name="", model_name="", evaluation_name="", row="", value=0
):
    # Check whether the result file already exists
if not os.path.exists(DIR):
os.makedirs(DIR)
if not os.path.exists(DIR + "/result.txt"):
with open(DIR + "/result.txt", "w") as file:
json.dump({}, file)
file = open(DIR + "/result.txt", "r")
js = file.read()
file.close()
dic = json.loads(js)
if not save_type in dic:
dic[save_type] = {}
    # The content to store is the basic evaluation result
if save_type == "table_list":
if not table_name in dic[save_type]:
dic[save_type][table_name] = {}
model_name = model_name.split(".")[-1]
print(model_name)
dic[save_type][table_name]["TITLE"] = [model_name, "CLEAN", table_name, []]
dic[save_type][table_name][row] = [row, 0]
if not row in dic[save_type][table_name]:
dic[save_type][table_name][row] = [row, 0]
if evaluation_name == "CLEAN ACC":
dic[save_type][table_name][row][1] = value
else:
dic[save_type][table_name]["TITLE"][3].append(evaluation_name)
if type(value) == list:
value = value[0]
dic[save_type][table_name][row].append((value))
    # The content to store is the heat-map (CAM) analysis
elif save_type == "cam":
pass
    # The content to store is the mCE analysis
elif save_type == "mCE":
pass
    # The content to store is the EENI analysis
elif save_type == "EENI":
pass
with open(DIR + "/result.txt", "w") as file:
file.write(json.dumps(dic, ensure_ascii=False))
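# Illustrative result (hypothetical values): after e.g.
#     SaveWithJson(out_dir, "table_list", "MNIST", "models.resnet.ResNet", "CLEAN ACC", "FGSM_eps0.03", 0.98)
#     SaveWithJson(out_dir, "table_list", "MNIST", "models.resnet.ResNet", "ACAC", "FGSM_eps0.03", 0.12)
# result.txt holds roughly
#     {"table_list": {"MNIST": {"TITLE": ["ResNet", "CLEAN", "MNIST", ["ACAC"]],
#                               "FGSM_eps0.03": ["FGSM_eps0.03", 0.98, 0.12]}}}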
# SaveWithJson_Result(args.save_path, "table_list", attName[0], f, args.evaluation_method, "evaluation result", rst)
def SaveWithJson_Result(
DIR, save_type, attName="", Attack_file_name="", evaluation_name="", value=0
):
    # Check whether the result file already exists
if not os.path.exists(DIR):
os.makedirs(DIR)
if not os.path.exists(DIR + "/result.txt"):
with open(DIR + "/result.txt", "w") as file:
json.dump({}, file)
file = open(DIR + "/result.txt", "r")
js = file.read()
file.close()
dic = json.loads(js)
if not save_type in dic:
dic[save_type] = {}
    # The content to store is the basic evaluation result
att_param_name = Attack_file_name.split(".")[0]
print(att_param_name)
if save_type == "table_list":
if not attName in dic[save_type]:
dic[save_type][attName] = {}
dic[save_type][attName][att_param_name] = {}
dic[save_type][attName][att_param_name] = [[], []]
if not att_param_name in dic[save_type][attName]:
dic[save_type][attName][att_param_name] = [[evaluation_name], []]
dic[save_type][attName][att_param_name][1] = [value]
else:
dic[save_type][attName][att_param_name][0].append(evaluation_name)
if type(value) == list:
value = value[0]
dic[save_type][attName][att_param_name][1].append((value))
with open(DIR + "/result.txt", "w") as file:
file.write(json.dumps(dic, ensure_ascii=False))
def update_current_status(DIR, attack_method, value):
# 检查文件是否存在
if not os.path.exists(DIR + "/temp.txt"):
return
with open(DIR + "/temp.txt", "r") as file:
js = file.read()
dic = json.loads(js)
if attack_method in dic:
dic[attack_method] = value
with open(DIR + "/temp.txt", "w") as file:
file.write(json.dumps(dic))
def mkdir(path):
    # Import the module
    import os
    # Strip leading/trailing whitespace
    path = path.strip()
    # Strip a trailing backslash
    path = path.rstrip("\\")
    # Check whether the path exists:
    #   exists  -> True
    #   missing -> False
    isExists = os.path.exists(path)
    # Act on the result
    if not isExists:
        # Create the directory if it does not exist
        os.makedirs(path)
        # print(path + ' created successfully')
        return True
    else:
        # The directory already exists: do not create it, just report that it exists
        # print(path + ' already exists')
return False
def get_label_lines(path_label):
sample_names, labels = inorder_choose_data(path_label, 1, division=" ")
return sample_names, labels, len(labels)
def center_Crop(Image, ImageScale_size, crop_size):
imgcv = cv2.resize(Image, (ImageScale_size[0], ImageScale_size[1]))
if ImageScale_size == crop_size:
return imgcv
center_x = imgcv.shape[0] // 2
center_y = imgcv.shape[1] // 2
cropImg = imgcv[
center_x - crop_size[0] // 2 : center_x + crop_size[0] // 2,
center_y - crop_size[1] // 2 : center_y + crop_size[1] // 2,
]
return cropImg
def get_image_from_path(ImagePath, index, ImageScale_size, crop_size):
pathnames = os.listdir(ImagePath)
pathname = pathnames[index]
image = cv2.imread(ImagePath + "/" + pathname)
imgcv = center_Crop(image, ImageScale_size, crop_size)
image = np.ascontiguousarray(np.transpose(imgcv, (2, 0, 1)))
image = np.float32(image) / 255
return image, imgcv
def convertlist_to_numpy(list_inputs):
outputs_numpy = []
for i in range(len(list_inputs)):
output_numpy = list_inputs[i].cpu().numpy()[np.newaxis, :]
outputs_numpy.extend(output_numpy)
return np.array(outputs_numpy)
def save_json(path, tensor_data):
with open(path, "w") as jsonFile:
json.dump({"data": tensor_data.tolist()}, jsonFile)
def read_json(path, dict_name):
with open(path, "r") as load_f:
load_dict = json.load(load_f)
# print(np.array(load_dict[dict_name]).shape)
return np.array(load_dict[dict_name])
def load_json(path):
with open(path, "r") as load_f:
load_dict = json.load(load_f)
# print(np.array(load_dict[dict_name]).shape)
return load_dict
def gen_attack_adv_save_path(save_base_path, args_Attack_param):
path_origins = str(args_Attack_param).split(".")[0].split("/")
mkdir(save_base_path)
new_base_path = save_base_path + path_origins[0]
for i in range(len(path_origins) - 1):
new_base_path = new_base_path + "_" + path_origins[i + 1]
return new_base_path
def analyze_json(jsons):
"""
    Parse the incoming jsons object into key-value pairs and print them.
    :param jsons: the JSON data to parse
:return:
"""
key_value = ""
    # isinstance is a Python built-in function; it checks whether jsons is of type dict
    # and returns True if it is, False otherwise
if isinstance(jsons, dict):
for key in jsons.keys():
key_value = jsons.get(key)
if isinstance(key_value, dict):
analyze_json(key_value)
elif isinstance(key_value, list):
for json_array in key_value:
analyze_json(json_array)
else:
print(str(key) + " = " + str(key_value))
elif isinstance(jsons, list):
for json_array in jsons:
analyze_json(json_array)
def output_value(jsons, key):
"""
    Look up the given key in jsons and return the corresponding value.
    :param jsons: the JSON data to search
    :param key: the key to look for
:return:
"""
key_value = ""
if isinstance(jsons, dict):
for json_result in jsons.values():
if key in jsons.keys():
key_value = jsons.get(key)
else:
output_value(json_result, key)
elif isinstance(jsons, list):
for json_array in jsons:
output_value(json_array, key)
if key_value != "":
# print(str(key) + " = " + str(key_value))
return key_value
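# Illustrative usage (hypothetical data):
#     output_value({"acc": 0.93, "loss": 0.2}, "acc")   # returns 0.93 (top-level match)
#     analyze_json({"run": {"acc": 0.93}})              # prints "acc = 0.93"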
def dict_list_to_np(dict_list):
outputs = []
for single_output in dict_list:
outputs.append(single_output)
return np.array(outputs, dtype=np.float32)
def configurate_Device(seed, gpu_counts, gpu_indexs):
# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
if torch.cuda.is_available():
device_counts = torch.cuda.device_count()
if device_counts < int(gpu_counts):
print("Can't set the gpu number larger than the available numbers")
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
return device
else:
gpu_indexs = str(gpu_indexs).split(",")
np_gpu_indexs = np.array(gpu_indexs).astype(int)
            # The GPU indices satisfy the count requirement, but the other constraints still need checking
if min(np_gpu_indexs) >= 0 and max(np_gpu_indexs) < int(gpu_counts):
rand_index = random.randint(0, int(gpu_counts))
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_indexs[rand_index])
# for i in range(np_gpu_indexs.shape[0]):
# devicename=torch.cuda.get_device_name(i)
else:
                # Too many device names were given, or they do not match the devices actually in use.
                # The names tied to the device indices take precedence; if they match the device type, use the user's setting.
# Set the random seed manually for reproducibility.
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
return device
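# Illustrative call (hypothetical arguments): seed the RNGs and pick a device, e.g.
#     device = configurate_Device(seed=0, gpu_counts="1", gpu_indexs="0")
# On a CUDA machine this also restricts CUDA_VISIBLE_DEVICES to one of the listed indices.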
| 34.245113 | 119 | 0.580907 |
b1d0f1159ea5c84049c191f0c21de60111ea47ba | 1,889 | py | Python | api/program_routes.py | bcli4d/IDC-API | 29da3d93937d02f6d848f329dcebd2f706254f6f | [
"Apache-2.0"
] | null | null | null | api/program_routes.py | bcli4d/IDC-API | 29da3d93937d02f6d848f329dcebd2f706254f6f | [
"Apache-2.0"
] | null | null | null | api/program_routes.py | bcli4d/IDC-API | 29da3d93937d02f6d848f329dcebd2f706254f6f | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
from flask import jsonify, request
from api import app
from django.conf import settings
from django.db import close_old_connections
from program_views import get_programs
logger = logging.getLogger(settings.LOGGER_NAME)
@app.route('/v1/programs/', methods=['GET'], strict_slashes=False)
def programs():
"""Retrieve the list of programs and builds currently available for cohort creation."""
response = None
try:
program_info = get_programs()
if program_info:
response = jsonify({
'code': 200,
'data': program_info
})
response.status_code = 200
else:
response = jsonify({
'code': 500,
'message': 'Encountered an error while retrieving the program list.'
})
response.status_code = 500
except Exception as e:
logger.error("[ERROR] While retrieving program information:")
logger.exception(e)
response = jsonify({
'code': 500,
'message': 'Encountered an error while retrieving the program list.'
})
response.status_code = 500
finally:
close_old_connections()
return response
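# Illustrative request/response sketch (the host name is a placeholder):
#     curl https://<api-host>/v1/programs/
# returns {"code": 200, "data": <list from get_programs()>} on success, or
# {"code": 500, "message": "Encountered an error while retrieving the program list."} on failure.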
| 30.967213 | 91 | 0.651668 |
bb33e4541f71f23193abb70070ccf4844f52845e | 901 | py | Python | ooobuild/dyn/drawing/enhanced_custom_shape_geometry.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/drawing/enhanced_custom_shape_geometry.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/drawing/enhanced_custom_shape_geometry.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.drawing
from ...lo.drawing.enhanced_custom_shape_geometry import EnhancedCustomShapeGeometry as EnhancedCustomShapeGeometry
__all__ = ['EnhancedCustomShapeGeometry']
| 34.653846 | 115 | 0.773585 |
be7a73f8081e938d0b6258f2b1ba9d49ea027139 | 876 | py | Python | collect_world.py | Slauva/robot_shop | 7d0556cf4c614b3f991f334ed3a1016a89acd339 | [
"MIT"
] | null | null | null | collect_world.py | Slauva/robot_shop | 7d0556cf4c614b3f991f334ed3a1016a89acd339 | [
"MIT"
] | null | null | null | collect_world.py | Slauva/robot_shop | 7d0556cf4c614b3f991f334ed3a1016a89acd339 | [
"MIT"
] | 1 | 2021-06-01T16:26:39.000Z | 2021-06-01T16:26:39.000Z | # import xml.etree.ElementTree as ET
import lxml.etree as ET
class World:
def __init__(self, world_path):
# self.world_path = world_path
self.tree = ET.parse(world_path)
self.root = self.tree.getroot() # sdf tag
self.world = self.root[0]
def include_model(self, model, name=None, pose=None):
if name is None:
name = model
if pose is None:
pose = [0, 0, 0, 0, 0, 0]
incl = ET.SubElement(self.world, 'include')
uri = ET.SubElement(incl, 'uri')
uri.text = "model://" + model
pose_tag = ET.SubElement(incl, 'pose')
pose_tag.text = " ".join("{0:0.2f}".format(i) for i in pose)
name_tag = ET.SubElement(incl, 'name')
name_tag.text = name
def generate(self, result_path):
ET.indent(self.root)
self.tree.write(result_path)
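# Minimal usage sketch (the .world file names below are hypothetical; any SDF world whose
# first child of <sdf> is a <world> element should work).
if __name__ == "__main__":
    world = World("base.world")
    # Drop a model one metre in front of the origin, rotated ~90 degrees around z.
    world.include_model("table", name="table_0", pose=[1.0, 0.0, 0.0, 0.0, 0.0, 1.57])
    world.generate("generated.world")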
| 31.285714 | 68 | 0.58105 |
05d007b91496e93abee9ff163a586bf567313fc0 | 2,320 | py | Python | exp_clustering/plot_exp_clustering.py | RandallBalestriero/PMASO | 780b06f8d8496000f3ecda04a49c8eda72393b5d | [
"MIT"
] | null | null | null | exp_clustering/plot_exp_clustering.py | RandallBalestriero/PMASO | 780b06f8d8496000f3ecda04a49c8eda72393b5d | [
"MIT"
] | null | null | null | exp_clustering/plot_exp_clustering.py | RandallBalestriero/PMASO | 780b06f8d8496000f3ecda04a49c8eda72393b5d | [
"MIT"
] | null | null | null | import cPickle
from pylab import *
import glob
import matplotlib as mpl
import cPickle
import os
SAVE_DIR = os.environ['SAVE_DIR']
label_size = 13
mpl.rcParams['xtick.labelsize'] = label_size+10
mpl.rcParams['ytick.labelsize'] = label_size
fs=15
def plotclasses(classes,samplesclass1):
for i,k in zip(range(len(classes)),classes):
for j in xrange(10):
subplot(len(classes),10,1+i*10+j)
imshow(samplesclass1[k][j,:,:,0],aspect='auto',cmap='Greys',interpolation='nearest')
xticks([])
yticks([])
def doit(MODEL,NEURONS,pl=1):
f=open(SAVE_DIR+'exp_clustering_'+MODEL+'_'+NEURONS+'.pkl','rb')
LOSSES,reconstruction,x,y,samples0,samples1,CL=cPickle.load(f)
print CL,LOSSES
return 0
f.close()
LLL = []
for i in xrange(len(reconstruction)):
LLL.append(((reconstruction[i]-x[i])**2).sum())
MSE = mean(LLL)
if(pl==0): return MSE,squeeze(LOSSES)
for i in xrange(50):
figure(figsize=(2,2))
imshow(x[i,:,:,0],aspect='auto',cmap='Greys',interpolation='nearest')
xticks([])
yticks([])
tight_layout()
savefig('../BASE_EXP/FA/sigmass'+'_'+MODEL+'_'+NEURONS+'_x_'+str(i)+'.png')
close()
figure(figsize=(2,2))
imshow(reconstruction[i,:,:,0],aspect='auto',cmap='Greys',interpolation='nearest')
xticks([])
yticks([])
tight_layout()
savefig('../BASE_EXP/FA/'+sigmass+'_'+MODEL+'_'+NEURONS+'_reconstruction_'+str(i)+'.png')
figure(figsize=(2,2))
imshow(samples[i,:,:,0],aspect='auto',cmap='Greys',interpolation='nearest')
xticks([])
yticks([])
tight_layout()
savefig('../BASE_EXP/FA/'+sigmass+'_'+MODEL+'_'+NEURONS+'_samples_'+str(i)+'.png')
print LOSSES
return MSE,squeeze(LOSSES)
#MS = ['x','o','o']
for k in ['10']:
for model in ['0','1','2']:
l=doit(model,k,pl=1)
# print shape(l)[5::20],len(l),l[5::20]
# plot(arange(len(l)),l,color='k',linewidth=2.5)
# plot(arange(len(l))[5::20],l[5::20],linestyle='None',color='k',linewidth=1,marker=MS[int(model)],markersize=9)
# yticks([])
# ylim([0,3100])
# tight_layout()
# savefig('../BASE_EXP/FA/'+sig+'_'+k+'_loss.png')
| 27.619048 | 123 | 0.576724 |
b64e1656e73144b5cb80dc17f932f67de76e25f2 | 1,246 | py | Python | asiapay/tests/models_tests.py | bitlabstudio/django-oscar-asiapay | 950f62b4d7d5edb79a65a8fd5affa9cb1e9772d9 | [
"MIT"
] | 1 | 2019-03-11T13:52:31.000Z | 2019-03-11T13:52:31.000Z | asiapay/tests/models_tests.py | bitmazk/django-oscar-asiapay | 950f62b4d7d5edb79a65a8fd5affa9cb1e9772d9 | [
"MIT"
] | 3 | 2020-02-12T00:06:51.000Z | 2021-06-10T19:45:35.000Z | asiapay/tests/models_tests.py | bitmazk/django-oscar-asiapay | 950f62b4d7d5edb79a65a8fd5affa9cb1e9772d9 | [
"MIT"
] | null | null | null | """Tests for the models of the ``user_profiles`` app."""
from django.test import TestCase
from . import factories
class AsiaPayTransactionTestCase(TestCase):
"""Tests for the ``AsiaPayTransaction`` model class."""
longMessage = True
def setUp(self):
self.txn = factories.AsiaPayTransactionFactory()
def test_instantiation(self):
"""Test instantiation of the ``AsiaPayTransaction`` model."""
self.assertTrue(self.txn.pk)
def test_is_successful(self):
self.assertTrue(self.txn.is_successful, msg=(
'Should be True, if transaction is successful.'))
def test_translate_success_code(self):
self.assertEqual(
self.txn.translate_success_code().translate('en'),
'Succeeded', msg=('Should return a success message if code is 0.'))
self.txn.success_code = '1'
self.assertEqual(
self.txn.translate_success_code().translate('en'), 'Failure',
msg=('Should return a failure message if code is 1.'))
self.txn.success_code = '2'
self.assertEqual(
self.txn.translate_success_code().translate('en'), 'Error',
            msg=('Should return an error message if code is neither 0 nor 1.'))
| 36.647059 | 79 | 0.65008 |
c4a08a8cd7b25d34164f4d0bcfaa1f094d6b4a59 | 647 | py | Python | HLTrigger/Configuration/python/HLT_75e33/modules/hgcalDigisL1Seeded_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hgcalDigisL1Seeded_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hgcalDigisL1Seeded_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z | import FWCore.ParameterSet.Config as cms
hgcalDigisL1Seeded = cms.EDProducer("HLTHGCalDigisInRegionsProducer",
etaPhiRegions = cms.VPSet(cms.PSet(
inputColl = cms.InputTag("hltL1TEGammaHGCFilteredCollectionProducer"),
maxDEta = cms.double(0.0),
maxDPhi = cms.double(0.0),
maxDeltaR = cms.double(0.35),
maxEt = cms.double(999999.0),
minEt = cms.double(5.0),
type = cms.string('L1EGamma')
)),
inputCollTags = cms.VInputTag("hgcalDigis:EE", "hgcalDigis:HEback", "hgcalDigis:HEfront"),
outputProductNames = cms.vstring(
'EE',
'HEback',
'HEfront'
)
)
| 32.35 | 94 | 0.638331 |
e68e539b16cc764dbed6b2d3711812bf74b360db | 711 | py | Python | task_2/tabs.py | yves147/bwinf40 | b6ecb23cd66482b137e59cb1f28714f81746e1a2 | [
"MIT"
] | 2 | 2021-11-25T17:08:49.000Z | 2021-11-25T21:15:33.000Z | task_2/tabs.py | yves147/bwinf40 | b6ecb23cd66482b137e59cb1f28714f81746e1a2 | [
"MIT"
] | null | null | null | task_2/tabs.py | yves147/bwinf40 | b6ecb23cd66482b137e59cb1f28714f81746e1a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from typing import List
SPACES_PER_TAB = 8
def main():
with open(sys.argv[1], "r", encoding="utf-8") as file:
in_lines = [line.rstrip() for line in file.readlines()]
out_lines: List[str] = []
for line in in_lines:
pos = 0
out_lines.append("")
for char in line:
if char == "\t":
filled = pos % SPACES_PER_TAB
to_be_added = SPACES_PER_TAB - filled
out_lines[-1] += to_be_added * " "
pos += to_be_added
else:
out_lines[-1] += char
pos += 1
print("\n".join(out_lines))
if __name__ == "__main__":
main()
| 23.7 | 63 | 0.514768 |
3fe1ddd20faf0fd0d2a7bfd97da5c5a3fbee8f16 | 772 | py | Python | C9/9.4/restaurant.py | Triple-Z/Python-Crash-Course | 7e59104420f6110e4d60668314264105534016ce | [
"MIT"
] | null | null | null | C9/9.4/restaurant.py | Triple-Z/Python-Crash-Course | 7e59104420f6110e4d60668314264105534016ce | [
"MIT"
] | null | null | null | C9/9.4/restaurant.py | Triple-Z/Python-Crash-Course | 7e59104420f6110e4d60668314264105534016ce | [
"MIT"
] | null | null | null | class Restaurant:
def __init__(self, name, cuisine_type):
self.restaurant_name = name
self.cuisine_type = cuisine_type
self.number_served = 0
def describe_restaurant(self):
print('\nRestaurant name: ' + self.restaurant_name.title())
print('Cuisine type: ' + self.cuisine_type.title())
print('Served: ' + str(self.number_served))
def open_restaurant(self):
print(self.restaurant_name.title() + ' is open')
def set_number_served(self, set_number):
self.number_served = set_number
def increment_number_served(self, increment_number):
if increment_number < 0:
print('You cannot put negative numbers!')
else:
self.number_served += increment_number
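# Small usage sketch (illustrative values), kept under a __main__ guard so that importing
# this module stays side-effect free.
if __name__ == "__main__":
    restaurant = Restaurant("the breakfast club", "breakfast")
    restaurant.describe_restaurant()
    restaurant.open_restaurant()
    restaurant.set_number_served(20)
    restaurant.increment_number_served(15)
    restaurant.describe_restaurant()  # now reports Served: 35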
| 32.166667 | 67 | 0.654145 |
a6fbd43807df4c1320012f5e7f1c975a5b1d3d54 | 21 | py | Python | ImageProcess/__init__.py | DongCX-LDHSP/WindowsFocusWallpaperGetter | d35896f1ddf717c29569a385ba7bd0ffcac857e3 | [
"MIT"
] | 1 | 2020-09-07T08:30:39.000Z | 2020-09-07T08:30:39.000Z | ImageProcess/__init__.py | DongCX-LDHSP/WindowsFocusWallpaperGetter | d35896f1ddf717c29569a385ba7bd0ffcac857e3 | [
"MIT"
] | null | null | null | ImageProcess/__init__.py | DongCX-LDHSP/WindowsFocusWallpaperGetter | d35896f1ddf717c29569a385ba7bd0ffcac857e3 | [
"MIT"
] | null | null | null | """
ImageProcess package
""" | 7 | 13 | 0.619048 |
08d68b8203888e0322739bc9cd3781975bb39f03 | 2,884 | py | Python | rpython/jit/backend/ppc/rassemblermaker.py | jptomo/pypy-lang-scheme | 55edb2cec69d78f86793282a4566fcbc1ef9fcac | [
"MIT"
] | 1 | 2019-11-25T10:52:01.000Z | 2019-11-25T10:52:01.000Z | rpython/jit/backend/ppc/rassemblermaker.py | jptomo/pypy-lang-scheme | 55edb2cec69d78f86793282a4566fcbc1ef9fcac | [
"MIT"
] | null | null | null | rpython/jit/backend/ppc/rassemblermaker.py | jptomo/pypy-lang-scheme | 55edb2cec69d78f86793282a4566fcbc1ef9fcac | [
"MIT"
] | null | null | null | from rpython.tool.sourcetools import compile2
from rpython.rlib.rarithmetic import r_uint
from rpython.jit.backend.ppc.form import IDesc, IDupDesc
from rpython.jit.backend.ppc.ppc_field import IField
## "opcode": ( 0, 5),
## "rA": (11, 15, 'unsigned', regname._R),
## "rB": (16, 20, 'unsigned', regname._R),
## "Rc": (31, 31),
## "rD": ( 6, 10, 'unsigned', regname._R),
## "OE": (21, 21),
## "XO2": (22, 30),
## XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc")
## add = XO(31, XO2=266, OE=0, Rc=0)
## def add(rD, rA, rB):
## v = 0
## v |= (31&(2**(5-0+1)-1)) << (32-5-1)
## ...
## return v
def make_func(name, desc):
sig = []
fieldvalues = []
for field in desc.fields:
if field in desc.specializations:
fieldvalues.append((field, desc.specializations[field]))
else:
sig.append(field.name)
fieldvalues.append((field, field.name))
if isinstance(desc, IDupDesc):
for destfield, srcfield in desc.dupfields.iteritems():
fieldvalues.append((destfield, srcfield.name))
body = ['v = r_uint(0)']
assert 'v' not in sig # that wouldn't be funny
#body.append('print %r'%name + ', ' + ', '.join(["'%s:', %s"%(s, s) for s in sig]))
for field, value in fieldvalues:
if field.name == 'spr':
body.append('spr1 = (%s&31) << 5 | (%s >> 5 & 31)'%(value, value))
value = 'spr1'
elif field.name == 'mbe':
body.append('mbe1 = (%s & 31) << 1 | (%s & 32) >> 5' % (value, value))
value = 'mbe1'
elif field.name == 'sh':
body.append('sh1 = (%s & 31) << 10 | (%s & 32) >> 5' % (value, value))
value = 'sh1'
if isinstance(field, IField):
body.append('v |= ((%3s >> 2) & r_uint(%#05x)) << 2' % (value, field.mask))
else:
body.append('v |= (%3s & r_uint(%#05x)) << %d'%(value,
field.mask,
(32 - field.right - 1)))
#body.append('self.check(desc, v, %s)' % ', '.join(sig))
body.append('self.emit(v)')
src = 'def %s(self, %s):\n %s'%(name, ', '.join(sig), '\n '.join(body))
d = {'r_uint':r_uint, 'desc': desc}
#print src
exec compile2(src) in d
return d[name]
def make_rassembler(cls):
# XXX tooons of very-old code patched to get medium-old code patched
# to get newer code :-(
bases = [make_rassembler(b) for b in cls.__bases__]
ns = {}
for k, v in cls.__dict__.iteritems():
if isinstance(v, IDesc):
v = make_func(k, v)
ns[k] = v
rcls = type('R' + cls.__name__, tuple(bases), ns)
def emit(self, value):
self.write32(value)
rcls.emit = emit
return rcls
| 37.454545 | 87 | 0.496879 |
1a3e7f3d76ebaf80295d10133b5cb78e8979ef3b | 1,511 | py | Python | tests/snippets/stdlib_struct.py | hatchling13/RustPython | b8c94501b8d0d6aeb5a589f6884d6a4a15c2c074 | [
"CC-BY-4.0",
"MIT"
] | 1 | 2019-08-16T06:53:06.000Z | 2019-08-16T06:53:06.000Z | tests/snippets/stdlib_struct.py | hatchling13/RustPython | b8c94501b8d0d6aeb5a589f6884d6a4a15c2c074 | [
"CC-BY-4.0",
"MIT"
] | 6 | 2021-10-14T15:55:16.000Z | 2022-03-31T14:04:02.000Z | tests/snippets/stdlib_struct.py | hatchling13/RustPython | b8c94501b8d0d6aeb5a589f6884d6a4a15c2c074 | [
"CC-BY-4.0",
"MIT"
] | 1 | 2021-01-17T17:49:04.000Z | 2021-01-17T17:49:04.000Z |
from testutils import assert_raises
import struct
data = struct.pack('IH', 14, 12)
assert data == bytes([14, 0, 0, 0, 12, 0])
v1, v2 = struct.unpack('IH', data)
assert v1 == 14
assert v2 == 12
data = struct.pack('<IH', 14, 12)
assert data == bytes([14, 0, 0, 0, 12, 0])
v1, v2 = struct.unpack('<IH', data)
assert v1 == 14
assert v2 == 12
data = struct.pack('>IH', 14, 12)
assert data == bytes([0, 0, 0, 14, 0, 12])
v1, v2 = struct.unpack('>IH', data)
assert v1 == 14
assert v2 == 12
data = struct.pack('3B', 65, 66, 67)
assert data == bytes([65, 66, 67])
v1, v2, v3 = struct.unpack('3B', data)
assert v1 == 65
assert v2 == 66
assert v3 == 67
with assert_raises(Exception):
data = struct.pack('B0B', 65, 66)
with assert_raises(Exception):
data = struct.pack('B2B', 65, 66)
data = struct.pack('B1B', 65, 66)
with assert_raises(Exception):
struct.pack('<IH', "14", 12)
assert struct.calcsize("B") == 1
assert struct.calcsize("<L4B") == 8
assert struct.Struct('3B').pack(65, 66, 67) == bytes([65, 66, 67])
class Indexable(object):
def __init__(self, value):
self._value = value
def __index__(self):
return self._value
data = struct.pack('B', Indexable(65))
assert data == bytes([65])
data = struct.pack('5s', b"test1")
assert data == b"test1"
data = struct.pack('3s', b"test2")
assert data == b"tes"
data = struct.pack('7s', b"test3")
assert data == b"test3\0\0"
data = struct.pack('?', True)
assert data == b'\1'
data = struct.pack('?', [])
assert data == b'\0' | 20.69863 | 66 | 0.622105 |
9857e03cac93f65eb8623f498e653cf244309b42 | 5,435 | py | Python | server/src/database.py | Perruci/UnBox | bf77d384c489fb8d421271fa9db962511d40597d | [
"MIT"
] | null | null | null | server/src/database.py | Perruci/UnBox | bf77d384c489fb8d421271fa9db962511d40597d | [
"MIT"
] | null | null | null | server/src/database.py | Perruci/UnBox | bf77d384c489fb8d421271fa9db962511d40597d | [
"MIT"
] | null | null | null | """ Database module for UnBox application """
import os
import ruamel.yaml as yaml
import pathlib
USER_DATA_FILE = 'server/user_data.yaml'
SERVER_DATABASE_FOLDER = 'server/database/'
# Files ---------------------------------------------------
def create_dir(directory_path):
""" Creates a directory if it doesnt exist """
if not os.path.exists(directory_path):
os.makedirs(directory_path)
def file_exists(file_path):
""" Returns true if file exists, false if it doesnt """
file = pathlib.Path(file_path)
return file.is_file()
def path_to_filename(username, path_to_file):
""" Converts a path formated as path/to/file.txt to a filename, ie. path_to_file.txt """
filename = '{}_{}'.format(username, path_to_file)
filename = filename.replace('/','_')
print(filename)
return filename
def delete_file(filename):
""" Deletes a file given its filename """
try:
os.remove(filename)
except OSError:
pass
# YAML ----------------------------------------------------
def load_yaml(file_path):
""" Loads YAML file and returns its data as a dictionary """
with open(file_path, 'r') as usernames_yaml:
try:
data = yaml.safe_load(usernames_yaml)
except yaml.YAMLError as exc:
print(exc)
return data
def dump_yaml(file_path, data):
""" Writes data to a YAML file and replaces its contents"""
with open(file_path, 'w+') as usernames_yaml:
yaml.dump(data, usernames_yaml)
# Database ---------------------------------------------------
def create_database_dir():
""" Creates directory to host user files on SERVER_DATABASE_FOLDER """
create_dir(SERVER_DATABASE_FOLDER)
def get_database_file_path(filename):
""" Returns the filename appended to SERVER_DATABASE_FOLDER """
filename = SERVER_DATABASE_FOLDER + filename
if file_exists(filename):
return filename
else:
return ''
def write_file_to_database(filename, bin_data):
""" Writes binary data for a given filename inside SERVER_DATABASE_FOLDER """
filename = SERVER_DATABASE_FOLDER + filename
try:
with open(filename, 'wb') as file:
file.write(bin_data)
    except OSError:
print('Could not write to {}'.format(filename))
return False
return True
def load_user_data(username):
""" Returns user data stored on USER_DATA_FILE """
data = load_yaml(USER_DATA_FILE)
return data.get(username)
def update_user_data(username, new_dict):
""" Updates stored user data with new_dict """
data = load_yaml(USER_DATA_FILE)
data[username] = new_dict
dump_yaml(USER_DATA_FILE, data)
def add_user_filesystem(username, path_to_file, file_size):
""" Adds a new file on user data dictionary
    Each entry is formatted as:
path_to_file:
size: file_size
location: filename
return:
        filename: filename to initiate the file receiving procedure
"""
filename = path_to_filename(username, path_to_file)
new_file = {path_to_file : {'size' : file_size, 'location' : filename}}
user_dict = load_user_data(username)
if 'files' not in user_dict:
user_dict['files'] = new_file
else:
files_dict = user_dict['files']
files_dict.update(new_file)
user_dict['files'] = files_dict
print('Updating {} filesystem'.format(username))
update_user_data(username, user_dict)
return filename
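# Illustrative example (hypothetical user and path): add_user_filesystem("alice", "docs/report.txt", 1024)
# stores under alice's entry in user_data.yaml a record shaped like
#     files:
#         docs/report.txt: {size: 1024, location: alice_docs_report.txt}
# and returns "alice_docs_report.txt", the flat filename later used inside server/database/.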
def update_user_filesystem(username, file_dict):
""" Updates user files dict """
user_dict = load_user_data(username)
user_dict['files'] = file_dict
print('Updating {} filesystem'.format(username))
update_user_data(username, user_dict)
def get_user_filesystem(username):
""" Returns given user 'files' dictionary """
user_dict = load_user_data(username)
if 'files' not in user_dict:
return None
else:
files_dict = user_dict['files']
return files_dict
def remove_user_file(username, filename):
""" Removes file from database and user data """
user_files = get_user_filesystem(username)
if filename in user_files:
database_file = get_database_file_path(user_files[filename]['location'])
delete_file(database_file)
user_files.pop(filename)
update_user_filesystem(username, user_files)
return True
else:
return False
def register_user(username, password):
""" Append new username to USER_DATA_FILE """
new_user_data = {username : {'password' : password}}
if file_exists(USER_DATA_FILE):
# Reads whole file and then updates it
data = load_yaml(USER_DATA_FILE)
data.update(new_user_data)
dump_yaml(USER_DATA_FILE, data)
else:
# create new file
data = new_user_data
dump_yaml(USER_DATA_FILE, data)
def authenticate_user(username, password):
""" Verify if user is listed on USER_DATA_FILE
return:
Boolean tuple (user_exists, password_correct)
"""
user_exists, password_correct = False, False
if not file_exists(USER_DATA_FILE):
return user_exists, password_correct
data = load_yaml(USER_DATA_FILE)
if username in data:
""" Checks for username as a key in database """
user_exists = True
if data[username]['password'] == password:
password_correct = True
return user_exists, password_correct
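# Illustrative usage (hypothetical credentials):
#     register_user("alice", "s3cret")
#     authenticate_user("alice", "s3cret")    # -> (True, True)
#     authenticate_user("alice", "wrong")     # -> (True, False)
#     authenticate_user("bob", "anything")    # -> (False, False)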
| 32.159763 | 92 | 0.661638 |
9b43322c18e46264cb5d892ca20ec0e459019394 | 425 | py | Python | files/generator/generator_example.py | aikrasnov/otus-examples | 607fe0c11d812bdff57d92fc0949d5bdf3356c22 | [
"MIT"
] | 8 | 2019-11-03T18:59:08.000Z | 2020-10-10T18:50:09.000Z | files/generator/generator_example.py | aikrasnov/otus-examples | 607fe0c11d812bdff57d92fc0949d5bdf3356c22 | [
"MIT"
] | null | null | null | files/generator/generator_example.py | aikrasnov/otus-examples | 607fe0c11d812bdff57d92fc0949d5bdf3356c22 | [
"MIT"
] | null | null | null | def squares(start, stop):
for i in range(start, stop):
value = yield i * i
print("passed to generator", value)
foo = squares(1, 5)
print("\n")
print("squares() type is", type(squares))
print("foo type is", type(foo))
# print(next(foo))
# print(next(foo))
# print(next(foo))
#
# foo.send("bar")
# show the default behaviour
# next(foo)
# next(foo)
# next(foo)
# next(foo)
# next(foo)
for i in foo:
print(i)
| 15.178571 | 43 | 0.602353 |
800158e0283c9031d51feeb3002da20f3a5f6cac | 12,503 | py | Python | networks/SimpleNetworkSpine.py | sbelharbi/Permutohedral_attention_module | b0d13347e2aa314e14ac21f56ec16e61947000ae | [
"Apache-2.0"
] | 25 | 2019-07-03T01:17:25.000Z | 2022-02-07T19:01:15.000Z | networks/SimpleNetworkSpine.py | sbelharbi/Permutohedral_attention_module | b0d13347e2aa314e14ac21f56ec16e61947000ae | [
"Apache-2.0"
] | 6 | 2019-07-17T09:14:57.000Z | 2021-09-08T15:05:10.000Z | networks/SimpleNetworkSpine.py | sbelharbi/Permutohedral_attention_module | b0d13347e2aa314e14ac21f56ec16e61947000ae | [
"Apache-2.0"
] | 12 | 2019-07-23T05:24:55.000Z | 2022-01-19T14:38:49.000Z | import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch
from PAM import PAM
class SimpleNet(nn.Module):
def __init__(self, non_local=False, dilated=False):
super(SimpleNet, self).__init__()
self.non_local = non_local
self.dilated = dilated
n_f = 18
if non_local:
self.conv_init = nn.Sequential(
nn.Conv3d(1, n_f, 3, 1, 1),
nn.InstanceNorm3d(n_f),
nn.ReLU()
)
else:
self.conv_init = nn.Sequential(
nn.Conv3d(1, n_f + 2, 3, 1, 1),
nn.InstanceNorm3d(n_f + 2),
nn.ReLU()
)
if dilated:
if non_local:
self.conv1_0 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv1_1 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv1_2 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
else:
self.conv1_0 = nn.Sequential(
nn.Conv3d(n_f + 2, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv1_1 = nn.Sequential(
nn.Conv3d(n_f + 2, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv1_2 = nn.Sequential(
nn.Conv3d(n_f + 2, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv2_0 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv2_1 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv2_2 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv3_0 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv3_1 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv3_2 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv4_0 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv4_1 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv4_2 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
else:
if non_local:
self.conv1 = nn.Sequential(
nn.Conv3d(n_f, n_f, 3, 1, 1),
nn.InstanceNorm3d(n_f),
nn.ReLU()
)
else:
self.conv1 = nn.Sequential(
nn.Conv3d(n_f + 2, n_f, 3, 1, 1),
nn.InstanceNorm3d(n_f),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv3d(n_f, n_f, 3, 1, 1),
nn.InstanceNorm3d(n_f),
nn.ReLU()
)
self.conv3 = nn.Sequential(
nn.Conv3d(n_f, n_f, 3, 1, 1),
nn.InstanceNorm3d(n_f),
nn.ReLU()
)
if self.non_local:
self.pam = PAM(n_f, n_f//2, n_f//2, 2)
self.conv4 = nn.Sequential(
nn.Conv3d(n_f, n_f, 3, 1, 1),
nn.InstanceNorm3d(n_f),
nn.ReLU()
)
self.segm = nn.Conv3d(n_f, 25, 1)
def forward(self, x):
x = self.conv_init(x)
if self.dilated:
x_0 = self.conv1_0(x)
x_1 = self.conv1_1(x)
x_2 = self.conv1_2(x)
x = torch.cat([x_0, x_1, x_2], 1)
x_0 = self.conv2_0(x)
x_1 = self.conv2_1(x)
x_2 = self.conv2_2(x)
x = torch.cat([x_0, x_1, x_2], 1)
x_0 = self.conv3_0(x)
x_1 = self.conv3_1(x)
x_2 = self.conv3_2(x)
x = torch.cat([x_0, x_1, x_2], 1)
x_0 = self.conv4_0(x)
x_1 = self.conv4_1(x)
x_2 = self.conv4_2(x)
x = torch.cat([x_0, x_1, x_2], 1)
else:
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
if self.non_local:
x = self.pam(x)
x = self.conv4(x)
x = self.segm(x)
x = F.softmax(x, dim=1)
return x
class DilFCN_PAM(nn.Module):
def __init__(self):
super(DilFCN_PAM, self).__init__()
n_f = 18
self.conv_init = nn.Sequential(
nn.Conv3d(1, n_f, 3, 1, 1),
nn.InstanceNorm3d(n_f),
nn.ReLU()
)
self.conv1_0 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv1_1 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv1_2 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv2_0 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv2_1 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv2_2 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv3_0 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv3_1 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv3_2 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv4_0 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 1),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv4_1 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 2, dilation=2),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.conv4_2 = nn.Sequential(
nn.Conv3d(n_f, n_f//3, 3, 1, 4, dilation=4),
nn.InstanceNorm3d(n_f//3),
nn.ReLU()
)
self.pam = PAM(n_f, n_f//2, n_f//2, 2)
self.segm = nn.Conv3d(n_f, 25, 1)
def forward(self, x):
x = self.conv_init(x)
x_0 = self.conv1_0(x)
x_1 = self.conv1_1(x)
x_2 = self.conv1_2(x)
x = torch.cat([x_0, x_1, x_2], 1)
x_0 = self.conv2_0(x)
x_1 = self.conv2_1(x)
x_2 = self.conv2_2(x)
x = torch.cat([x_0, x_1, x_2], 1)
x_0 = self.conv3_0(x)
x_1 = self.conv3_1(x)
x_2 = self.conv3_2(x)
x = torch.cat([x_0, x_1, x_2], 1)
x = self.pam(x)
x_0 = self.conv4_0(x)
x_1 = self.conv4_1(x)
x_2 = self.conv4_2(x)
x = torch.cat([x_0, x_1, x_2], 1)
x = self.segm(x)
x = F.softmax(x, dim=1)
return x
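# Shape sanity-check sketch (illustrative; assumes the local PAM module is importable): both
# networks map a single-channel 3D volume to 25 softmax-normalised class maps of the same
# spatial size.
if __name__ == "__main__":
    net = SimpleNet(non_local=False, dilated=False)
    volume = torch.randn(1, 1, 16, 16, 16)
    out = net(volume)
    print(out.shape)  # expected: torch.Size([1, 25, 16, 16, 16])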
| 47.181132 | 92 | 0.29313 |
7bad726695e707c5f11b33063afabd324dc1fd96 | 88,771 | py | Python | Scripts/Main_simulations_functions_1.py | PJGilmw/MicroRep | 08e72e3eb153d324751172675b7beeb118975cbf | [
"BSD-3-Clause"
] | null | null | null | Scripts/Main_simulations_functions_1.py | PJGilmw/MicroRep | 08e72e3eb153d324751172675b7beeb118975cbf | [
"BSD-3-Clause"
] | null | null | null | Scripts/Main_simulations_functions_1.py | PJGilmw/MicroRep | 08e72e3eb153d324751172675b7beeb118975cbf | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 10:45:34 2021
@author: Pierre Jouannais, Department of Planning, DCEA, Aalborg University
pijo@plan.aau.dk
"""
'''
Script containing the main functions that call the other scripts to calculate the LCAs,
perform uncertainty propagation and sensitivity analysis.
'''
import datetime
from time import *
import requests
import pickle
import cProfile
from scipy.integrate import odeint
import numpy as np
import os
import pandas as pd
import decimal
from random import *
import pstats
from itertools import *
from math import*
import csv
import copy
import bw2data
import bw2io
from bw2data.parameters import *
import brightway2 as bw
from SALib.test_functions import Ishigami
import math
from SALib.sample import saltelli
from SALib.sample import fast_sampler
from SALib.analyze import sobol
from SALib.analyze import fast
import SALib
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits import mplot3d
import Cultivation_simul_Night_Harvest_1 as cultsimul
import Functions_for_physical_and_biological_calculations_1 as functions
# Set working directory to file location
# (works only when executing the whole file and not only sections (Run Current cell))
currentfolder=os.path.dirname(os.path.realpath(__file__))
os.chdir(currentfolder)
def waste_water_impact_biosphere(my_conc_N,
my_conc_P,
my_conc_C,
my_conc_Mg,
my_conc_K,
my_conc_S,
methods):
'''Function which :
-Establishes the mass balances which match the input
concentrations for a given microalgal waste water.
-Calculates the impact of these emissions for each impact category ( method)
Inputs :
#my_conc_N, my_conc_P, my_conc_C, my_conc_Mg, my_conc_K, my_conc_S:
concentrations in g.L-1 of different elements in the actual
microalgal wastewater entering the treatment
#methods: The list of impact assessment methods.
Outputs :
#list_sum_impacts_biosphere_waste_water : list containing the total
impacts due to biosphere emissions for the treatment of 1 cubic meter of wastewater.
1 element per Impact category
'''
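    # Scaling logic applied below, in short: for every biosphere flow f of an
    # element E in the original wastewater treatment activity,
    #   new_flow(f) = (f - treatment_share(f)) * my_conc_E / original_conc_E
    #                 + treatment_share(f)
    # i.e. the part of the flow coming from the incoming wastewater is scaled
    # to the microalgal concentration, while the part added by the treatment
    # chemicals is kept constant. Illustrative, hypothetical numbers: if a
    # nitrogen flow of 0.010 kg contains 0.001 kg from the treatment and the
    # microalgal water holds twice the original N concentration, the new flow
    # is (0.010 - 0.001) * 2 + 0.001 = 0.019 kg per m3 treated.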
# Defining materials from original wastewater activity
# All original values of the output flows classified by element
# ['ID of the exchange, amount of the exchange']
N_original_flows = [['ae70ca6c-807a-482b-9ddc-e449b4893fe3', 0.00049],
['0017271e-7df5-40bc-833a-36110c1fe5d5', 0.000644],
['6dc1b46f-ee89-4495-95c4-b8a637bcd6cb', 0.0001146],
['d068f3e2-b033-417b-a359-ca4f25da9731', 0.00067568],
['b61057a3-a0bc-4158-882e-b819c4797419', 2.393e-05],
['13331e67-6006-48c4-bdb4-340c12010036', 0.011027],
['9990b51b-7023-4700-bca0-1a32ef921f74', 0.00059438],
['7ce56135-2ca5-4fba-ad52-d62a34bfeb35', 0.048295]]
P_original_flows = [['490b267b-f429-4d9a-ac79-224e37fb4d58', 7.2652e-05],
['1727b41d-377e-43cd-bc01-9eaba946eccb', 0.0027476],
['329fc7d8-4011-4327-84e4-34ff76f0e42d', 2.705e-05],
['198ce8e3-f05a-4bec-9f7f-325347453326', 6.2034e-07]]
C_original_flows = [['f65558fb-61a1-4e48-b4f2-60d62f14b085', 0.0072992],
['8734eb08-50cf-4f5a-8d1a-db76d38efe3c', 4.8293e-05],
['725c7923-0ed8-43e5-b485-fad7e34bef08', 4.8293e-05],
['62859da4-f3c5-417b-a575-8b00d8d658b1', 0.012346],
['73ed05cc-9727-4abf-9516-4b5c0fe54a16', 0.17202],
['960c0f37-f34c-4fc1-b77c-22d8b35fd8d5', 0.0075377],
['9afa0173-ecbd-4f2c-9c5c-b3128a032812', 0.0001613],
['baf58fc9-573c-419c-8c16-831ac03203b9', 0.00050213]]
S_original_flows = [['bfc0bf1c-e5e2-4702-a502-08c892031837', 0.0011039],
['d4049741-cef2-4edd-a3af-9728b9e3a568', 0.0010988],
['8c52f40c-69b7-4538-8923-b371523c71f5', 0.000884],
['37d35fd0-7f07-4b9b-92eb-de3c27050172', 0.14465]]
Mg_original_flows = [['ce9fd912-233a-4807-a33e-0323b1e4a7a2', 0.00014782],
['ebfe261d-ab0d-4ade-8743-183c8c6bdcc6', 2.205e-07],
['e8475907-2081-4fd5-9526-bfcef88380db', 0.00039974],
['7bdab722-11d0-4c42-a099-6f9ed510a44a', 0.0051478]]
K_original_flows = [['1653bf60-f682-4088-b02d-6dc44eae2786', 0.0003989]]
Al_original_flows = [['2baa4381-b781-4f5e-90de-508b0fa3fd1f', 0.0010518],
['97e498ec-f323-4ec6-bcc0-d8a4c853bae3', 6.228e-05],
['01056d4b-f9b0-4dfc-b8d9-8407c8376efb', 0.00031181],
['6f0b8b7c-3888-4174-b7e3-916d42d678ee', 6.5822e-07]]
Na_original_flows = [['1fc409bc-b8e7-48b2-92d5-2ced4aa7bae2', 0.002186]]
Ca_original_flows = [['ac066c02-b403-407b-a1f0-b29ad0f8188f', 0.045852],
['ae28c923-a4a3-4f00-b862-1ae6e748efb9', 0.0012412],
['a912f450-5233-489b-a2e9-8c029fab480f', 2.3777e-06],
['f16fa1da-e426-4820-bf9d-71595c22283b', 0.0035605]]
Fe_original_flows = [['9b6d6f07-ebc6-447d-a3c0-f2017d77d852', 0.0017779],
['7c335b9c-a403-47a8-bb6d-2e7d3c3a230e', 0.0036009],
['db364689-e1a3-4629-8835-e6c59d6daf09', 0.009475],
['32cd0492-c0cb-4898-a2b1-675eedc5b688', 1.2671e-07]]
Cl_original_flows = [['5e050fab-1837-4c42-b597-ed2f376f768f', 0.040484]]
    # The inputs of different elements in the original wastewater treatment activity.
# Added to the waste water as treatment.
added_treatment_N = 2.61388*10**-5
added_treatment_P = 0
added_treatment_C = 0
added_treatment_S = 0.003339284
added_treatment_Al = 0.000497558
added_treatment_Fe = 0.0098005
added_treatment_Na = 9.44323*10**-5
added_treatment_Ca = 4.17657*10**-7
added_treatment_Cl = 0.010468958
added_treatment_Mg = 0
added_treatment_K = 0
# The total outputs (and thus inputs) of elements in the original activity.
# Including the added treatments.
totalNoutput = 0.020770454172634983
totalPoutput = 0.0009270353321544698
totalCoutput = 0.0746397575218131
totalSoutput = 0.05012543333333333
totalAloutput = 0.0014265482200000001
totalFeoutput = 0.01485392671
totalNaoutput = 0.002186
totalCaoutput = 0.050656077699999996
totalCloutput = 0.040484
totalMgoutput = 0.0056955805
totalKoutput = 0.0003989
# The actual inputs of elements contained in the waste
# water of the original activity.
# If the value is negative it means that the mass balance in the original
# activity was not respected and we assume the element was not in the
# incoming waste water.
absolute_input_N = max(totalNoutput-added_treatment_N, 0)
absolute_input_C = max(totalCoutput-added_treatment_C, 0)
absolute_input_P = max(totalPoutput-added_treatment_P, 0)
absolute_input_S = max(totalSoutput-added_treatment_S, 0)
absolute_input_Al = max(totalAloutput-added_treatment_Al, 0)
absolute_input_Fe = max(totalFeoutput-added_treatment_Fe, 0)
absolute_input_Na = max(totalNaoutput-added_treatment_Na, 0)
absolute_input_Ca = max(totalCaoutput-added_treatment_Ca, 0)
absolute_input_Cl = max(totalCloutput-added_treatment_Cl, 0)
absolute_input_K = max(totalKoutput-added_treatment_K, 0)
absolute_input_Mg = max(totalMgoutput-added_treatment_Mg, 0)
total_flows=(N_original_flows
+ P_original_flows
+ C_original_flows
+ S_original_flows
+ Mg_original_flows
+ K_original_flows
+ Al_original_flows
+ Na_original_flows
+ Ca_original_flows
+ Fe_original_flows
+ Cl_original_flows)
    # Initialize the dictionary that will contain the impacts associated with each substance in the wastewater
dictionnary_original_flows= {flow[0] : [0]*len(methods) for flow in total_flows}
# Collect the characterization factors for each impact category
list_cfs= [bw.Method((meth)).load() for meth in methods]
meth_index = -1
for meth in methods:
meth_index += 1
cfs_dictionnary = { subst[0][1] : subst[1] for subst in list_cfs[meth_index]}
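        # cfs_dictionnary maps a biosphere flow UUID to its characterization
        # factor for the current impact assessment method.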
# For all the biosphere flows in the original waste water activity
for flow in total_flows:
if flow in N_original_flows: # If this flow contains nitrogen
                # We assume that the added treatment is
                # equally shared among the flows of the same element.
original_added_treatment = (flow[1] * added_treatment_N/totalNoutput)
if flow[0] in cfs_dictionnary: # if there is a cf for this flow in this method
# The impact is cf * new value of the flow in the microalgal wastewater
# The new value of the flow is :
# (Original flow - part that comes from the treatment)
# * (new concentration waste water/original concentration waste water)
# + Share of the treatment ending up in this flow.
impact_percubic_meter = cfs_dictionnary[flow[0]] * ((flow[1] - original_added_treatment)
* my_conc_N/absolute_input_N
+ original_added_treatment)
# Update the total impact associated to this flow for the right method
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in P_original_flows:
original_added_treatment = (flow[1] * added_treatment_P/totalPoutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_P/absolute_input_P
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in C_original_flows:
original_added_treatment = (flow[1] * added_treatment_C/totalCoutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_C/absolute_input_C
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in S_original_flows:
original_added_treatment = (flow[1] * added_treatment_S/totalSoutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_S/absolute_input_S
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in Mg_original_flows:
original_added_treatment = (flow[1] * added_treatment_Mg/totalMgoutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_Mg/absolute_input_Mg
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in K_original_flows:
original_added_treatment = (flow[1] * added_treatment_K/totalKoutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_K/absolute_input_K
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in Al_original_flows:
original_added_treatment = (flow[1] * added_treatment_Al/totalAloutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_Al/absolute_input_Al
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in Na_original_flows:
original_added_treatment = (flow[1] * added_treatment_Na/totalNaoutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_Na/absolute_input_Na
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in Ca_original_flows:
original_added_treatment = (flow[1] * added_treatment_Ca/totalCaoutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_Ca/absolute_input_Ca
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in Fe_original_flows:
original_added_treatment = (flow[1] * added_treatment_Fe/totalFeoutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_Fe/absolute_input_Fe
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
elif flow in Cl_original_flows:
original_added_treatment = (flow[1] * added_treatment_Cl/totalCloutput)
if flow[0] in cfs_dictionnary:
impact_percubic_meter = cfs_dictionnary[flow[0]]*((flow[1]-original_added_treatment)
* my_conc_Cl/absolute_input_Cl
+ original_added_treatment)
dictionnary_original_flows[flow[0]][meth_index] = impact_percubic_meter
# Summing the impacts of all flows for 1 cubic meter
list_sum_impacts_biosphere_waste_water =[]
for meth_index in range(len(methods)):
sum_impact = sum([dictionnary_original_flows[flow][meth_index] for flow in dictionnary_original_flows ])
list_sum_impacts_biosphere_waste_water.append(sum_impact)
#wastewater_copy.save()
return list_sum_impacts_biosphere_waste_water
'''Main calculating functions '''
def LCI_one_strain_uniquevalues(Biodict,
Physicdict,
Tech_opdict,
Locationdict,
LCIdict,
months_suitable_for_cultivation,
fraction_maxyield,
fishfeed_table,
elemental_contents):
'''Calculate the LCI for one set of parameters given in input by simulating
the cultivation and scaling the values to the FU.
Inputs:
    #Biodict : Dictionary with biological parameters
    #Physicdict : Dictionary with physical parameters
    #Tech_opdict : Dictionary with techno-operational parameters
    #LCIdict : Initialized LCI dictionary
    #months_suitable_for_cultivation : Months for cultivation ;
    list of month numbers : [a,b,c]
    #fraction_maxyield : Fraction of the maximum yield achieved ; dimensionless
    #fishfeed_table : DataFrame with fish feed composition
    #elemental_contents : Table with elemental compositions of macronutrients
    Outputs:
    # LCIdict_updated : Dictionary containing the calculated LCI
Other variables resulting from the simulation for further investigation
and review of the code (Non necessary):
    # surfaceyield : Areal yield ; kg.m-2.d-1
    # volumetricyield : Volumetric yield ; kg.L-1.d-1
    # optimization_performance : Fish feed substitution algorithm performance
    # needed_dbio_check : Not necessary
    # substitution_check : Not necessary
    # total_production_kg_dw : Total production ; kg dw
    # total_production_harvested_kg_dw : Actual harvested production ; kg dw
    # total_production_loss_kg_dw : Production not harvested ; kg dw
    # conc_waste_water_nutrient_N : N concentration in wastewater ; kg.m-3
# Same for all elements
#conc_waste_water_biomass : Biomass Concentration in wastewater ; kg.m-3
#bioact_molec_dbio : Molecule Concentration in the dried biomass ; kg. kg dbio -1
# min_centrifugation_rate_m3_h : Obsolete
# max_centrifugation_rate_m3_h : Obsolete
# totalwatercentrifuged : Total volume of water centrifuged ; m3
# tubelength : Total tube length over 1m2 ; m
# facilityvolume : Cultivation volume over 1m2 ; m3
# exchange_area : Exchange area tube/air over 1m2 ; m
# totalcooling_thermal : Thermal Cooling needed per m2
(not via Heat Exchanger) ; kWh
'''
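    # Illustrative call (hypothetical argument values; the dictionaries and
    # tables are assumed to be built elsewhere in the repository):
    #
    #   LCI = LCI_one_strain_uniquevalues(Biodict, Physicdict, Tech_opdict,
    #                                     Locationdict, LCIdict,
    #                                     months_suitable_for_cultivation=[4, 5, 6],
    #                                     fraction_maxyield=0.3,
    #                                     fishfeed_table=fishfeed_table,
    #                                     elemental_contents=elemental_contents)
    #   LCIdict_updated = LCI[0]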
LCIdict_updated = LCIdict.copy()
    # Collecting all values from the dictionaries and creating local variables
# Tech_opdict
height = Tech_opdict['height']
tubediameter = Tech_opdict['tubediameter']
gapbetweentubes = Tech_opdict['gapbetweentubes']
horizontaldistance = Tech_opdict['horizontaldistance']
length_of_PBRunit = Tech_opdict['length_of_PBRunit']
width_of_PBR_unit = Tech_opdict['width_of_PBR_unit']
biomassconcentration = Tech_opdict['biomassconcentration']
flowrate = Tech_opdict['flowrate']
centrifugation_efficiency = Tech_opdict['centrifugation_efficiency']
pumpefficiency = Tech_opdict['pumpefficiency']
slurry_concentration = Tech_opdict['slurry_concentration']
water_after_drying = Tech_opdict['water_after_drying']
recyclingrateaftercentrifuge = Tech_opdict['recyclingrateaftercentrifuge']
roughness = Tech_opdict['roughness']
rhosuspension = Tech_opdict['rhosuspension']
cleaningvolumeVSfacilityvolume = Tech_opdict['cleaningvolumeVSfacilityvolume']
concentration_hypo = Tech_opdict['concentration_hypo']
concentration_hydro = Tech_opdict['concentration_hydro']
boilerefficiency = Tech_opdict['boilerefficiency']
glass_life_expectancy = Tech_opdict['glass_life_expectancy']
prob_night_monitoring = Tech_opdict['prob_night_monitoring']
extraction = Tech_opdict['extraction']
prob_market_subst_animal_feed = Tech_opdict['prob_market_subst_animal_feed']
# Physicdict
Cp = Physicdict['Cp']
hconv = Physicdict['hconv']
rhomedium = Physicdict['rhomedium']
rhowater = Physicdict['rhowater']
Cw = Physicdict['Cw']
# Biodict
lipid_af_dw = Biodict['lipid_af_dw']
rhoalgae = Biodict['rhoalgae']
MJ_kglip = Biodict['MJ_kglip']
MJ_kgcarb = Biodict['MJ_kgcarb']
MJ_kgprot = Biodict['MJ_kgprot']
PAR = Biodict['PAR']
losspigmentantenna = Biodict['losspigmentantenna']
quantumyield = Biodict['quantumyield']
lossvoltagejump = Biodict['lossvoltagejump']
losstoATPNADPH = Biodict['losstoATPNADPH']
losstohexose = Biodict['losstohexose']
lossrespiration = Biodict['lossrespiration']
bioact_fraction_molec = Biodict['bioact_fraction_molec']
prob_no3 = Biodict['prob_no3']
Topt = Biodict['Topt']
T_plateau = Biodict['T_plateau']
# Conversion to Tmax, Tmin for simpler calculation
Tmax = Topt+T_plateau/2
Tmin = Topt-T_plateau/2
dcell = Biodict['dcell']
incorporation_rate = Biodict['incorporation_rate']
ash_dw = Biodict['ash_dw']
nutrient_utilisation = Biodict['nutrient_utilisation']
co2_utilisation = Biodict['co2_utilisation']
phospholipid_fraction = Biodict['phospholipid_fraction']
# Locationdict
lat = Locationdict['lat']
long = Locationdict['long']
Twell = Locationdict['Twell']
depth_well = Locationdict['depth_well']
azimuthfrontal = Locationdict['azimuthfrontal']
# Qualitative parameters are determined based on the probabilities
# and a new entry is created in the LCI dict
# Nsource
if random() < prob_no3: # Then the source is Nitrate
Nsource = 'no3'
else:
Nsource = 'nh3'
LCIdict_updated['Nsource'] = Nsource
# FOR STOCHASTICITY # NOT USED IN PAPER
# dice =random()
# if dice<0.3:
# #print('okC1')
# biochemicalclass='lip'
# elif 0.3<dice<0.6 :
# #print('okC2')
# biochemicalclass='prot'
# else:
# #print('okC2')
# biochemicalclass='carb'
# The molecule is a carbohydrate
biochemicalclass = 'carb'
LCIdict_updated['Bio_class'] = biochemicalclass
# Thermoregulation at night
if random() < prob_night_monitoring:
night_monitoring = 'yes'
else:
night_monitoring = 'no'
LCIdict_updated['night_monitoring'] = night_monitoring
# Market for substitution
if random() < prob_market_subst_animal_feed:
market_for_substitution = 'animal feed'
else:
market_for_substitution = 'fish feed'
LCIdict_updated['market_for_substitution'] = market_for_substitution
# Collecting PBR geometry
geom = functions.PBR_geometry(height,
tubediameter,
gapbetweentubes,
horizontaldistance,
length_of_PBRunit,
width_of_PBR_unit)
tubelength = geom[1]
facilityvolume = geom[0]
exchange_area = geom[-1]
# LCI values which do not depend on the cultivation simulation
# Calculating biomass composition at different levels
biomass_composition = functions.biomasscompo(
lipid_af_dw,
ash_dw,
water_after_drying,
phospholipid_fraction,
elemental_contents)
# ash-free biomass composition (af_dw)
prot_af_dw = biomass_composition[0]
carb_af_dw = biomass_composition[1]
# Including ash (dw)
lip_dw = biomass_composition[2]
prot_dw = biomass_composition[3]
carb_dw = biomass_composition[4]
# After harvesting and drying (dbio)
lip_dbio = biomass_composition[5]
prot_dbio = biomass_composition[6]
carb_dbio = biomass_composition[7]
ash_dbio = biomass_composition[8]
# Elementary composition
C_af_dw = biomass_composition[9]
N_af_dw = biomass_composition[10]
P_af_dw = biomass_composition[11]
K_af_dw = biomass_composition[12]
Mg_af_dw = biomass_composition[13]
S_af_dw = biomass_composition[14]
C_dw = biomass_composition[15]
N_dw = biomass_composition[16]
P_dw = biomass_composition[17]
K_dw = biomass_composition[18]
Mg_dw = biomass_composition[19]
S_dw = biomass_composition[20]
# Calculating the absolute bioactive molecule content in the dried biomass
if biochemicalclass == 'lip':
bioact_molec_dbio = bioact_fraction_molec * lip_dbio
elif biochemicalclass == 'carb':
bioact_molec_dbio = bioact_fraction_molec * carb_dbio
elif biochemicalclass == 'prot':
bioact_molec_dbio = bioact_fraction_molec * prot_dbio
# Nutrients
# Nitrogen consumption
# considering ash content
N_demand = N_dw * ((1/bioact_molec_dbio) * (1/(1 - water_after_drying)))
# Recycling part of the nutrients with supernatant
N_input = (N_demand / nutrient_utilisation + N_demand *
recyclingrateaftercentrifuge) / (1 + recyclingrateaftercentrifuge)
N_waste = N_input - N_demand
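    # Reading of the recycling correction above (also used for P, K and Mg
    # below): the gross input is a weighted average of the fresh-medium
    # requirement (demand / nutrient_utilisation, weight 1) and the
    # recycled-medium requirement (the demand itself, weight
    # recyclingrateaftercentrifuge), normalized by (1 + recycling rate).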
#Only the correct source of N is updated
if Nsource == 'nh3':
# Already as N in Ecoinvent
LCIdict_updated['market for ammonium sulfate, as N PBR'] = N_input
LCIdict_updated['market for calcium nitrate PBR'] = 0
if Nsource == 'no3':
LCIdict_updated['market for ammonium sulfate, as N PBR'] = 0
# Conversion from N to Calcium nitrate
LCIdict_updated['market for calcium nitrate PBR'] = N_input/0.15
# Phosphorus consumption
P_demand = P_dw * ((1/bioact_molec_dbio) * (1/(1 - water_after_drying)))
P2O5_demand = P_demand/0.4366 # Conversion P to P2O5
P2O5_input = (P2O5_demand / nutrient_utilisation + P2O5_demand *
recyclingrateaftercentrifuge) / (1 + recyclingrateaftercentrifuge) # Recylcing
P_waste = P2O5_input*0.4366 - P_demand
LCIdict_updated['P source production PBR'] = P2O5_input
# C
C_demand = C_dw * ((1 / bioact_molec_dbio) * (1 / (1 - water_after_drying)))
CO2_demand = C_demand * (44/12) # Conversion C to CO2
CO2_input = CO2_demand / co2_utilisation
CO2_direct_emission = CO2_input - CO2_demand
LCIdict_updated['Microalgae CO2 PBR'] = CO2_input
LCIdict_updated['CO2 direct emissions PBR'] = CO2_direct_emission
# K
K_demand = K_dw * ((1/bioact_molec_dbio) * (1/(1 - water_after_drying)))
K2O5_demand = K_demand*1.2 # Conversion to K2O5
K2O5_input = (K2O5_demand / nutrient_utilisation + K2O5_demand *
recyclingrateaftercentrifuge) / (1 + recyclingrateaftercentrifuge) # Recycling
K2O5_waste = K2O5_input - K2O5_demand
K_waste = K2O5_input/1.2 - K_demand
LCIdict_updated['K source production PBR'] = K2O5_input
# Mg
Mg_demand = Mg_dw * ((1 / bioact_molec_dbio)*(1/(1 - water_after_drying)))
MgSO4_demand = Mg_demand * (120.4/24.3)
MgSO4_input = (MgSO4_demand / nutrient_utilisation + MgSO4_demand *
recyclingrateaftercentrifuge) / (1 + recyclingrateaftercentrifuge) # Recycling
Mg_input = MgSO4_input * (24.3/120.4)
Mg_waste = Mg_input-Mg_demand
LCIdict_updated['Mg source production PBR'] = MgSO4_input
# S
S_demand = S_dw * ((1/bioact_molec_dbio) * (1/(1-water_after_drying)))
S_input = MgSO4_input*(32/120.4)
if Nsource == 'nh3': # Then ammonium sulfate also brings sulfate
# N input --> (NH4)2SO4 input --> S input
S_input += (N_input/0.21) * 0.24
S_waste = S_input-S_demand
# Cultivation simulation
# Intializing variables
totalcooling_thermal = 0
totalcooling = 0
totalheating = 0
totalproduction = 0
totalproduction_loss = 0
totalproduction_harvested = 0
totalwaterpumpedfromthefacility = 0
totalwaterpumpedfromthewell = 0
total_elec_centrifuge = 0
total_elec_mixing = 0
totalwatercentrifuged = 0
meantemp_at_harvest_time_cultivationperiod = 0
min_centrifugation_rate_list = []
max_centrifugation_rate_list = []
# Simulating an average day for each month of the cultivation period
for month in months_suitable_for_cultivation:
# Calling the cultivation simulation function
simulation_averageday = cultsimul.cultivation_simulation_timestep10(hconv,
Twell,
depth_well,
lat,
long,
azimuthfrontal,
month,
Cp,
height,
tubediameter,
gapbetweentubes,
horizontaldistance,
length_of_PBRunit,
width_of_PBR_unit,
rhoalgae,
rhomedium,
rhosuspension,
dcell,
Tmax,
Tmin,
Biodict,
ash_dw,
Nsource,
fraction_maxyield,
biomassconcentration,
flowrate,
centrifugation_efficiency,
pumpefficiency,
slurry_concentration,
water_after_drying,
recyclingrateaftercentrifuge,
night_monitoring,
elemental_contents)
# Collecting results and multiplying by
# average number of days in a month : 30.4
monthly_heating_energy = simulation_averageday[1]*30.4 #kWh
monthly_waterpumped_from_the_facility = simulation_averageday[2]*30.4 # L
monthly_waterpumped_from_the_well = simulation_averageday[3]*30.4 # L
monthly_production = simulation_averageday[4]*30.4 # g dw
monthly_production_harvested = simulation_averageday[5]*30.4 # g dw
monthly_production_loss = simulation_averageday[6]*30.4 # g dw
monthly_volumetric_yield = simulation_averageday[7]*30.4 # g dw.m-3
monthly_energy_tocentrifuge = simulation_averageday[8]*30.4 # kWh
collectedtemperatureevolution = simulation_averageday[9] # °C
monthly_cooling_energy_thermal = simulation_averageday[17]*30.4 # kWh
monthly_cooling_energy = simulation_averageday[17]*30.4 # kWh
# Collection min and max centrifugation rate (Obsolete)
# list centrifugation rate L.s-1
list_centrifugation_rate_wholeunit = simulation_averageday[20]
        # Water centrifuged during the month ; L
water_centrifuged = simulation_averageday[21]*30.4
list_centrifugation_rate_wholeunit_not_0 = [
i for i in list_centrifugation_rate_wholeunit if i != 0]
min_centrifugation_rate_list.append(
min(list_centrifugation_rate_wholeunit_not_0))
max_centrifugation_rate_list.append(
max(list_centrifugation_rate_wholeunit_not_0))
# Temperature : Some processes have temperature as an input for their
# electricity consumption
#
# For Mixing : Mixing is needed at any time of the day and night.
meantemp_daytotal = (sum(collectedtemperatureevolution) /
len(collectedtemperatureevolution))
monthly_Electricity_mixing_day = functions.mixing_perday(
rhosuspension,
tubediameter,
pumpefficiency,
flowrate,
roughness,
meantemp_daytotal,
biomassconcentration,
tubelength)[0] * 30.4 # MJ.m-2.month
        # For Drying : Drying requires heating the slurry to 100 °C and the
        # energy depends on the initial temperature : the temperature of the
        # culture at harvest time (9 PM)
temp_at_harvest_time = collectedtemperatureevolution[7560]
# Summing over months in the loop
totalproduction += monthly_production # g dw
totalproduction_harvested += monthly_production_harvested # g dw
totalproduction_loss += monthly_production_loss # g dw
totalcooling_thermal += monthly_cooling_energy_thermal # kWh
totalcooling += monthly_cooling_energy # kWh
totalheating += monthly_heating_energy # kWh
total_elec_centrifuge += monthly_energy_tocentrifuge # kWh
total_elec_mixing += monthly_Electricity_mixing_day/3.6 # conversion to kWh
totalwaterpumpedfromthefacility += monthly_waterpumped_from_the_facility # L
totalwaterpumpedfromthewell += monthly_waterpumped_from_the_well # L
totalwatercentrifuged += water_centrifuged # L
# Collecting the mean temperature over the cultivation period
# For drying
meantemp_at_harvest_time_cultivationperiod += temp_at_harvest_time/len(months_suitable_for_cultivation) # °C
# End of the loop
# Collecting min and max centrifugation rate during cultivation period (Obsolete)
min_centrifugation_rate_m3_h = min(
min_centrifugation_rate_list)*3.6 # m3.h-1
max_centrifugation_rate_m3_h = max(
max_centrifugation_rate_list)*3.6 # m3.h-1
# Total production conversion to kg
total_production_kg_dw = totalproduction/1000 # kg dw
total_production_harvested_kg_dw = totalproduction_harvested/1000
total_production_loss_kg_dw = totalproduction_loss/1000
# Adding the energy for the initial heating of the well water
    # Water from the well is heated to Tmin
if Twell < Tmin:
initalheating = facilityvolume*Cp*(Tmin-Twell)/3.6 # kWh
else: # If the water is already warm enough
        initalheating = 0  # kWh
#Updating LCI with calculated values
# Scaling down to 1 kg of molecule in dried biomass
LCIdict_updated['Heating kWh PBR'] = ((totalheating + initalheating)/total_production_harvested_kg_dw)*(
1/bioact_molec_dbio)*(1/(1-water_after_drying))
LCIdict_updated['Cooling kWh PBR'] = (
totalcooling/total_production_harvested_kg_dw)*(1/bioact_molec_dbio)*(1/(1-water_after_drying))
LCIdict_updated['Electricity centrifuge kWh PBR'] = (
total_elec_centrifuge/total_production_harvested_kg_dw) * (1/bioact_molec_dbio)*(1/(1-water_after_drying))
LCIdict_updated['Electricity mixing kWh PBR'] = (
total_elec_mixing/total_production_harvested_kg_dw) * (1/bioact_molec_dbio)*(1/(1-water_after_drying))
# In this version, no electricity consumption is assumed for aeration
LCIdict_updated['Electricity aeration kWh PBR'] = 0
# Pumping water from the well and facility
#Calling the function for depth = well depth
energy_perm3_fromthewell = functions.pumping_per_m3(
rhowater, depth_well, pumpefficiency)
# Pumping from the facility
energy_perm3_fromthefacility = functions.pumping_per_m3(
rhowater, 1, pumpefficiency)
# Water pumped from the well
initialpumping = facilityvolume*energy_perm3_fromthewell
pumpingforcleaning = (cleaningvolumeVSfacilityvolume
* facilityvolume
* energy_perm3_fromthewell)
# Energy consumption for pumping water during the cultivation
pumping_during_cultiv = (totalwaterpumpedfromthefacility
* energy_perm3_fromthefacility
+ totalwaterpumpedfromthewell
* energy_perm3_fromthewell)/1000 # MJ Conversion L to m3
totalenergypumping = initialpumping+pumpingforcleaning + pumping_during_cultiv
LCIdict_updated['Electricity pumping kWh PBR'] = ((
(totalenergypumping/3.6)
/ total_production_harvested_kg_dw)
* (1/bioact_molec_dbio)
* (1/(1 - water_after_drying))) # kWh
# Glass consumption
# Assuming a constant wall thickness of 2 mm.
glass_perm2 = exchange_area * 0.002 # m3 of glass
glass_volume_perkgmolecule = ((glass_perm2/total_production_harvested_kg_dw)
* (1/bioact_molec_dbio)
* (1/(1 - water_after_drying))
* 1/(glass_life_expectancy)) # m3
glass_mass_perkgmolec = glass_volume_perkgmolecule * 2700 # kg # 2700 kg.m-3
    # The glass life expectancy is already accounted for in
    # glass_volume_perkgmolecule above, so it is not applied a second time here.
    LCIdict_updated['Glass PBR'] = glass_mass_perkgmolec
# Drying
water_to_vaporize_perkilo_dbio = ((1/slurry_concentration)
* (1 - slurry_concentration)
* (1 - water_after_drying)) # L. kg-1 dbio
# /1000 for conversion from kJ to MJ and /3.6 from MJ to kWh
Electricity_drying_perkg = (water_to_vaporize_perkilo_dbio
* (Cw + Cp*(100-meantemp_at_harvest_time_cultivationperiod))
/ (boilerefficiency*1000))/3.6 # kWh.kg dbio-1
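    # Unit check (illustrative): water_to_vaporize_perkilo_dbio [L, i.e. ~kg]
    # times (Cw + Cp*(100 - T)) [kJ.kg-1] gives kJ per kg of dried biomass;
    # dividing by boilerefficiency*1000 converts to MJ, and /3.6 converts MJ
    # to kWh.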
LCIdict_updated['Electricity drying kWh PBR'] = (Electricity_drying_perkg *
(1/bioact_molec_dbio)) # Scaled up to 1 kg of molecule kWh
# Water consumption
initialfilling = facilityvolume # m3
refillingduringcultivation = totalwaterpumpedfromthewell/1000 # m3
totalwater = (initialfilling
+ refillingduringcultivation
+ cleaningvolumeVSfacilityvolume*initialfilling) # m3
# Water used for cultivation and not for cleaning
totalwater_cultivation = refillingduringcultivation + initialfilling # m3
totalwater_cultivation_perkgmolecule = ((totalwater_cultivation/total_production_harvested_kg_dw)
* (1/bioact_molec_dbio)
* (1/(1-water_after_drying))) # m3
LCIdict_updated['Water(Cultivation) PBR'] = totalwater_cultivation_perkgmolecule # m3
#Cleaning
totalwater_cleaning_perkgmolecule = ((cleaningvolumeVSfacilityvolume*initialfilling/total_production_harvested_kg_dw)
* (1/bioact_molec_dbio)
* (1/(1-water_after_drying)))
LCIdict_updated['Water Cleaning PBR'] = totalwater_cleaning_perkgmolecule # m3
# Wastewater
    # All water used for cultivation - what has been vaporized during drying (scaled per kg molecule)
# / 1000 to convert water_to_vaporize_perkilo_dbio from L to m3
totalwater_towaste_perkgmolecule = (totalwater_cultivation_perkgmolecule
- water_to_vaporize_perkilo_dbio
* (1/bioact_molec_dbio) / 1000) # m3
# Negative sign as waste treatment activity (specific to brightway)
LCIdict_updated['Wastewater treatment PBR'] = - totalwater_towaste_perkgmolecule
# Not scaled to molecule for easier wastewater concentration calculation
totalwater_towaste = (totalwater_cultivation
- water_to_vaporize_perkilo_dbio*total_production_harvested_kg_dw
* (1-water_after_drying) / 1000) # m3
# Average Concentration waste water in biomass
conc_waste_water_biomass = total_production_loss_kg_dw/totalwater_towaste # kg.m-3
# kg.m-3 or g.L-1 Waste nutrient per kg molecule produced/total wastewater per kg molecule produced
# Includes the elements in the biomass
conc_waste_water_nutrient_N = ((N_waste/totalwater_towaste_perkgmolecule
+ conc_waste_water_biomass * N_dw)) # kg.m-3 or g.L-1
conc_waste_water_nutrient_P = ((P_waste/totalwater_towaste_perkgmolecule
+ conc_waste_water_biomass * P_dw)) # kg.m-3 or g.L-1
conc_waste_water_nutrient_K = ((K_waste/totalwater_towaste_perkgmolecule
+ conc_waste_water_biomass * K_dw)) # kg.m-3 or g.L-1
conc_waste_water_nutrient_Mg =((Mg_waste/totalwater_towaste_perkgmolecule
+ conc_waste_water_biomass * Mg_dw)) # kg.m-3 or g.L-1
conc_waste_water_nutrient_S = ((S_waste/totalwater_towaste_perkgmolecule
+ conc_waste_water_biomass * S_dw)) # kg.m-3 or g.L-1
# Carbon only in biomass, CO2 is degazed
conc_waste_water_C = C_dw * conc_waste_water_biomass # kg.m-3
# Land
LCIdict_updated['Land PBR'] = (
1/total_production_harvested_kg_dw)*(1/bioact_molec_dbio)*(1/(1-water_after_drying)) # m2
# Cleaning substances
# Half of the water with 1 substance, half with the other one
#Hypochlorite
totalhypo = ((cleaningvolumeVSfacilityvolume*facilityvolume)/2) * concentration_hypo # kg
#Hydrogen peroxide
totalhydro = ((cleaningvolumeVSfacilityvolume*facilityvolume)/2) * concentration_hydro # kg
LCIdict_updated['Hydrogen peroxyde PBR'] = (
totalhydro/total_production_harvested_kg_dw)*(1/bioact_molec_dbio)*(1/(1-water_after_drying)) # kg
LCIdict_updated['Hypochlorite PBR'] = (
totalhypo/total_production_harvested_kg_dw) *(1/bioact_molec_dbio)*(1/(1-water_after_drying)) # kg
#Extraction and substitution
if extraction == 'yes':
# 1 kWh to disrupt 1 kg of microalgal biomass (kg dbio)
LCIdict_updated['Electricity cell disruption kWh PBR'] = 1 * (1/bioact_molec_dbio) # kWh.kg-1
# If extraction, then the
# remaining biomass composition is changed according to the biochemical class of the extracted molecule
if biochemicalclass == 'lip':
lip_dbio_after_extract = (lip_dbio - bioact_molec_dbio)/(1-bioact_molec_dbio)
carb_dbio_after_extract = carb_dbio / (1-bioact_molec_dbio)
prot_dbio_after_extract = prot_dbio / (1-bioact_molec_dbio)
ash_dbio_after_extract = ash_dbio / (1-bioact_molec_dbio)
water_dbio_after_extract = water_after_drying /(1-bioact_molec_dbio)
elif biochemicalclass == 'carb':
lip_dbio_after_extract = lip_dbio / (1-bioact_molec_dbio)
carb_dbio_after_extract = (carb_dbio-bioact_molec_dbio) / (1-bioact_molec_dbio)
prot_dbio_after_extract = prot_dbio / (1-bioact_molec_dbio)
ash_dbio_after_extract = ash_dbio / (1-bioact_molec_dbio)
water_dbio_after_extract = water_after_drying / (1-bioact_molec_dbio)
elif biochemicalclass == 'prot':
lip_dbio_after_extract = lip_dbio / (1-bioact_molec_dbio)
carb_dbio_after_extract = carb_dbio / (1-bioact_molec_dbio)
prot_dbio_after_extract = (prot_dbio-bioact_molec_dbio) / (1-bioact_molec_dbio)
ash_dbio_after_extract = ash_dbio / (1-bioact_molec_dbio)
water_dbio_after_extract = water_after_drying / (1-bioact_molec_dbio)
# After extraction, the substitution will occur with the new composition of the biomass
        # Call the function which calculates the masses of substituted fish feed ingredients
substitution = functions.optimization_for_fishfeed_substitution(fishfeed_table,
lip_dbio_after_extract,
prot_dbio_after_extract,
carb_dbio_after_extract,
water_dbio_after_extract,
ash_dbio_after_extract,
incorporation_rate,
MJ_kgcarb,
MJ_kgprot,
MJ_kglip)
# if no extraction (molecule given to fish directly),
# the biomass composition stays the same (Obsolete)
else:
LCIdict_updated['Electricity cell disruption kWh PBR'] = 0 # kWH.kg-1
LCIdict_updated['Extraction electricity kWh PBR'] = 0
LCIdict_updated['Co solvent Extraction PBR'] = 0
substitution = functions.optimization_for_fishfeed_substitution(fishfeed_table,
lip_dbio,
prot_dbio,
carb_dbio,
water_after_drying,
ash_dbio,
incorporation_rate,
MJ_kgcarb,
MJ_kgprot,
MJ_kglip)
# Choose the market that the dependent coproducts enter
if market_for_substitution == 'animal feed': # (Obsolete, always fish feed)
# Model substitution 1 Animal Feed
        # kg # the same substitution occurs for every kilo
feedprot_m1 = substitution[0] * (1/bioact_molec_dbio - 1) # kg
feedenergy_m1 = substitution[1] * (1/bioact_molec_dbio - 1) # MJ
LCIdict_updated['Feed energy PBR'] = feedenergy_m1
LCIdict_updated['Feed protein PBR'] = feedprot_m1
optimization_performance = 'No optimization'
substitution_check = 'No optimization'
# Model substitution 2 Fish Feed
elif market_for_substitution == 'fish feed':
LCIdict_updated['Feed energy PBR'] = 0 # Do not use Model 1
LCIdict_updated['Feed protein PBR'] = 0
        if extraction == 'yes':  # Then the substitution only takes place with the remaining biomass
# vect_substitution is a list containing the masses of fish feed ingredient substituted by the remaining biomass
# (1/bioact_molec_dbio-1) = remaining biomass after extraction of the FU : 1kg of molecule
# substitution[5] is the list of masses of fish feed ingredient
# replaced by the given biomass composition. in kg. kg dbio-1 (sum =1 kg)
vect_substitution = substitution[5]*(1/bioact_molec_dbio-1) # kg
# evaluation of the optimized recipe
optimization_performance = substitution[-1]*(1/bioact_molec_dbio-1)
        else:  # then the molecule incorporated in the biomass takes part in the substitution
# substitution[5] is the list of masses of fish feed ingredient
# replaced by the given biomass composition. in kg. kg dbio-1 (sum =1 kg)
vect_substitution = substitution[5]*(1/bioact_molec_dbio) # kg
# evaluation of the optimized recipe
optimization_performance = substitution[-1]*(1/bioact_molec_dbio)
# Adding the ingredients of the fish feed to the LCI for substitution
substitution_check = 0 # Obsolete
for a in range(0, len(fishfeed_table['Ingredient'])):
# Ingredients are ranked in the same order in the vector and in the fish feed table
LCIdict_updated[fishfeed_table['Ingredient'][a]] = vect_substitution[a]
substitution_check = (substitution_check
+ LCIdict_updated[fishfeed_table['Ingredient'][a]]) #Obsolete
# Yields
numberofcultivationdays = len(months_suitable_for_cultivation)*30.4 # days
volumetricyield = (total_production_kg_dw
/ (facilityvolume*1000*numberofcultivationdays)) # kg.L-1.d-1
surfaceyield = total_production_kg_dw/numberofcultivationdays # kg.days-1
# Check mass balance (Obsolete)
needed_dbio_check = 1/(lipid_af_dw * (1-ash_dw) *
(1-water_after_drying) * bioact_molec_dbio)
return [LCIdict_updated,
surfaceyield,
volumetricyield,
optimization_performance,
needed_dbio_check,
substitution_check,
total_production_kg_dw,
total_production_harvested_kg_dw,
total_production_loss_kg_dw,
conc_waste_water_nutrient_N,
conc_waste_water_nutrient_P,
conc_waste_water_nutrient_K,
conc_waste_water_nutrient_Mg,
conc_waste_water_biomass,
conc_waste_water_C,
conc_waste_water_nutrient_S,
bioact_molec_dbio,
min_centrifugation_rate_m3_h,
max_centrifugation_rate_m3_h,
totalwatercentrifuged,
tubelength,
facilityvolume,
exchange_area,
totalcooling_thermal] # []
def sampling_func(Tech_opdict_distributions,
Biodict_distributions,
Locationdict_distributions,
Physicdict_distributions,
size,
type_sens):
'''Function which returns a random sample for the input space of
    non-constant parameters according to the sensitivity analysis algorithm (Sobol or FAST).
Inputs:
# All parameters distributions dictionnaries :
Tech_opdict_distributions,
Biodict_distributions,
Locationdict_distributions,
Physicdict_distributions
#size : Size of the sample
    (Final number of combinations = size*(number of uncertain parameters) for FAST,
    size*(number of uncertain parameters + 2) for Sobol)
#type_sens: "SOBOL" or "FAST"
Outputs :
-sample : Generated sample. Array with 1 row = 1 combination of uncertain parameters
-names_param : List of names of the uncertain parameters
-names_param_op : List of names of the uncertain parameters from Tech_opdict_distributions
-names_param_bio : List of names of the uncertain parameters from Biodict_distributions
-names_param_geo : List of names of the uncertain parameters from Locationdict_distributions
-names_param_phy : List of names of the uncertain parameters from Physicdict_distributions
-problem : Problem format for the sensitivity analysis from Salib library (Saltelli)
'''
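    # The "problem" dictionary returned at the end follows the SALib
    # convention, e.g. (purely illustrative names and bounds):
    #
    #   problem = {'num_vars': 3,
    #              'names': ['tubediameter', 'lipid_af_dw', 'lat'],
    #              'bounds': [[0.03, 0.08], [0.1, 0.4], [35.0, 55.0]],
    #              'dists': ['unif', 'unif', 'unif']}
    #
    # For 'triang' distributions the bounds are re-expressed below as
    # [width, peak position as a fraction of the width].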
# Creation of the problem
    # Creating a copy of the distribution dictionaries containing only the
    # parameters with distributions (not the constant ones)
# Tech_opdict
Tech_opdict_distributions_input = Tech_opdict_distributions.copy()
to_be_deleted_op = []
# Collecting the parameters that are constant
for param in Tech_opdict_distributions_input:
if Tech_opdict_distributions_input[param][0] == 'unique' or Tech_opdict_distributions_input[param][0] == 'binary':
to_be_deleted_op.append(param)
# Deleting the parameters that are constant
for a in to_be_deleted_op:
Tech_opdict_distributions_input.pop(a)
# Biodict
Biodict_distributions_input = Biodict_distributions.copy()
to_be_deleted_bio = []
for param in Biodict_distributions_input:
if Biodict_distributions_input[param][0] == 'unique' or Biodict_distributions_input[param][0] == 'binary':
to_be_deleted_bio.append(param)
for a in to_be_deleted_bio:
Biodict_distributions_input.pop(a)
# Geography
Locationdict_distributions_input = Locationdict_distributions.copy()
to_be_deleted_bio = []
for param in Locationdict_distributions_input:
if Locationdict_distributions_input[param][0] == 'unique' or Locationdict_distributions_input[param][0] == 'binary':
to_be_deleted_bio.append(param)
for a in to_be_deleted_bio:
Locationdict_distributions_input.pop(a)
# Physics
Physicdict_distributions_input = Physicdict_distributions.copy()
to_be_deleted_bio = []
for param in Physicdict_distributions_input:
if Physicdict_distributions_input[param][0] == 'unique' or Physicdict_distributions_input[param][0] == 'binary':
to_be_deleted_bio.append(param)
for a in to_be_deleted_bio:
Physicdict_distributions_input.pop(a)
# Collecting names, bounds , dists to create Saltelli problem
names_param = []
bounds = []
dists = []
# 1) Operational
names_param_op = []
count_col = -1
col_with_triang = [] # Triangular distributions will need post processing
actual_lower_bounds = []
for param in Tech_opdict_distributions_input:
count_col += 1
names_param_op.append(param)
distrib = Tech_opdict_distributions_input[param][0]
dists.append(distrib)
if distrib == 'unif': # then bounds = upper bound, lower bound
bounds.append([Tech_opdict_distributions_input[param]
[1][1], Tech_opdict_distributions_input[param][1][2]])
elif distrib == 'norm': # then bounds = mean, sd
bounds.append([Tech_opdict_distributions_input[param]
[1][3], Tech_opdict_distributions_input[param][1][4]])
elif distrib == 'triang': # then bounds = width, location of the peak in % of width, Assume lower bound = 0
width = (Tech_opdict_distributions_input[param][1][2]
- Tech_opdict_distributions_input[param][1][1])
peak_lowerbound = (Tech_opdict_distributions_input[param][1][3]
- Tech_opdict_distributions_input[param][1][1])
bounds.append([width, peak_lowerbound/width])
            # Collect columns with a triangular distribution to shift the values afterwards (otherwise the lower bound = 0)
col_with_triang.append(count_col)
actual_lower_bounds.append(Tech_opdict_distributions_input[param][1][1])
# 2) Biological
names_param_bio = []
for param in Biodict_distributions_input:
count_col += 1
names_param_bio.append(param)
distrib = Biodict_distributions_input[param][0]
dists.append(distrib)
if distrib == 'unif': # then bounds = upper bound, lower bound
bounds.append([Biodict_distributions_input[param][1]
[1], Biodict_distributions_input[param][1][2]])
elif distrib == 'norm': # then bounds = mean, sd
bounds.append([Biodict_distributions_input[param][1]
[3], Biodict_distributions_input[param][1][4]])
elif distrib == 'triang': # then bounds = width, location of the peak in % of width, Assume lower bound = 0
width = (Biodict_distributions_input[param][1][2]
- Biodict_distributions_input[param][1][1])
peak_lowerbound = (Biodict_distributions_input[param][1][3]
- Biodict_distributions_input[param][1][1])
bounds.append([width, peak_lowerbound/width])
            # Collect columns with a triangular distribution to shift the values afterwards (otherwise the lower bound = 0)
col_with_triang.append(count_col)
actual_lower_bounds.append(Biodict_distributions_input[param][1][1])
# 3) Geography
names_param_geo = []
for param in Locationdict_distributions_input:
count_col += 1
names_param_geo.append(param)
distrib = Locationdict_distributions_input[param][0]
dists.append(distrib)
if distrib == 'unif': # then bounds = upper bound, lower bound
bounds.append([Locationdict_distributions_input[param]
[1][1], Locationdict_distributions_input[param][1][2]])
elif distrib == 'norm': # then bounds = mean, sd
bounds.append([Locationdict_distributions_input[param]
[1][3], Locationdict_distributions_input[param][1][4]])
elif distrib == 'triang': # then bounds = width, location of the peak in % of width, Assume lower bound = 0
width = (Locationdict_distributions_input[param][1][2]
- Locationdict_distributions_input[param][1][1])
peak_lowerbound = (Locationdict_distributions_input[param][1][3]
- Locationdict_distributions_input[param][1][1])
bounds.append([width, peak_lowerbound/width])
            # Collect columns with a triangular distribution to shift the values afterwards (otherwise the lower bound = 0)
col_with_triang.append(count_col)
actual_lower_bounds.append(Locationdict_distributions_input[param][1][1])
# 3) Physics
names_param_phy = []
for param in Physicdict_distributions_input:
distrib = Physicdict_distributions_input[param][0]
dists.append(distrib)
count_col += 1
names_param_phy.append(param)
if distrib == 'unif': # then bounds = upper bound, lower bound
bounds.append([Physicdict_distributions_input[param]
[1][1], Physicdict_distributions_input[param][1][2]])
elif distrib == 'norm': # then bounds = mean, sd
bounds.append([Physicdict_distributions_input[param]
[1][3], Physicdict_distributions_input[param][1][4]])
elif distrib == 'triang': # then bounds = width, location of the peak in % of width, Assume lower bound = 0
width = (Physicdict_distributions_input[param][1][2]
- Physicdict_distributions_input[param][1][1])
peak_lowerbound = (Physicdict_distributions_input[param][1][3] - Physicdict_distributions_input[param][1][1])
bounds.append([width, peak_lowerbound/width])
            # Collect columns with a triangular distribution to shift the values afterwards (otherwise the lower bound = 0)
col_with_triang.append(count_col)
actual_lower_bounds.append(Physicdict_distributions_input[param][1][1])
names_param = names_param_op + names_param_bio + names_param_geo + names_param_phy
problem = {'num_vars': len(names_param), # number of variables
'names': names_param,
'bounds': bounds,
'dists': dists}
if type_sens == 'SOBOL':
sample = SALib.sample.saltelli.sample(problem,
size,
calc_second_order=False)
if type_sens == 'FAST':
sample = SALib.sample.fast_sampler.sample(problem,
size)
# Shift the values for the triangular distributions, otherwise lowerbound=0
for index_col in range(len(col_with_triang)):
sample[:, col_with_triang[index_col]] = sample[:,col_with_triang[index_col]]+actual_lower_bounds[index_col]
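    # Resulting sample shapes: Saltelli sampling with calc_second_order=False
    # yields size*(num_vars + 2) rows, FAST sampling yields size*num_vars rows;
    # each row is one combination of the uncertain parameters.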
return sample, names_param, names_param_op, names_param_bio, names_param_geo, names_param_phy, problem
def final_function_simulations(dict_mono_technosphere_lcas,
Tech_opdict,
Biodict,
Locationdict,
Physicdict,
Tech_opdict_distributions,
Biodict_distributions,
Locationdict_distributions,
Physicdict_distributions,
LCIdict,
size,
months_suitable_for_cultivation,
fraction_maxyield,
elemental_contents,
fishfeed_table,
methods,
categories_contribution,
processes_in_categories,
type_sens):
'''Function which calls all other functions and generates the LCA results,
uncertainty, sensitivity and contribution analysis.
Inputs:
    #All parameter dictionaries (baseline values):
    -Tech_opdict
    -Biodict
    -Locationdict
    -Physicdict
    # All parameter distributions dictionaries :
    -Tech_opdict_distributions,
    -Biodict_distributions,
    -Locationdict_distributions,
    -Physicdict_distributions
    #LCIdict: the LCI dictionary
    #size : Size of the sample
    (Final number of combinations = size*(number of uncertain parameters) for FAST,
    size*(number of uncertain parameters + 2) for Sobol)
#months_suitable_for_cultivation : Months for cultivation ;
list of month numbers : [a,b,c]
#fraction_maxyield : Fraction of the maximum yield achieved ; .
    #elemental_contents : DataFrame with elemental compositions of macronutrients
#fishfeed_table : DataFrame with fish feed composition
#methods : List of Impact categories to apply for the LCA
#categories_contribution : Names of process categories considered for the contribution analysis
#processes_in_categories : List of processes to assign to categories (same order)
#type_sens: "SOBOL" or "FAST"
Outputs :
#sample : Randomly generated sample. Array with 1 row = 1 combination of uncertain parameters (1 iteration)
#problem_sobol_FAST : Sobol or Fast problem as generated by SAlib
#results_table_df : Final dataframe with 1 row per simulation (iteration).
Each row contains the values of the uncertain parameters, the LCI figures,
key figures about the simulation (yields etc.) and the LCIA.
#results_table : Same but as an numpy array
    #results_sobol_fast : List of lists of type : [name of the impact category,
    Dataframe with the sensitivity indices of the uncertain parameters
    for this impact category]
    #list_tables_contribution_df_melted : List of melted dataframes,
    each dataframe containing the contributions of each process category for one impact category
    #list_tables_contribution_abs_df_melted : List of melted dataframes,
    each dataframe containing the contributions of each process category for one impact category,
    calculated as shares of the sum of absolute values.
#all_methods_contribution_abs_df_melted : Dataframes merging the previous ones (all impact categories)
#list_tables_contribution_df : Same as previous but not melted
    #list_opti_perfo : List of performances obtained by the optimization
    algorithm for each iteration. (Obsolete)
#result_LCI_rescaled : Dataframe with 1 row per simulation (iteration)
but LCI figures are scaled to 1kg of dried biomass instead of 1 kg of molecule
#sensi_multi_melt : Dataframe with Total_order Sensitivity index for each parameter and each impact category
    #desc_stat_results : Dataframe with a statistical description of the simulation's outputs.
    #total_desc_stat_contri_df : Dataframe with a statistical description of the contribution analysis.
'''
# Columns that should not be processed numerically
columns_not_float = ['Nsource', 'night_monitoring',
'Bio_class', 'market_for_substitution']
# Columns that are not LCI nor LCIA but other simulation's outputs
names_suppl_info = ['bioact_molec_dbio',
'Areal productivity kg.m-2.d-1',
'tube length m',
'PBR volume m3',
'exchange area m2',
'total cooling (thermal kWh)',
'Volumetric productivity kg.L-2.d-1',
'Total production kg dw.m-2',
'Total production harvested kg dw.m-2',
'Total_production_loss_kg dw.m-2',
'Conc wastewater g N.L-1',
'Conc wastewater g P.L-1',
'Conc wastewater g K.L-1',
'Conc wastewater g Mg.L-1',
'Conc wastewater g dw.L-1',
'Conc wastewater g C.L-1',
'Conc wastewater g S.L-1',
' min_centrifugation_rate_m3_h',
'max_centrifugation_rate_m3_h',
'Total volume centrifuged m3'
]
# Creates a sample and a saltelli problem with the function
sampling_res = sampling_func(Tech_opdict_distributions,
Biodict_distributions,
Locationdict_distributions,
Physicdict_distributions,
size,
type_sens)
sample = sampling_res[0]
names_param = sampling_res[1]
names_param_op = sampling_res[2]
names_param_bio = sampling_res[3]
names_param_geo = sampling_res[4]
names_param_phy = sampling_res[5]
problem_sobol_FAST = sampling_res[6]
# Initialize variables that will receive results
results_table = np.empty((0,
len(names_param)+len(LCIdict) +len(names_suppl_info)+len(columns_not_float)+len(methods)),
dtype=float)
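    # One row is appended per iteration, laid out as:
    # [uncertain parameter values | LCI entries (including the four
    #  qualitative choices) | supplementary simulation outputs | one LCIA
    #  score per impact assessment method]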
    # list of tables which will contain the contribution of each process category to each impact category
list_tables_contribution = [np.zeros((len(sample),
len(categories_contribution)),
dtype=float) for i in range(len(methods))]
# Contributions calculated by dividing by the sum of the absolute values
# We will only keep this one eventually
list_tables_contribution_abs=[np.zeros((len(sample),
len(categories_contribution)),
dtype=float) for i in range(len(methods))]
print('ok1')
count = -1
list_opti_perfo = []
for param_set in sample: # One set of uncertain parameters
# Copy that will be modified for calculation of the LCA for this set of parameters
new_dict_mono_technosphere_lcas = copy.deepcopy(dict(dict_mono_technosphere_lcas))
count += 1
        # Update the dictionaries with the values of the sample
# 1 ) Tech_opdict
for param in Tech_opdict:
# We browse the parameters to look for the uncertain ones
# which need to be updated
for index in range(len(names_param_op)):
# Looking for the corresponding parameter in the saltelli set
if names_param[index] == param:
                    # Then it is an uncertain parameter and its value is
# updated with the one from the generated sample
Tech_opdict[param] = param_set[index]
        # We will do the same thing for the other dictionaries, but there is no need to browse all possible parameters,
        # just the ones from the other dictionaries which are left.
new_start = len(names_param_op)
# 2) Biodict
for param in Biodict:
# We browse the parameters to look for the variable ones
# which need to be updated
for index in range(new_start, new_start+len(names_param_bio)):
# Looking for the corresponding parameter in the saltelli set
if names_param[index] == param:
Biodict[param] = param_set[index]
new_start = new_start+len(names_param_bio)
# 3) Locationdict
for param in Locationdict:
# We browse the parameters to look for the variable ones
# that need to be updated
for index in range(new_start, new_start+len(names_param_geo)):
# Looking for the corresponding parameter in the saltelli set
if names_param[index] == param:
Locationdict[param] = param_set[index]
new_start = new_start+len(names_param_geo)
# 4) Physicdict
for param in Physicdict:
# We browse the parameters to look for the variable ones
# that need to be updated
for index in range(new_start, new_start+len(names_param_phy)):
# Looking for the corresponding parameter in the saltelli set
if names_param[index] == param:
Physicdict[param] = param_set[index]
        # Calculates the LCI for this set with the updated dictionaries
LCI = LCI_one_strain_uniquevalues(Biodict,
Physicdict,
Tech_opdict,
Locationdict,
LCIdict,
months_suitable_for_cultivation,
fraction_maxyield,
fishfeed_table,
elemental_contents)
# Collecting the results of the function
LCIdict_collected = LCI[0]
surfaceyield = LCI[1] # kg dw .d-1
volumetricyield = LCI[2] # kg dw.L-1.d-1
optimization_performance = LCI[3] # kg dw.d-1
total_production_kg_dw = LCI[6] # kg dw
total_production_harvested_kg_dw = LCI[7] # kg dw
total_production_loss_kg_dw = LCI[8] # kg dw
conc_waste_water_nutrient_N = LCI[9] # kg.m-3
conc_waste_water_nutrient_P = LCI[10] # kg.m-3
conc_waste_water_nutrient_K = LCI[11] # kg.m-3
conc_waste_water_nutrient_Mg = LCI[12] # kg.m-3
conc_waste_water_biomass = LCI[13] # kg.m-3
conc_waste_water_C = LCI[14] # kg.m-3
conc_waste_water_nutrient_S = LCI[15] # kg.m-3
bioact_molec_dbio = LCI[16] # .
min_centrifugation_rate_m3_h = LCI[17]
max_centrifugation_rate_m3_h = LCI[18]
totalwater_centrifuged = LCI[19]
tubelength = LCI[20]
facilityvolume = LCI[21]
exchange_area = LCI[22]
totalcooling_thermal = LCI[23]
list_opti_perfo.append(optimization_performance)
        # List containing simulation values which are not LCI or LCIA
# (same order as their names in names_suppl_info)
values_simu = [bioact_molec_dbio,
surfaceyield,
tubelength,
facilityvolume,
exchange_area,
totalcooling_thermal,
volumetricyield,
total_production_kg_dw,
total_production_harvested_kg_dw,
total_production_loss_kg_dw,
conc_waste_water_nutrient_N,
conc_waste_water_nutrient_P,
conc_waste_water_nutrient_K,
conc_waste_water_nutrient_Mg,
conc_waste_water_biomass,
conc_waste_water_C,
conc_waste_water_nutrient_S,
min_centrifugation_rate_m3_h,
max_centrifugation_rate_m3_h,
totalwater_centrifuged]
values_LCI = [LCIdict_collected[i] for i in LCIdict_collected]
names_LCI = [a for a in LCIdict_collected]
# Now calculating the LCIA for this row
        # Calling the function that calculates the impacts associated with the emissions
        # from the treatment of 1 cubic meter of the wastewater
list_sum_impacts_biosphere_waste_water= waste_water_impact_biosphere(
conc_waste_water_nutrient_N,
conc_waste_water_nutrient_P,
conc_waste_water_C,
conc_waste_water_nutrient_Mg,
conc_waste_water_nutrient_K,
conc_waste_water_nutrient_S,
methods)
# Adding the biosphere flows for 1 cubic meter of wastewater
new_dict_mono_technosphere_lcas['Wastewater treatment PBR'] =[a+b for (a,b) in zip(new_dict_mono_technosphere_lcas['Wastewater treatment PBR'],list_sum_impacts_biosphere_waste_water) ]
        # Multiplying each impact per unit of input process by the input amount in the calculated LCI
for i in new_dict_mono_technosphere_lcas:
new_dict_mono_technosphere_lcas[i]=[LCIdict_collected[i]*a for a in new_dict_mono_technosphere_lcas[i]]
# Calculating total LCA by summing
list_LCA_res =[]
for meth_index in range(len(methods)):
sum_impact = sum([new_dict_mono_technosphere_lcas[flow][meth_index] for flow in new_dict_mono_technosphere_lcas ])
list_LCA_res.append(sum_impact)
# Row to add to the results dataframe (Uncertain parameters values, LCI figures, Other values about the simulation, LCIA)
row_to_add = list(param_set) + values_LCI + values_simu + list_LCA_res
# Adding this new row
results_table = np.vstack((results_table, row_to_add))
names_methods_adjusted = [a[-1] for a in methods]
names_for_df = names_param + names_LCI + names_suppl_info + names_methods_adjusted
results_table_df = pd.DataFrame(results_table, columns=names_for_df)
# Contribution per process category
for process in new_dict_mono_technosphere_lcas :
# browsing the categories
for index_content_categ in range(len(processes_in_categories)):
# if this process belongs to category
if process in processes_in_categories[index_content_categ]:
# Then we add this value to the corresponding column in the list_tables_contribution
for meth_index in range(len(methods)): #we do this for all methods
list_tables_contribution[meth_index][count, index_content_categ] = (
list_tables_contribution[meth_index][count, index_content_categ]
+ new_dict_mono_technosphere_lcas[process][meth_index])
print('ok2')
# Contribution
# Calculating % contribution
# Calculating the contribution sums used for normalisation
for index_method in range(len(methods)):
for index_row in range(len(sample)):
sumrow = np.sum(list_tables_contribution[index_method][index_row])
sumrow_abs = sum([abs(a) for a in list_tables_contribution[index_method][index_row,:]])
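# sumrow keeps the sign of each contribution while sumrow_abs sums magnitudes only,
# so the two percentage tables below can differ whenever a process has a negative (credit) contribution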
for index_col in range(len(categories_contribution)):
list_tables_contribution_abs[index_method][index_row][index_col] =(
list_tables_contribution[index_method][index_row][index_col]
*100/sumrow_abs)
list_tables_contribution[index_method][index_row][index_col] =(
list_tables_contribution[index_method][index_row][index_col]
*100/sumrow)
# Conversion to Dataframes
list_tables_contribution_df = [pd.DataFrame(
table, columns=categories_contribution) for table in list_tables_contribution]
list_tables_contribution_abs_df=[pd.DataFrame(
table, columns=categories_contribution) for table in list_tables_contribution_abs]
# Statistical description of contributions
total_desc_stat_contri_df =pd.DataFrame()
# For all methods
for index_meth in range(len(methods)):
# statistical description of the contributions
desc_stat_contrib=list_tables_contribution_abs_df[index_meth].describe()
# add a separation row to start a new method
separation_row = {i:'' for i in desc_stat_contrib.columns}
# Change the first key of the new row to the method name
firstkey = list(separation_row.keys())[0]
separation_row[firstkey]=methods[index_meth][-1]
# add this separation row
total_desc_stat_contri_df = total_desc_stat_contri_df.append(separation_row, ignore_index=True)
# Add the statistical description
total_desc_stat_contri_df=pd.concat([total_desc_stat_contri_df,
desc_stat_contrib])
# Melting contribution tables for density plots
list_tables_contribution_df_melted = [pd.melt(dataframe, value_vars=categories_contribution,
var_name="Processes", value_name="% of total impact")for dataframe in list_tables_contribution_df]
list_tables_contribution_abs_df_melted = [pd.melt(dataframe, value_vars=categories_contribution,
var_name="Processes", value_name="% of total impact")for dataframe in list_tables_contribution_abs_df]
# Sensitivity
# Cleaning the DataFrame: cast columns to float, except the qualitative variables
columnnames_without_quali = [a for a in names_for_df if a not in columns_not_float]
results_table_df[columnnames_without_quali] = results_table_df[columnnames_without_quali].astype(float)
# Calculating sensitivity indexes for each method
results_sobol_fast = []
for IC in methods: # For each column of LCIA results = for each method (Impact category)
output = results_table_df.loc[:, IC[-1]] # Calls the result column for this impact category
# Rearranging into a monodimensional array
array_output = pd.DataFrame(output).to_numpy()
flat_array = array_output.flat
output_list_clean = []
for a in flat_array:
output_list_clean.append(a)
output_clean = np.array(output_list_clean)
# Performing the sensitivity analysis
if type_sens == 'SOBOL':
sobol_res = SALib.analyze.sobol.analyze(
problem_sobol_FAST, output_clean, calc_second_order=False)
elif type_sens == 'FAST':
sobol_res = SALib.analyze.fast.analyze(
problem_sobol_FAST, output_clean)
results_sobol_fast.append([IC[-1], sobol_res])
# Sensitivity
# Initializing the table of sensitivity indices for all methods
sensi_multi = pd.DataFrame(np.zeros((len(results_sobol_fast[0][1]['ST']),
len(results_sobol_fast))),
columns=[IC[0] for IC in results_sobol_fast])
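# sensi_multi: one row per uncertain parameter, one column per impact category; it is filled
# below with the total-order sensitivity indices (ST) and then normalised column-wise to shares of variance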
count=-1
for IC in results_sobol_fast :
count += 1
sensi_multi.iloc[:,count]=IC[1]['ST']
# Calculating shares of total variance for all parameters, for all methods
for col in range(sensi_multi.shape[1]):
sumcol=sum(sensi_multi.iloc[:,col])
sensi_multi.iloc[:,col]=sensi_multi.iloc[:,col]/sumcol
sensi_multi['Parameter'] = names_param
sensi_multi_melt=pd.melt(sensi_multi,id_vars=['Parameter'],value_vars=[meth[0] for meth in results_sobol_fast],var_name="Impact_Category")
# Statistical description of LCI values
desc_stat_results = results_table_df.describe()
toexclude = ['night_monitoring',
'Bio_class',
'market_for_substitution',
'Nsource',
' min_centrifugation_rate_m3_h',
'max_centrifugation_rate_m3_h']
desc_stat_results = desc_stat_results[[a for a in names_for_df if a not in toexclude ]]
# Creating a table combining contributions for all impact categories
all_methods_contribution_df_melted=pd.DataFrame()
all_methods_contribution_abs_df_melted=pd.DataFrame()
for i in range(len(methods)):
# Combine all impact categories in one table for plot R
tablemeth_copy=list_tables_contribution_df_melted[i].copy()
tablemeth_copy['Impact Category']=methods[i][-1]
all_methods_contribution_df_melted=pd.concat([all_methods_contribution_df_melted,tablemeth_copy])
tablemeth_copy_abs=list_tables_contribution_abs_df_melted[i].copy()
tablemeth_copy_abs['Impact Category']=methods[i][-1]
all_methods_contribution_abs_df_melted=pd.concat([all_methods_contribution_abs_df_melted,tablemeth_copy_abs])
# Rescaling to 1 kg dw biomass for comparison with literature
result_LCI_rescaled = results_table_df.copy()
quali_col = ['night_monitoring', 'Bio_class',
'market_for_substitution', 'Nsource']
names_LCI_without_quali = [a for a in names_LCI if a not in quali_col]
if 'water_after_drying' in result_LCI_rescaled.columns:
columns_to_keep = names_LCI_without_quali + ['bioact_molec_dbio', 'water_after_drying']
else:
columns_to_keep = names_LCI_without_quali + ['bioact_molec_dbio']
result_LCI_rescaled = result_LCI_rescaled[columns_to_keep]
# LCI figures that must be divided by 1/bioact_molec_dbio
list_todivide_1 = ['Electricity drying kWh PBR',
'Electricity cell disruption kWh PBR',
'Feed energy PBR',
'Feed protein PBR',
'Co solvent Extraction PBR',
'Extraction electricity kWh PBR',
'LT Fishmeal PBR',
'Rapeseed oil PBR',
'Wheat PBR',
'Wheat gluten PBR',
'Fish oil PBR',
'Soyabean meal PBR',
'Poultry meal PBR',
'Hemoglobin meal PBR']
# The others must be divided by (1/bioact_molec_dbio)*(1/(1-water_after_drying))
list_todivide_2 = [a for a in names_LCI_without_quali if a not in list_todivide_1]
result_LCI_rescaled.loc[:, list_todivide_1] = result_LCI_rescaled.loc[:, list_todivide_1].div(
1/result_LCI_rescaled['bioact_molec_dbio'], axis=0)
if 'water_after_drying' in result_LCI_rescaled.columns:
result_LCI_rescaled.loc[:, list_todivide_2] = result_LCI_rescaled.loc[:, list_todivide_2].div(
((1/result_LCI_rescaled['bioact_molec_dbio'])*(1/(1-result_LCI_rescaled['water_after_drying']))), axis=0)
else: # If it is not an uncertain parameter it will not appear in the table
result_LCI_rescaled.loc[:, list_todivide_2] = result_LCI_rescaled.loc[:, list_todivide_2].div(
((1/result_LCI_rescaled['bioact_molec_dbio'])*(1/(1-Tech_opdict['water_after_drying']))), axis=0)
# Adding the interesting columns to the rescaled LCI table
col_to_concat = names_param + names_suppl_info + names_methods_adjusted
part_to_concat = results_table_df[col_to_concat]
# Merging param and LCI with LCA dataframes
result_LCI_rescaled = pd.concat([result_LCI_rescaled, part_to_concat], axis=1)
# Calculating centrifugation energy per m3 to compare with literature
result_LCI_rescaled['centrifuge_energy_perm3'] = (result_LCI_rescaled['Electricity centrifuge kWh PBR']
* result_LCI_rescaled['biomassconcentration'])
return (sample,
problem_sobol_FAST,
results_table_df,
results_table,
results_sobol_fast,
list_tables_contribution_df_melted,
list_tables_contribution_abs_df_melted,
all_methods_contribution_abs_df_melted,
list_tables_contribution_df,
list_opti_perfo,
result_LCI_rescaled,
sensi_multi_melt,
desc_stat_results,
total_desc_stat_contri_df)
| 38.680174 | 192 | 0.595758 |
170ccce2431894ea5943b1b784e6683e3dfa278d | 4,159 | py | Python | tests/python/gaia-ui-tests/gaiatest/apps/settings/regions/device_info.py | ahal/gaia | f90af6ed55795d13d16e1a4d7745643a124b4001 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/apps/settings/regions/device_info.py | ahal/gaia | f90af6ed55795d13d16e1a4d7745643a124b4001 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/apps/settings/regions/device_info.py | ahal/gaia | f90af6ed55795d13d16e1a4d7745643a124b4001 | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest.apps.base import Base
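# Page objects for the Settings app's 'Device Information' panel and its 'More Information' sub-panel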
class DeviceInfo(Base):
_phone_number_locator = (By.ID, 'deviceInfo-msisdns')
_model_locator = (By.CSS_SELECTOR, '#about small[data-name="deviceinfo.hardware"]')
_software_locator = (By.CSS_SELECTOR, '#about small[data-name="deviceinfo.software"]')
_more_info_button_locator = (By.CSS_SELECTOR, 'a[href="#about-moreInfo"]')
def __init__(self, marionette):
Base.__init__(self, marionette)
self.wait_for_element_displayed(*self._phone_number_locator)
@property
def phone_number(self):
return self.marionette.find_element(*self._phone_number_locator).text
@property
def model(self):
return self.marionette.find_element(*self._model_locator).text
@property
def software(self):
return self.marionette.find_element(*self._software_locator).text
def tap_more_info(self):
self.marionette.find_element(*self._more_info_button_locator).tap()
return self.MoreInfo(self.marionette)
class MoreInfo(Base):
_os_version_locator = (By.CSS_SELECTOR, '#about-moreInfo small[data-name="deviceinfo.os"]')
_hardware_revision_locator = (By.CSS_SELECTOR, '#about-moreInfo small[data-name="deviceinfo.hardware"]')
_mac_address_locator = (By.CSS_SELECTOR, '#about-moreInfo small[data-name="deviceinfo.mac"]')
_imei1_locator = (By.CSS_SELECTOR, '#deviceInfo-imeis span[data-slot="0"]')
_imei2_locator = (By.CSS_SELECTOR, '#deviceInfo-imeis span[data-slot="1"]')
_iccid_locator = (By.ID, 'deviceInfo-iccids')
_platform_version_locator = (By.CSS_SELECTOR, '#about-moreInfo small[data-name="deviceinfo.platform_version"]')
_build_id_locator = (By.CSS_SELECTOR, '#about-moreInfo small[data-name="deviceinfo.platform_build_id"]')
_build_number_locator = (By.CSS_SELECTOR, '#about-moreInfo small[data-name="deviceinfo.build_number"]')
_update_channel_locator = (By.CSS_SELECTOR, '#about-moreInfo small[data-name="app.update.channel"]')
_git_commit_timestamp_locator = (By.ID, 'gaia-commit-date')
_git_commit_hash_locator = (By.ID, 'gaia-commit-hash')
def __init__(self, marionette):
Base.__init__(self, marionette)
self.wait_for_element_displayed(*self._os_version_locator)
@property
def os_version(self):
return self.marionette.find_element(*self._os_version_locator).text
@property
def hardware_revision(self):
return self.marionette.find_element(*self._hardware_revision_locator).text
@property
def mac_address(self):
return self.marionette.find_element(*self._mac_address_locator).text
@property
def imei1(self):
return self.marionette.find_element(*self._imei1_locator).text.split()[2]
@property
def imei2(self):
return self.marionette.find_element(*self._imei2_locator).text.split()[2]
@property
def iccid(self):
return self.marionette.find_element(*self._iccid_locator).text
@property
def platform_version(self):
return self.marionette.find_element(*self._platform_version_locator).text
@property
def build_id(self):
return self.marionette.find_element(*self._build_id_locator).text
@property
def build_number(self):
return self.marionette.find_element(*self._build_number_locator).text
@property
def update_channel(self):
return self.marionette.find_element(*self._update_channel_locator).text
@property
def git_commit_timestamp(self):
return self.marionette.find_element(*self._git_commit_timestamp_locator).text
@property
def git_commit_hash(self):
return self.marionette.find_element(*self._git_commit_hash_locator).text
| 40.77451 | 119 | 0.693195 |
303d22ee80fad4cdd23a5dd47e16997debe71a1c | 1,069 | py | Python | ifmap/metadata.py | kiran-vemuri/ifmap-python-client | 06198cface6421c50c7352009b8370713a414db5 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2015-01-21T11:52:27.000Z | 2017-01-24T05:13:55.000Z | ifmap/metadata.py | ITI/ifmap-python-client | 15c9e12454a0e277a6668e9c469eb401e3fae100 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ifmap/metadata.py | ITI/ifmap-python-client | 15c9e12454a0e277a6668e9c469eb401e3fae100 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2015-03-11T07:51:30.000Z | 2016-10-12T06:08:08.000Z | # Copyright 2011, Infoblox, All Rights Reserved
#
# Open Source, see LICENSE
#
from util import attr, link_ids
class MetadataBase:
""" foundation class for metadata factory """
pass
class Metadata(MetadataBase):
"""
Metadata factory
"""
__ns_uri = ''
def __init__(self, name, value=None, attributes=None, ns_prefix=None, ns_uri=None, elements=''):
self.__value = value
self.__attributes = attributes
self.__elements = elements
if ns_prefix:
self.__name = ns_prefix + ':' + name
elif not ns_uri:
self.__name = 'meta:' + name
if ns_uri:
if ns_prefix:
self.__ns_uri = ' xmlns:' + ns_prefix + '="' + ns_uri + '"'
else:
self.__ns_uri = ' xmlns="' + ns_uri + '"'
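# __str__ below serialises this object as an IF-MAP XML fragment:
# <metadata><name [namespace] [attributes]>value + elements</name></metadata>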
def __str__(self):
__attr = ' '+ attr(self.__attributes)
return '<metadata><' + self.__name + self.__ns_uri + __attr + '>' + self.__value + self.__elements + '</' + self.__name + '></metadata>'
| 29.694444 | 144 | 0.552853 |
1aedc1c38660cef8db4292d5612165f04c29f662 | 6,031 | py | Python | lightreid/data/datasets/reid_samples.py | nataliamiccini/light-reid | 1bd823849e4a40c9a17497bb7d7ec6097635f4d6 | [
"MIT"
] | 296 | 2020-07-20T02:11:34.000Z | 2022-03-27T12:42:11.000Z | lightreid/data/datasets/reid_samples.py | nataliamiccini/light-reid | 1bd823849e4a40c9a17497bb7d7ec6097635f4d6 | [
"MIT"
] | 33 | 2020-08-27T03:20:43.000Z | 2022-03-14T11:25:07.000Z | lightreid/data/datasets/reid_samples.py | nataliamiccini/light-reid | 1bd823849e4a40c9a17497bb7d7ec6097635f4d6 | [
"MIT"
] | 61 | 2020-07-20T06:19:22.000Z | 2022-03-28T07:10:39.000Z | """
@author: wangguanan
@contact: guan.wang0706@gmail.com
"""
import os
import os.path as osp
from prettytable import PrettyTable
import tarfile
import zipfile
import time
import sys
import errno
import copy
class ReIDSamples:
'''
An abstract class representing a Re-ID samples.
Attrs:
train (list): contains tuples of (img_path(s), pid, camid).
query (list): contains tuples of (img_path(s), pid, camid).
gallery (list): contains tuples of (img_path(s), pid, camid).
'''
def __init__(self, train, query, gallery, combineall=False, **kwargs):
if combineall:
print('[{} Combine All] combine train, query and gallery and training set ... ...'.format(self.__class__.__name__))
train += copy.deepcopy(query) + copy.deepcopy(gallery)
if train is not None:
train = self.relabel(train)
self.train, self.query, self.gallery = train, query, gallery
# show information
self.statistics(train=self.train, query=self.query, gallery=self.gallery, combineall=combineall)
def statistics(self, **kwargs):
'''show sample statistics'''
def analyze(samples):
if samples is None:
return None, None, None
pid_num = len(set([sample[1] for sample in samples]))
cid_num = len(set([sample[2] for sample in samples]))
sample_num = len(samples)
return sample_num, pid_num, cid_num
table = PrettyTable([self.__class__.__name__, 'images', 'identities', 'cameras', 'imgs/id', 'imgs/cam', 'imgs/id&cam'])
for key, val in kwargs.items():
if key in ['train', 'query', 'gallery']:
info = analyze(val)
key_str = str(key)
if 'combineall' in kwargs.keys() and kwargs['combineall'] and key == 'train':
key_str += '(combineall)'
img_num, pid_num, cid_num = info
imgs_per_id = round(img_num / float(pid_num), 2) if img_num is not None else None
imgs_per_cam = round(img_num / float(cid_num), 2) if img_num is not None else None
imgs_per_idcam = round(img_num / float(pid_num) / float(cid_num), 2) if img_num is not None else None
table.add_row([str(key_str), str(info[0]), str(info[1]), str(info[2]),
str(imgs_per_id), str(imgs_per_cam), str(imgs_per_idcam)])
print(table)
def os_walk(self, folder_dir):
for root, dirs, files in os.walk(folder_dir):
files = sorted(files, reverse=True)
dirs = sorted(dirs, reverse=True)
return root, dirs, files
def relabel(self, samples):
'''relabel person identities'''
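# NOTE: samples must be mutable sequences (lists), since the person id is rewritten in place below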
ids = list(set([sample[1] for sample in samples]))
ids.sort()
for sample in samples:
sample[1] = ids.index(sample[1])
return samples
def download_dataset(self, dataset_dir, dataset_url):
"""Downloads and extracts dataset.
Args:
dataset_dir (str): dataset directory.
dataset_url (str): url to download dataset.
"""
if osp.exists(dataset_dir):
return
if dataset_url is None:
raise RuntimeError(
'{} dataset needs to be manually '
'prepared, please follow the '
'document to prepare this dataset'.format(
self.__class__.__name__
)
)
print('Creating directory "{}"'.format(dataset_dir))
self.mkdir_if_missing(dataset_dir)
fpath = osp.join(dataset_dir, osp.basename(dataset_url))
print(
'Downloading {} dataset to "{}"'.format(
self.__class__.__name__, dataset_dir
)
)
self.download_url(dataset_url, fpath)
print('Extracting "{}"'.format(fpath))
try:
tar = tarfile.open(fpath)
tar.extractall(path=dataset_dir)
tar.close()
except:
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(dataset_dir)
zip_ref.close()
print('{} dataset is ready'.format(self.__class__.__name__))
def download_url(self, url, dst):
"""Downloads file from a url to a destination.
Args:
url (str): url to download file.
dst (str): destination path.
"""
from six.moves import urllib
print('* url="{}"'.format(url))
print('* destination="{}"'.format(dst))
def _reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024*duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write(
'\r...%d%%, %d MB, %d KB/s, %d seconds passed' %
(percent, progress_size / (1024*1024), speed, duration)
)
sys.stdout.flush()
urllib.request.urlretrieve(url, dst, _reporthook)
sys.stdout.write('\n')
def mkdir_if_missing(self, dirname):
"""Creates dirname if it is missing."""
if not osp.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def check_before_run(self, required_files):
"""Checks if required files exist before going deeper.
Args:
required_files (str or list): string file name(s).
"""
if isinstance(required_files, str):
required_files = [required_files]
for fpath in required_files:
if not osp.exists(fpath):
raise RuntimeError('"{}" is not found'.format(fpath)) | 35.063953 | 127 | 0.566573 |
c1517c3be953e131f9283f73720240c94637e722 | 1,318 | py | Python | btre-project/listings/migrations/0002_auto_20210227_1314.py | amirzp/btre-project | 270fa639d71df5d3d11c356715e6b134da23b9cd | [
"MIT"
] | 1 | 2021-03-02T11:43:30.000Z | 2021-03-02T11:43:30.000Z | btre-project/listings/migrations/0002_auto_20210227_1314.py | amirzp/btre-project | 270fa639d71df5d3d11c356715e6b134da23b9cd | [
"MIT"
] | null | null | null | btre-project/listings/migrations/0002_auto_20210227_1314.py | amirzp/btre-project | 270fa639d71df5d3d11c356715e6b134da23b9cd | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-02-27 13:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('listings', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='listings',
name='photo_1',
field=models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/'),
),
migrations.AlterField(
model_name='listings',
name='photo_2',
field=models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/'),
),
migrations.AlterField(
model_name='listings',
name='photo_3',
field=models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/'),
),
migrations.AlterField(
model_name='listings',
name='photo_4',
field=models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/'),
),
migrations.AlterField(
model_name='listings',
name='photo_5',
field=models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/'),
),
migrations.AlterField(
model_name='listings',
name='photo_main',
field=models.ImageField(upload_to='photos/%Y/%m/%d/'),
),
]
| 29.954545 | 78 | 0.545524 |
d0bf0f7ee341518c4ef31d2c375167e087e7475a | 4,589 | py | Python | image.py | jbilskie/BME547_final | fe29a452259f3ac311ba083f45076ce79de2bec6 | [
"MIT"
] | null | null | null | image.py | jbilskie/BME547_final | fe29a452259f3ac311ba083f45076ce79de2bec6 | [
"MIT"
] | 49 | 2019-04-10T18:16:29.000Z | 2019-04-29T05:51:05.000Z | image.py | jbilskie/BME547_final | fe29a452259f3ac311ba083f45076ce79de2bec6 | [
"MIT"
] | null | null | null | # image.py
# Authors: Jessica Bilskie, Janet Chen, Kevin Chu
# Last Modified: 4/25/19
import base64
import io
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import zipfile
import re
from PIL import Image
def read_img_as_b64(file_path):
""" Reads in image as b64
This function reads in an image and encodes it into
a base64 string so that it can be transmitted over
the internet.
Args:
file_path (str): file directory and name
Returns:
b64_string (str): image represented as a string
"""
with open(file_path, "rb") as img_file:
b64_bytes = base64.b64encode(img_file.read())
b64_string = str(b64_bytes, encoding='utf-8')
return b64_string
def save_b64_img(b64_string, file_path):
""" Saves image file
This function reads in a base64 string and decodes so that
it can be saved.
Args:
b64_string (str): image represented as a string
file_path (str): path of save file
"""
img_bytes = base64.b64decode(b64_string)
with open(file_path, "wb") as out_file:
out_file.write(img_bytes)
return
def b64_to_image(b64_string):
""" Convert b64 string to image
This function takes a b64 string and decodes it into
a color image. The image is represented as a 3-dimensional
np.array where each element represents the pixel intensity
ranging from 0-255. The three dimensions represent red,
blue, and green channels.
Args:
b64_string (str): image represented as a string
Returns:
img (np.array): image represented as RBG intensities
"""
img_bytes = base64.b64decode(b64_string)
img_buf = io.BytesIO(img_bytes)
img = mpimg.imread(img_buf, format='PNG')
# rgb_im = Image.convert("RGB")
# img = rgb_im.save("downloaded_image"+type_extension, quality=95)
return img
def image_to_b64(img):
""" Convert np.array image to b64 string
This function uses the skimage.io.imsave function to convert
an np.array image into a b64 string. The image is saved in
a BytesIO buffer, which is then encoded in base 64 and then
converted into a string.
Args:
img (np.array): image represented as np.array
Returns:
b64_string (string): image represented as base64 string
"""
from skimage.io import imsave
f = io.BytesIO()
imsave(f, img, plugin='pil')
y = base64.b64encode(f.getvalue())
b64_string = str(y, encoding='utf-8')
return b64_string
def is_b64(b64_image):
""" Check if the input is a b64 image
This function decodes and then re-encodes a given string to check
whether it is a valid b64 string.
Args:
b64_image (str): string in question for b64 validity
Returns:
truth (Boolean): whether the string is b64 encoded
"""
truth = False
if isinstance(b64_image, str) is True:
if len(b64_image) != 0:
try:
decode_encode = str(base64.b64encode
(base64.b64decode(b64_image)),
encoding='utf-8')
if decode_encode == b64_image:
truth = True
except:
pass
return truth
def unzip(filename):
"""Unzips file at requested path
Returns unzipped file (as numpy array) and success boolean
Args:
filename (string): image path to unzip
Returns:
imgs (list): list containing image data as base 64 strings
filenames (list): list containing image filenames
success (bool): whether zip was successfully extracted
"""
imgs = []
success = True
zip_files = zipfile.ZipFile(filename, "r")
filenames = zip_files.namelist()
img_filenames = []
j = 0
for i in range(len(filenames)):
file = filenames[i]
# Ignore macOS metadata files (note: '.' is an unescaped regex wildcard here, so any name containing a character followed by '_' is also skipped)
if not re.search('._', file):
try:
with zip_files.open(file) as img_file:
img_obj = Image.open(img_file)
img_np = np.array(img_obj)
img_obj.close()
imgs.append(image_to_b64(img_np))
img_filenames.append(file)
except:
success = False
img_filenames.append(filename)
# Empty lists are false
if not imgs:
success = False
# zip_files.close()
return imgs, img_filenames, success
if __name__ == '__main__':
a = read_img_as_b64("test_image/test5.png")
print(a)
| 27.315476 | 70 | 0.629113 |
44fa491bef64cfb395232ed9298b20fe6f2d8ac3 | 14,346 | py | Python | pyteomics/auxiliary/structures.py | donnyyy777/pyteomics | dc0cafb16823767457fa52342574e3fa1c61b970 | [
"Apache-2.0"
] | null | null | null | pyteomics/auxiliary/structures.py | donnyyy777/pyteomics | dc0cafb16823767457fa52342574e3fa1c61b970 | [
"Apache-2.0"
] | null | null | null | pyteomics/auxiliary/structures.py | donnyyy777/pyteomics | dc0cafb16823767457fa52342574e3fa1c61b970 | [
"Apache-2.0"
] | null | null | null | import re
from collections import defaultdict, Counter
import warnings
try:
basestring
PY2 = True
except NameError:
basestring = (str, bytes)
PY2 = False
_UNIT_CV_INTERN_TABLE = dict()
def clear_unit_cv_table():
"""Clear the module-level unit name and
controlled vocabulary accession table.
"""
_UNIT_CV_INTERN_TABLE.clear()
def _intern_unit_or_cv(unit_or_cv):
"""Intern `unit_or_cv` in :const:`~._UNIT_CV_INTERN_TABLE`, potentially
keeping a reference to the object stored for the duration of the program.
Parameters
----------
unit_or_cv : object
The value to intern
Returns
-------
object:
The object which `unit_or_cv` hash-equals in :const:`~._UNIT_CV_INTERN_TABLE`.
"""
if unit_or_cv is None:
return None
try:
return _UNIT_CV_INTERN_TABLE[unit_or_cv]
except KeyError:
_UNIT_CV_INTERN_TABLE[unit_or_cv] = unit_or_cv
return _UNIT_CV_INTERN_TABLE[unit_or_cv]
class PyteomicsError(Exception):
"""Exception raised for errors in Pyteomics library.
Attributes
----------
message : str
Error message.
"""
def __init__(self, msg, *values):
self.message = msg
self.values = values
def __str__(self):
if not self.values:
return "Pyteomics error, message: %s" % (repr(self.message),)
else:
return "Pyteomics error, message: %s %r" % (repr(self.message), self.values)
class Charge(int):
"""A subclass of :py:class:`int`. Can be constructed from strings in "N+"
or "N-" format, and the string representation of a :py:class:`Charge` is
also in that format.
"""
def __new__(cls, *args, **kwargs):
try:
return super(Charge, cls).__new__(cls, *args)
except ValueError as e:
if isinstance(args[0], basestring):
try:
num, sign = re.match(r'^(\d+)(\+|-)$', args[0]).groups()
return super(Charge, cls).__new__(cls, sign + num, *args[1:], **kwargs)
except Exception:
pass
raise PyteomicsError(*e.args)
def __str__(self):
return str(abs(self)) + '+-'[self < 0]
class Ion(str):
"""Represents an Ion, right now just a subclass of String.
"""
_pattern = r'([abcxyz]\d+(\-H2O|\-NH3)?)([\+|-]\d+)' # "y2-H2O+1"
def __init__(self, *args, **kwargs):
if args and isinstance(args[0], basestring):
try:
self.ion_type, self.neutral_loss, self.charge = re.match(self._pattern, args[0]).groups()
except Exception:
raise PyteomicsError("Malformed ion string, must match the regex {!r}".format(self._pattern))
class ChargeList(list):
"""Just a list of :py:class:`Charge`s. When printed, looks like an
enumeration of the list contents. Can also be constructed from such
strings (e.g. "2+, 3+ and 4+").
"""
def __init__(self, *args, **kwargs):
if args and isinstance(args[0], basestring):
delim = r'(?:,\s*)|(?:\s*and\s*)'
self.extend(map(Charge, re.split(delim, args[0])))
else:
try:
super(ChargeList, self).__init__(
sorted(set(args[0])), *args[1:], **kwargs)
except Exception:
super(ChargeList, self).__init__(*args, **kwargs)
self[:] = map(Charge, self)
def __str__(self):
if len(self) > 1:
return ', '.join(map(str, self[:-1])) + ' and {}'.format(self[-1])
elif self:
return str(self[0])
return super(ChargeList, self).__str__()
def _parse_charge(s, list_only=False):
if not list_only:
try:
return Charge(s)
except PyteomicsError:
pass
return ChargeList(s)
def _parse_ion(ion_text):
try:
return Ion(ion_text)
except Exception as e:
warnings.warn('Could not parse ion string: {} ({})'.format(ion_text, e.args[0]))
class BasicComposition(defaultdict, Counter):
"""A generic dictionary for compositions.
Keys should be strings, values should be integers.
Allows simple arithmetics."""
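# Example (illustrative): BasicComposition(H=2, O=1) + BasicComposition(H=2) == BasicComposition(H=4, O=1);
# zero counts are dropped automatically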
def __init__(self, *args, **kwargs):
defaultdict.__init__(self, int)
Counter.__init__(self, *args, **kwargs)
for k, v in list(self.items()):
if not v:
del self[k]
def __str__(self):
return '{}({})'.format(type(self).__name__, dict.__repr__(self))
def __repr__(self):
return str(self)
def _repr_pretty_(self, p, cycle):
if cycle: # should never happen
p.text('{} object with a cyclic reference'.format(type(self).__name__))
p.text(str(self))
def __add__(self, other):
result = self.copy()
for elem, cnt in other.items():
result[elem] += cnt
return result
def __iadd__(self, other):
for elem, cnt in other.items():
self[elem] += cnt
return self
def __radd__(self, other):
return self + other
def __sub__(self, other):
result = self.copy()
for elem, cnt in other.items():
result[elem] -= cnt
return result
def __isub__(self, other):
for elem, cnt in other.items():
self[elem] -= cnt
return self
def __rsub__(self, other):
return (self - other) * (-1)
def __mul__(self, other):
if not isinstance(other, int):
raise PyteomicsError('Cannot multiply Composition by non-integer',
other)
return type(self)({k: v * other for k, v in self.items()})
def __imul__(self, other):
if not isinstance(other, int):
raise PyteomicsError('Cannot multiply Composition by non-integer',
other)
for elem in self:
self[elem] *= other
return self
def __rmul__(self, other):
return self * other
def __eq__(self, other):
if not isinstance(other, dict):
return False
self_items = {i for i in self.items() if i[1]}
other_items = {i for i in other.items() if i[1]}
return self_items == other_items
# override default behavior:
# we don't want to add 0's to the dictionary
def __missing__(self, key):
return 0
def __setitem__(self, key, value):
if isinstance(value, float):
value = int(round(value))
elif not isinstance(value, int):
raise PyteomicsError('Only integers allowed as values in '
'Composition, got {}.'.format(type(value).__name__))
if value: # reject 0's
super(BasicComposition, self).__setitem__(key, value)
elif key in self:
del self[key]
def copy(self):
return type(self)(self)
def __reduce__(self):
class_, args, state, list_iterator, dict_iterator = super(
BasicComposition, self).__reduce__()
# Override the reduce of defaultdict so we do not provide the
# `int` type as the first argument
# which prevents from correctly unpickling the object
args = ()
return class_, args, state, list_iterator, dict_iterator
class _MappingOverAttributeProxy(object):
'''A replacement for __dict__ for unpickling an object which once
has __slots__ now but did not before.'''
def __init__(self, obj):
self.obj = obj
def __getitem__(self, key):
return getattr(self.obj, key)
def __setitem__(self, key, value):
setattr(self.obj, key, value)
def __contains__(self, key):
return hasattr(self.obj, key)
def __repr__(self):
return "{self.__class__.__name__}({self.obj})".format(self=self)
class unitint(int):
def __new__(cls, value, unit_info=None):
inst = int.__new__(cls, value)
inst.unit_info = (unit_info)
return inst
def __reduce__(self):
return self.__class__, (int(self), self.unit_info)
def _repr_pretty_(self, p, cycle):
base = super(unitint, self).__repr__()
if self.unit_info:
string = "%s %s" % (base, self.unit_info)
else:
string = base
p.text(string)
class unitfloat(float):
__slots__ = ('unit_info', )
def __new__(cls, value, unit_info=None):
inst = float.__new__(cls, value)
inst.unit_info = unit_info
return inst
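# __slots__ removes the per-instance __dict__; the property below substitutes a proxy so attribute
# state (unit_info) is still reachable through __dict__-style access, e.g. when unpickling objects
# created before __slots__ was introduced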
@property
def __dict__(self):
return _MappingOverAttributeProxy(self)
def __reduce__(self):
return self.__class__, (float(self), self.unit_info)
def _repr_pretty_(self, p, cycle):
base = super(unitfloat, self).__repr__()
if self.unit_info:
string = "%s %s" % (base, self.unit_info)
else:
string = base
p.text(string)
class unitstr(str):
if not PY2:
__slots__ = ("unit_info", )
def __new__(cls, value, unit_info=None):
if PY2 and isinstance(value, unicode):
value = value.encode('utf-8')
inst = str.__new__(cls, value)
inst.unit_info = unit_info
return inst
@property
def __dict__(self):
return _MappingOverAttributeProxy(self)
def __reduce__(self):
return self.__class__, (str(self), self.unit_info)
def _repr_pretty_(self, p, cycle):
base = super(unitstr, self).__repr__()
if self.unit_info:
string = "%s %s" % (base, self.unit_info)
else:
string = base
p.text(string)
class cvstr(str):
'''A helper class to associate a controlled vocabullary accession
number with an otherwise plain :class:`str` object'''
if not PY2:
__slots__ = ('accession', 'unit_accession')
_cache = {}
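# Memoise instances by string value so repeated CV terms with the same accession reuse a single object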
def __new__(cls, value, accession=None, unit_accession=None):
try:
inst = cls._cache[value]
if inst.accession == accession and inst.unit_accession == unit_accession:
return inst
except KeyError:
pass
if PY2 and isinstance(value, unicode):
value = value.encode('utf-8')
inst = str.__new__(cls, value)
inst.accession = _intern_unit_or_cv(accession)
inst.unit_accession = _intern_unit_or_cv(unit_accession)
cls._cache[value] = inst
return inst
@property
def __dict__(self):
return _MappingOverAttributeProxy(self)
def __reduce__(self):
return self.__class__, (str(self), self.accession, self.unit_accession)
class CVQueryEngine(object):
'''Traverse an arbitrarily nested dictionary looking
for keys which are :class:`cvstr` instances, or objects
with an attribute called ``accession``.
'''
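# Typical use (illustrative): cvquery(spectrum_dict, 'MS:1000511') returns the value stored under that
# accession, while cvquery(spectrum_dict) returns a flat {accession: value} index of the whole mapping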
def _accession(self, key):
return getattr(key, 'accession', None)
def _query_dict(self, data, accession):
for key, value in data.items():
if self._accession(key) == accession:
if not isinstance(value, str) or value != '':
return value
else:
return key
elif isinstance(value, dict):
inner = self._query_dict(value, accession)
if inner is not None:
return inner
elif isinstance(value, (list, tuple)):
inner = self._query_sequence(value, accession)
if inner is not None:
return inner
elif self._accession(value) == accession:
return value
def _query_sequence(self, data, accession):
for value in data:
if isinstance(value, dict):
inner = self._query_dict(value, accession)
if inner is not None:
return inner
elif isinstance(value, (list, tuple)):
inner = self._query_sequence(value, accession)
if inner is not None:
return inner
elif self._accession(value) == accession:
return value
def query(self, data, accession):
'''Search ``data`` for a key with the accession
number ``accession``. Returns :const:`None` if
not found.
'''
if accession is None:
raise TypeError("`accession` cannot be None")
return self._query_dict(data, accession)
def _is_empty(self, value):
if isinstance(value, basestring):
return value == ''
return False
def _walk_dict(self, data, index):
for key, value in data.items():
accession = self._accession(key)
if accession:
if not self._is_empty(value):
index[accession] = value
else:
index[accession] = key
elif isinstance(value, dict):
self._walk_dict(value, index)
elif isinstance(value, (list, tuple)):
self._walk_sequence(value, index)
accession = self._accession(value)
if accession:
index[accession] = value
return index
def _walk_sequence(self, data, index):
for value in data:
if isinstance(value, dict):
self._walk_dict(value, index)
elif isinstance(value, (list, tuple)):
self._walk_sequence(value, index)
else:
accession = self._accession(value)
if accession:
index[accession] = value
def index(self, data):
'''Construct a flat :class:`dict` whose keys are the
accession numbers for all qualified keys in ``data``
and whose values are the mapped values from ``data``.
'''
index = self._walk_dict(data, {})
return index
def __call__(self, data, accession=None):
'''If ``accession`` is :const:`None`, calls
:meth:`index` on ``data``, otherwise calls
:meth:`query` with ``data`` and ``accession``.
'''
if accession is None:
return self.index(data)
else:
return self.query(data, accession)
cvquery = CVQueryEngine()
| 30.458599 | 109 | 0.57577 |
43fe230db121b88e3496b60676b24624f3d34b4b | 13,902 | py | Python | data/github/preprocessing/src/dataset.py | devjeetr/PLBART | 7c80850000bfca2058aaf2dbce9c9f8bb80981ec | [
"MIT"
] | 99 | 2021-03-12T17:12:02.000Z | 2022-03-29T03:04:14.000Z | data/github/preprocessing/src/dataset.py | devjeetr/PLBART | 7c80850000bfca2058aaf2dbce9c9f8bb80981ec | [
"MIT"
] | 32 | 2021-03-24T15:43:02.000Z | 2022-03-30T21:21:45.000Z | data/github/preprocessing/src/dataset.py | devjeetr/PLBART | 7c80850000bfca2058aaf2dbce9c9f8bb80981ec | [
"MIT"
] | 18 | 2021-03-26T23:43:28.000Z | 2022-03-24T05:55:05.000Z | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import itertools
import subprocess
from pathlib import Path
from data.github.preprocessing.src.utils import (
shuf_file,
apply_bpe_file,
get_vocab_file,
learn_bpe_file,
regroup_and_select_data,
LocalExecutor,
binarize_for_XLM_file,
truncate_files,
get_nlines,
process_and_tokenize_json_file,
extract_functions_file,
extract_docstrings
)
class Language:
def __init__(self, root, lang):
self.folder = Path(str(root)).joinpath(lang)
assert self.folder.is_dir(
), f"failed to initalize Language {self.l}, there is no directory {str(self.folder)}"
self.l = lang
def process_json_and_tok(self, keep_comments, executor=None):
if executor is None:
executor = LocalExecutor()
suffix = '.with_comments' if keep_comments else ''
assert len(list(self.folder.glob('*.json.gz'))
) > 0, f"there is no json in {str(self.folder)}"
jsons = [json for json in self.folder.glob(
'*.json.gz') if not Path(str(json).replace('.json.gz', suffix + '.tok')).is_file()]
print(f"{self.l}: tokenizing {len(jsons)} json files ...")
if len(jsons) > 0:
jobs = executor.map_array(process_and_tokenize_json_file, jsons, itertools.repeat(
self.l), itertools.repeat(keep_comments))
for job in jobs:
job.result()
else:
return
def split_train_test_valid(self, keep_comments, test_size=1000):
suffix = '.with_comments' if keep_comments else ''
# split train-test-valid
# regroup
all_tok = self.folder.joinpath(f'all{suffix}.tok')
command = f"cd {self.folder}; cat *[0-4][0-9][0-9]{suffix}.tok > {all_tok}"
proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, executable='/bin/bash')
size_gb = all_tok.stat().st_size
n_lines = get_nlines(all_tok)
# shuf
shuf_file(all_tok)
# select test/valid/train and split train in 8
subprocess.run(f"cat {all_tok} | head -n {test_size} > {self.folder.joinpath(f'valid{suffix}.tok')}",
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
subprocess.run(
f"cat {all_tok} | head -n {2 * test_size} | tail -n {test_size} > {self.folder.joinpath(f'test{suffix}.tok')}",
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
split_len = int((n_lines - 2 * test_size) / 8)
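# the remaining lines (after the test and valid sets) are written out below as 8 roughly equal train shards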
for n, i in zip(range(8), range(2 * test_size, n_lines, split_len)):
subprocess.run(
f"cat {all_tok} | head -n {i + split_len} | tail -n {split_len} > {self.folder.joinpath(f'train{suffix}.{n}.tok')}",
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return n_lines, size_gb
def process(self, keep_comments, tok_executor=None, test_size=1000, split_executor=None):
suffix = '.with_comments' if keep_comments else ''
print(f"{self.l}: process ...")
self.process_json_and_tok(keep_comments, tok_executor)
if (all(self.folder.joinpath(f'train{suffix}.{n}.tok').is_file() for n in range(8)) and
self.folder.joinpath(f'test{suffix}.tok').is_file() and
self.folder.joinpath(f'valid{suffix}.tok').is_file()):
print(f"{self.l}: train, test and valid for already exist. ")
nlines = 8 * \
get_nlines(self.folder.joinpath(f'train{suffix}.{0}.tok'))
size_gb = 8 * \
self.folder.joinpath(f'train{suffix}.{0}.tok').stat().st_size
else:
print(f"{self.l}: split train, test and valid ... ")
if split_executor is None:
split_executor = LocalExecutor()
job = split_executor.submit(
self.split_train_test_valid, keep_comments, test_size)
nlines, size_gb = job.result()
print(f"{self.l}: train for is {nlines} lines and {size_gb / (1024 ** 3)} Go. ")
# nlines, size = self.split_train_test_valid(keep_comments, test_size)
return nlines, size_gb
def extract_functions(self, keep_comments, test_size=1000, executor=None):
if executor is None:
executor = LocalExecutor()
suffix = '.with_comments' if keep_comments else ''
files = list(self.folder.glob(f'train{suffix}.[01234567].tok'))
files.append(self.folder.joinpath(f'test{suffix}.tok'))
files.append(self.folder.joinpath(f'valid{suffix}.tok'))
toks = [tok for tok in files if not (tok.with_suffix('.functions_standalone.tok').is_file(
) and tok.with_suffix('.functions_class.tok').is_file())]
if len(toks) > 0:
jobs = executor.map_array(
extract_functions_file, toks, itertools.repeat(self.l))
for job in jobs:
job.result()
def extract_docstrings(self, keep_comments, test_size=1000, executor=None):
if executor is None:
executor = LocalExecutor()
suffix = '.with_comments' if keep_comments else ''
files = list(self.folder.glob(
f'train{suffix}.[01234567].functions_class.tok'))
files += list(self.folder.glob(
f'train{suffix}.[01234567].functions_standalone.tok'))
files.append(self.folder.joinpath(f'test{suffix}.functions_class.tok'))
files.append(self.folder.joinpath(
f'test{suffix}.functions_standalone.tok'))
files.append(self.folder.joinpath(
f'valid{suffix}.functions_class.tok'))
files.append(self.folder.joinpath(
f'valid{suffix}.functions_standalone.tok'))
toks = [tok for tok in files if not (tok.with_suffix(
'.DS-f.ds.tok').is_file() and tok.with_suffix('.DS-f.f.tok').is_file())]
if len(toks) > 0:
jobs = executor.map_array(
extract_docstrings, toks, itertools.repeat(self.l))
for job in jobs:
job.result()
class Dataset:
def __init__(self, root, lang1, lang2=None, keep_comments=False, test_size=1000, lang3=None):
self.test_size = test_size
self.root = Path(root)
assert self.root.is_dir(
), f"failed to build the dataset, there is no directory {str(root)}"
langs = [lang1]
if lang2 is not None:
langs.append(lang2)
if lang3 is not None:
langs.append(lang3)
langs = sorted(langs)
self.langs = []
self.langs.append(Language(root, langs[0]))
if len(langs) >= 2:
self.langs.append(Language(root, langs[1]))
if len(langs) == 3:
self.langs.append(Language(root, langs[2]))
self.keep_comments = keep_comments
self.suffix = ".with_comments" if keep_comments else ''
prefix = '-'.join(langs)
self.folder = self.root.joinpath(f"{prefix}{self.suffix}")
self.codes = self.folder.joinpath("codes")
self.vocab = self.folder.joinpath("vocab")
self.sizes = {l.l: [] for l in self.langs}
if not self.folder.is_dir():
self.folder.mkdir()
def process_languages(self, lang_executor=None, tok_executor=None, split_executor=None):
if lang_executor is None:
lang_executor = LocalExecutor()
jobs = [lang_executor.submit(lang.process, self.keep_comments, tok_executor, self.test_size, split_executor)
for lang in self.langs]
for i, lang in enumerate(self.langs):
self.sizes[lang.l] = jobs[i].result()
def train_bpe(self, ncodes, size_gb=None):
if self.codes.is_file():
print("bpe codes already exists.")
return
print("train bpe ...")
if size_gb is None:
nlines = None
else:
size_gb_ = size_gb / len(self.langs)
nlines = [int(self.sizes[l.l][0] * size_gb_ * 1024 **
3 / self.sizes[l.l][1]) for l in self.langs]
print(
f"we need to regroup {nlines} lines for {self.langs[0].l} {self.langs[1].l} and {self.langs[2].l} to gather {size_gb} Go")
# train bpe on only 50 GB (25 each lang) of the tokenized train set
data_train_bpe = self.folder.joinpath(
f'train{self.suffix}.tok.{size_gb}GB')
print(
f"regroup and select data for training bpe in {data_train_bpe} ...")
regroup_and_select_data(
files=[l.folder.glob(
f'train{self.suffix}.[01234567].tok') for l in self.langs],
nlines=nlines,
output=data_train_bpe)
print(f"training bpe on {data_train_bpe}...")
learn_bpe_file(data_train_bpe, ncodes, self.codes)
def get_vocab(self, size_gb=None):
if self.vocab.is_file():
print("vocab already exists.")
return
print("get vocab ...")
if size_gb is None:
nlines = None
else:
size_gb_ = size_gb / len(self.langs)
nlines = [int(self.sizes[l.l][0] * size_gb_ * 1024 **
3 / self.sizes[l.l][1]) for l in self.langs]
# get vocab only from a subset of 40GB (20 each lang) of the bpe-ed train set
data_get_vocab = self.folder.joinpath(
f'train{self.suffix}.bpe.{size_gb}GB')
print(f"regroup and select data in {data_get_vocab} to get vocab ...")
regroup_and_select_data(
files=[self.folder.glob(
f'{l.l}.train{self.suffix}.[01234567].bpe') for l in self.langs],
nlines=nlines,
output=data_get_vocab)
print(f"computing vocab on {data_get_vocab}...")
get_vocab_file(data_get_vocab, self.vocab)
def apply_bpe(self, files_regex, use_vocab=False, executor=None):
vocab = '' if use_vocab is False else self.vocab
if executor is None:
executor = LocalExecutor()
jobs = []
for l in self.langs:
for f in l.folder.glob(files_regex):
out = self.folder.joinpath(
f"{l.l}.{f.name}").with_suffix('.bpe')
if not out.is_file():
print(f'apply bpe on {f} ...')
jobs.append(executor.submit(
apply_bpe_file, f, out, self.codes, vocab))
for job in jobs:
job.result()
def binarize_for_XLM(self, files_regex, executor=None):
print(f"binarize {files_regex} ...")
if executor is None:
executor = LocalExecutor()
jobs = []
for l in self.langs:
for f in self.folder.glob(f'{l.l}.{files_regex}'):
if not Path(str(f) + '.pth').is_file():
print(f"binarizing {f} ...")
jobs.append(executor.submit(
binarize_for_XLM_file, f, self.vocab))
for job in jobs:
job.result()
def extract_functions(self, lang_executor=None, function_executor=None):
print("extract functions ... ")
if lang_executor is None:
lang_executor = LocalExecutor()
jobs = [lang_executor.submit(lang.extract_functions, self.keep_comments,
self.test_size, function_executor) for lang in self.langs]
for job in jobs:
job.result()
for split in ['test', 'valid']:
for f_type in ['functions_standalone', 'functions_class']:
truncate_files(l.folder.joinpath(
f'{split}{self.suffix}.{f_type}.tok') for l in self.langs)
def extract_docstrings(self, lang_executor=None, function_executor=None):
print("extract docstrings ... ")
if lang_executor is None:
lang_executor = LocalExecutor()
jobs = [lang_executor.submit(lang.extract_docstrings, self.keep_comments,
self.test_size, function_executor) for lang in self.langs]
for job in jobs:
job.result()
for split in ['test', 'valid']:
for f_type in ['functions_standalone.DS-f.ds', 'functions_standalone.DS-f.f',
'functions_class.DS-f.ds', 'functions_class.DS-f.f']:
truncate_files(l.folder.joinpath(
f'{split}{self.suffix}.{f_type}.tok') for l in self.langs)
def extract_functions_and_apply_bpe(self, lang_executor=None, function_executor=None, bpe_executor=None):
print("extract functions ... ")
if lang_executor is None:
lang_executor = LocalExecutor()
jobs = [lang_executor.submit(lang.extract_functions, self.keep_comments,
self.test_size, function_executor) for lang in self.langs]
for job in jobs:
job.result()
for split in ['test', 'valid']:
for f_type in ['functions_standalone', 'functions_class']:
truncate_files(l.folder.joinpath(
f'{split}{self.suffix}.{f_type}.tok') for l in self.langs)
print("apply bpe on train ... ")
self.apply_bpe(
f'train{self.suffix}.[01234567].functions_*.tok', use_vocab=False, executor=bpe_executor)
print("apply bpe on test and valid ...")
self.apply_bpe(f'test{self.suffix}.functions_*.tok',
use_vocab=False, executor=bpe_executor)
self.apply_bpe(f'valid{self.suffix}.functions_*.tok',
use_vocab=False, executor=bpe_executor)
| 42.907407 | 138 | 0.590203 |
8a0641f89560df74b99d846e25f15c0a0a01d283 | 716 | py | Python | setup.py | asprazz/stockStalker | 4812d17e544721939657ef4b2e5e0f60bbd236a9 | [
"MIT"
] | 2 | 2020-06-13T07:24:51.000Z | 2021-02-05T21:43:26.000Z | setup.py | asprazz/stockStalker | 4812d17e544721939657ef4b2e5e0f60bbd236a9 | [
"MIT"
] | null | null | null | setup.py | asprazz/stockStalker | 4812d17e544721939657ef4b2e5e0f60bbd236a9 | [
"MIT"
] | 1 | 2020-06-13T07:25:01.000Z | 2020-06-13T07:25:01.000Z | from setuptools import setup, find_packages
setup(
name="stockStalker",
author="Ankush Patil",
version="0.0.1",
url="https://github.com/asprazz/stockStalker",
description="Python CLI Application for Tracking portfolio.",
packages=[
"stockStalker"
],
install_requires=[
'requests>=2.23',
'argparse',
'prettytable',
'colorama',
'halo',
'platform'
],
python_requires='>=3.5',
entry_points={
'console_scripts': [
'stockStalker=stockStalker.__main__:main'
]
},
author_mail='aspraz2658@gmail.com',
keywords=['stock', 'python-cli', 'stock-market', 'bse', 'nse'],
license='MIT'
) | 24.689655 | 67 | 0.582402 |
1f44fef3eebc9e7336c981b6739cf015b29dd544 | 4,763 | py | Python | kobe/data/download.py | stungkit/KOBE | 274e46d41cac5f526e764dc6001cf545dbbcd6bb | [
"MIT"
] | 9 | 2019-05-01T08:46:06.000Z | 2019-05-13T14:00:28.000Z | kobe/data/download.py | stungkit/KOBE | 274e46d41cac5f526e764dc6001cf545dbbcd6bb | [
"MIT"
] | 2 | 2019-05-03T13:35:42.000Z | 2019-05-13T11:55:17.000Z | kobe/data/download.py | stungkit/KOBE | 274e46d41cac5f526e764dc6001cf545dbbcd6bb | [
"MIT"
] | 4 | 2019-05-02T09:28:31.000Z | 2022-01-13T06:42:41.000Z | import hashlib
import os
import shutil
import time
from urllib.request import urlopen
import gdown
import requests
import tqdm
def download(url, path, fname, redownload=False):
"""
Downloads file using `requests`. If ``redownload`` is set to false, then
will not download tar file again if it is present (default ``True``).
"""
outfile = os.path.join(path, fname)
download = not os.path.isfile(outfile) or redownload
print("[ downloading: " + url + " to " + outfile + " ]")
retry = 5
exp_backoff = [2 ** r for r in reversed(range(retry))]
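# exp_backoff is indexed by the remaining retry count, so waits grow as 1, 2, 4, 8, 16 seconds across successive retries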
pbar = tqdm.tqdm(unit="B", unit_scale=True, desc="Downloading {}".format(fname))
while download and retry >= 0:
resume_file = outfile + ".part"
resume = os.path.isfile(resume_file)
if resume:
resume_pos = os.path.getsize(resume_file)
mode = "ab"
else:
resume_pos = 0
mode = "wb"
response = None
with requests.Session() as session:
try:
header = (
{"Range": "bytes=%d-" % resume_pos, "Accept-Encoding": "identity"}
if resume
else {}
)
response = session.get(url, stream=True, timeout=5, headers=header)
# negative reply could be 'none' or just missing
if resume and response.headers.get("Accept-Ranges", "none") == "none":
resume_pos = 0
mode = "wb"
CHUNK_SIZE = 32768
total_size = int(response.headers.get("Content-Length", -1))
# server returns remaining size if resuming, so adjust total
total_size += resume_pos
pbar.total = total_size
done = resume_pos
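# 'done' counts bytes already on disk plus bytes written in this session; it drives the progress bar
# and the final completeness check against Content-Length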
with open(resume_file, mode) as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if total_size > 0:
done += len(chunk)
if total_size < done:
# don't freak out if content-length was too small
total_size = done
pbar.total = total_size
pbar.update(len(chunk))
break
except requests.exceptions.ConnectionError:
retry -= 1
pbar.clear()
if retry >= 0:
print("Connection error, retrying. (%d retries left)" % retry)
time.sleep(exp_backoff[retry])
else:
print("Retried too many times, stopped retrying.")
finally:
if response:
response.close()
if retry < 0:
raise RuntimeWarning("Connection broken too many times. Stopped retrying.")
if download and retry > 0:
pbar.update(done - pbar.n)
if done < total_size:
raise RuntimeWarning(
"Received less data than specified in "
+ "Content-Length header for "
+ url
+ "."
+ " There may be a download problem."
)
move(resume_file, outfile)
pbar.close()
def move(path1, path2):
"""Renames the given file."""
shutil.move(path1, path2)
def untar(path, fname, deleteTar=True):
"""
Unpacks the given archive file to the same directory, then (by default)
deletes the archive file.
"""
print("unpacking " + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
def test_google():
try:
urlopen("https://www.google.com/", timeout=1)
return True
except Exception:
return False
FNAME = "saved.zip"
MD5 = "9924fb8ac6d32fc797499f226e0e9908"
CN_URL = "https://cloud.tsinghua.edu.cn/f/06f64ae627ec404db300/?dl=1"
URL = "https://drive.google.com/uc?id=1NOhv8pvC8IGwt8oRoIZ-A0EojJBZcolr"
if __name__ == "__main__":
if test_google():
gdown.cached_download(URL, FNAME, md5=MD5, postprocess=gdown.extractall)
os.remove(FNAME)
else:
# If Google is blocked, download from Tsinghua Cloud
download(CN_URL, ".", FNAME)
md5 = hashlib.md5(open(FNAME, "rb").read()).hexdigest()
print(f"Downloaded MD5 = {md5}; Required MD5 = {MD5}")
if md5 != MD5:
raise Exception(
"MD5 doesn't match; please remove saved.zip and rerun the script."
)
untar(".", FNAME)
| 33.542254 | 86 | 0.536427 |
a9a43b6f024670d674483485f271b000ad38f332 | 3,233 | py | Python | tempest/api/orchestration/stacks/test_update.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | 3 | 2015-03-03T15:43:06.000Z | 2016-10-24T06:12:40.000Z | tempest/api/orchestration/stacks/test_update.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | null | null | null | tempest/api/orchestration/stacks/test_update.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import test
LOG = logging.getLogger(__name__)
class UpdateStackTestJSON(base.BaseOrchestrationTest):
_interface = 'json'
template = '''
heat_template_version: 2013-05-23
resources:
random1:
type: OS::Heat::RandomString
'''
update_template = '''
heat_template_version: 2013-05-23
resources:
random1:
type: OS::Heat::RandomString
random2:
type: OS::Heat::RandomString
'''
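# update_template adds a second RandomString resource; the add/remove test updates the stack to it
# and then back to the original template to check that the resource is removed again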
def update_stack(self, stack_identifier, template):
stack_name = stack_identifier.split('/')[0]
resp = self.client.update_stack(
stack_identifier=stack_identifier,
name=stack_name,
template=template)
self.assertEqual('202', resp[0]['status'])
self.client.wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
@test.attr(type='gate')
def test_stack_update_nochange(self):
stack_name = data_utils.rand_name('heat')
stack_identifier = self.create_stack(stack_name, self.template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
expected_resources = {'random1': 'OS::Heat::RandomString'}
self.assertEqual(expected_resources,
self.list_resources(stack_identifier))
# Update with no changes, resources should be unchanged
self.update_stack(stack_identifier, self.template)
self.assertEqual(expected_resources,
self.list_resources(stack_identifier))
@test.attr(type='gate')
@test.skip_because(bug='1308682')
def test_stack_update_add_remove(self):
stack_name = data_utils.rand_name('heat')
stack_identifier = self.create_stack(stack_name, self.template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
initial_resources = {'random1': 'OS::Heat::RandomString'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
# Add one resource via a stack update
self.update_stack(stack_identifier, self.update_template)
updated_resources = {'random1': 'OS::Heat::RandomString',
'random2': 'OS::Heat::RandomString'}
self.assertEqual(updated_resources,
self.list_resources(stack_identifier))
# Then remove it by updating with the original template
self.update_stack(stack_identifier, self.template)
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
| 38.035294 | 78 | 0.686978 |
6eec9090c04b2dcb895b8e36c23aa5cd30cf6283 | 1,399 | py | Python | decorators/log.py | tarasivashchuk/Cookbooks | efb8ac7340f5d110d95da084615504fdd8081111 | [
"MIT"
] | 1 | 2020-07-03T06:42:58.000Z | 2020-07-03T06:42:58.000Z | decorators/log.py | tarasivashchuk/cookbooks | efb8ac7340f5d110d95da084615504fdd8081111 | [
"MIT"
] | null | null | null | decorators/log.py | tarasivashchuk/cookbooks | efb8ac7340f5d110d95da084615504fdd8081111 | [
"MIT"
] | null | null | null | import logging
from functools import wraps
from sys import stdout
logging.basicConfig(stream=stdout, level=logging.DEBUG)
def log_output(func):
def wrapper():
logger = logging.getLogger(func.__name__)
output = func()
logger.info(f"Output: {output}")
return wrapper
def log_args(func):
def wrapper(*args, **kwargs):
logger = logging.getLogger(func.__name__)
[logger.info(arg) for arg in args]
[logger.info(kwarg) for kwarg in kwargs]
output = func(*args, **kwargs)
logger.info(f"Output: {output}")
return wrapper
def log_level(level):
def inner_function(func):
@wraps(func)
def wrapper(*args, **kwargs):
logger = logging.getLogger(func.__name__)
logger.setLevel(level)
[logger.info(arg) for arg in args]
[logger.info(kwarg) for kwarg in kwargs]
output = func(*args, **kwargs)
logger.log(msg=f"Output: {output}", level=level)
return wrapper
return inner_function
@log_output
def test_function():
return print("Log me!")
@log_args
def test_args(x):
return x ** x
@log_level(level=logging.CRITICAL)
def test_level(x):
return x ** x
if __name__ == '__main__':
test_function()
[test_args(value) for value in range(0, 10, 2)]
[test_level(value) for value in range(0, 10, 2)]
| 20.880597 | 60 | 0.625447 |
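A side note on the decorators in the log.py record above: log_output, log_args and log_level log the wrapped function's result but never return it, so the decorated call itself evaluates to None. The sketch below is a hypothetical variant (not part of that module) showing how the same idea can propagate the return value and preserve metadata with functools.wraps; the names log_and_return and square are illustrative only.

import logging
from functools import wraps
from sys import stdout

logging.basicConfig(stream=stdout, level=logging.DEBUG)

def log_and_return(level=logging.INFO):
    """Hypothetical variant of log_level that also hands the result back."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            logger = logging.getLogger(func.__name__)
            result = func(*args, **kwargs)
            logger.log(level, "args=%r kwargs=%r output=%r", args, kwargs, result)
            return result  # unlike the originals, the caller still gets the value
        return wrapper
    return decorator

@log_and_return(logging.DEBUG)
def square(x):
    return x * x

if __name__ == '__main__':
    assert square(4) == 16  # the decorated function keeps its return value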
cb18e6c9e36377d1451ae3bebc95119ded7fde6a | 6,153 | py | Python | movies_analyzer/Imdb.py | mateuszrusin/filmweb-rekomendacje | 06bdff3825f4c9e7b80fb5778d1a40388d2313d9 | [
"MIT"
] | 3 | 2019-12-10T10:20:07.000Z | 2020-12-03T17:37:24.000Z | movies_analyzer/Imdb.py | mateuszrusin/filmweb-rekomendacje | 06bdff3825f4c9e7b80fb5778d1a40388d2313d9 | [
"MIT"
] | 2 | 2021-03-31T19:31:04.000Z | 2021-12-13T20:32:18.000Z | movies_analyzer/Imdb.py | mateuszrusin/filmweb-rekomendacje | 06bdff3825f4c9e7b80fb5778d1a40388d2313d9 | [
"MIT"
] | 4 | 2019-11-21T23:49:39.000Z | 2020-12-03T17:37:26.000Z | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
from filmweb_integrator.fwimdbmerge.utils import to_list
from pathlib import Path
import pyarrow.parquet as pq
import pyarrow as pa
from imdb import IMDb as ImdbServer
import urllib.request
import pickle
import os.path
ROOT = str(Path(__file__).parent.parent.absolute())
# import os; ROOT = os.getcwd()
IMDB_TITLE_GZIP = 'https://datasets.imdbws.com/title.basics.tsv.gz'
IMDB_RATING_GZIP = 'https://datasets.imdbws.com/title.ratings.tsv.gz'
IMDB_ACTORS_GZIP = 'https://datasets.imdbws.com/title.principals.tsv.gz'
IMDB_ACTORS_NAMES_GZIP = 'https://datasets.imdbws.com/name.basics.tsv.gz'
IMDB_COVERS_CSV = ROOT + '/data_static/movie_covers.csv'
IMDB_MOVIES_PARQUET = ROOT + '/data/imdb_movies.parquet.gzip'
IMDB_COVERS_PARQUET = ROOT + '/data/imdb_covers.parquet.gzip'
IMDB_ACTORS_PARQUET = ROOT + '/data/imdb_actors.parquet.gzip'
IMAGE_FOLDER = 'data/images'
DATA_FOLDER = 'data/movies'
# Local cache folders for the recommendation dataset.
try:
    os.mkdir(IMAGE_FOLDER)
except OSError:
    print(IMAGE_FOLDER + ': folder already exists')
try:
    os.mkdir(DATA_FOLDER)
except OSError:
    print(DATA_FOLDER + ': folder already exists')
ia = ImdbServer()
def get_imdb_movie(tmbdid: str):
"""
    Return a tuple (tmbdid, movie). If the movie is already cached on disk,
    load it from the pickle file; otherwise fetch it from IMDb, save the
    pickle and the cover image, and return it. tmbdid is replaced by
    'no-cover' when no cover image is available.
"""
tmbdid = str(tmbdid).replace('tt','')
image_file = IMAGE_FOLDER + "/"+ str(tmbdid) + '.jpg'
pickle_file = DATA_FOLDER+"/"+tmbdid+".pkl"
if os.path.isfile(pickle_file):
movie = pickle.load(open(pickle_file,"rb"))
return tmbdid if os.path.isfile(image_file) else 'no-cover' , movie
movie = ia.get_movie(tmbdid)
if 'cover url' in movie:
urllib.request.urlretrieve(movie['cover url'], image_file)
else:
tmbdid = 'no-cover'
with open(pickle_file,"wb") as f:
pickle.dump(movie,f)
return tmbdid, movie
class Imdb(object):
def __init__(self):
self.imdb = pd.read_parquet(IMDB_MOVIES_PARQUET, engine='pyarrow')
self.imdb_actors = pd.read_parquet(IMDB_ACTORS_PARQUET, engine='pyarrow')
@staticmethod
def prepare():
print("Download titles....")
imdb_title = pd.read_csv(IMDB_TITLE_GZIP, sep='\t', dtype='str', index_col='tconst', engine='c')
imdb_title = imdb_title[imdb_title['titleType']=='movie']
imdb_title = imdb_title.dropna(subset=['startYear', 'originalTitle'])
print("Download ratings....")
table = pa.Table.from_pandas(pd.merge(
imdb_title,
pd.read_csv(IMDB_RATING_GZIP, sep='\t', dtype='str', index_col='tconst', engine='c'),
how='left',
left_index=True,
right_index=True, sort=False), preserve_index=True)
pq.write_table(table, IMDB_MOVIES_PARQUET, compression='gzip')
print("Download actors....")
imdb_actors = pd.read_csv(IMDB_ACTORS_GZIP, sep='\t', dtype='str', index_col='tconst', engine='c')
imdb_actors = imdb_actors[(imdb_actors["ordering"] == '1') & (
(imdb_actors["category"] == 'actor') | (imdb_actors["category"] == 'actress'))]
imdb_actors_names = pd.read_csv(IMDB_ACTORS_NAMES_GZIP, sep='\t', dtype='str', index_col='nconst', engine='c')
imdb_actors_with_names = imdb_actors.merge(imdb_actors_names, right_index=True, left_on="nconst")
imdb_actors_with_names = imdb_actors_with_names[["primaryName", "characters"]]
pa_actors = pa.Table.from_pandas(imdb_actors_with_names)
pq.write_table(pa_actors, IMDB_ACTORS_PARQUET, compression='gzip')
print("Download covers....")
table = pa.Table.from_pandas(pd.read_csv(IMDB_COVERS_CSV), preserve_index=False)
pq.write_table(table, IMDB_COVERS_PARQUET, compression='gzip')
@staticmethod
def get_similarity(row):
text_list_eng = to_list(row['genre_eng'])
text_list_genres = to_list(row['genres'])
# product of those lists
commons = set(text_list_eng) & set(text_list_genres)
return len(commons)
@staticmethod
def change_type(t):
match = {
'akcja': 'action',
'dramat': 'drama',
'animowany': 'cartoon',
'romans': 'romance',
'drogi': 'road',
'biograficzny': 'biographic',
'romantyczny': 'romantic',
'wojenny': 'war',
'katastroficzny': 'disaster',
'kryminał': 'crime',
'komedia': 'comedy',
'dokumentalny': 'documentary',
'pełnometrażowy': 'full-length',
'krótkometrażowy': 'short',
'niemy': 'silent',
'historyczny': 'historical',
'edukacyjny': 'educational',
'kostiumowy': 'costume',
'obyczajowy': 'drama'
}
arr = [match[s.lower()] if s.lower() in match else s.lower() for s in to_list(t)]
return ", ".join(arr)
def merge(self, df):
df['originalTitle'] = df['Tytuł oryginalny']
df['startYear'] = df['Rok produkcji'].fillna('0').astype(str).astype(int).astype(str)
df['originalTitle'] = df['originalTitle'].fillna(df['Tytuł polski'])
df['Gatunek'] = df['Gatunek'].fillna('')
df['genre_eng'] = df['Gatunek'].map(lambda x: self.change_type(x))
merged = pd.merge(
self.imdb.reset_index(),
df,
how='inner',
on=['startYear', 'originalTitle'])
merged = self.filter_duplicates(merged)
merged['averageRating'] = merged['averageRating'].fillna(value=0).astype(float)
merged['diff'] = (merged['Ocena'] - merged['averageRating'])
merged['averageRating_int'] = merged['averageRating'].round().astype(int)
merged.set_index('tconst', inplace=True)
return merged
def filter_duplicates(self, df):
df['similarity'] = df.apply(self.get_similarity, axis=1)
top1 = df.groupby(['ID']).apply(lambda x: x.sort_values(["similarity"], ascending = False)).reset_index(drop=True)
return top1.groupby('ID').head(1).copy()
| 37.748466 | 122 | 0.635137 |
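A minimal usage sketch for the Imdb class above. It assumes Imdb.prepare() has already been run once to build the parquet files, and that the input DataFrame carries the Filmweb-style columns merge() expects ('ID', 'Tytuł oryginalny', 'Tytuł polski', 'Rok produkcji', 'Gatunek', 'Ocena'); the sample row is made up and the snippet is meant to run next to the module (for example as its __main__ block).

import pandas as pd

if __name__ == '__main__':
    # Imdb.prepare()  # one-off download/conversion of the IMDb dumps (slow, needs network)
    imdb = Imdb()
    sample = pd.DataFrame([{
        'ID': 1,
        'Tytuł oryginalny': 'The Matrix',
        'Tytuł polski': 'Matrix',
        'Rok produkcji': 1999,
        'Gatunek': 'akcja',
        'Ocena': 9.0,
    }])
    merged = imdb.merge(sample)
    print(merged[['originalTitle', 'averageRating', 'diff']].head())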
7a3e59ab9c0e3387f8883c5a1b90064f7b84ceca | 7,893 | py | Python | ddt_request_history/panels/request_history.py | djsutho/django-debug-toolbar-request-history | df23cf96f7106453bce087397834aa6e1927621e | [
"BSD-3-Clause"
] | 129 | 2015-01-30T19:04:14.000Z | 2022-01-03T00:50:05.000Z | ddt_request_history/panels/request_history.py | djsutho/django-debug-toolbar-request-history | df23cf96f7106453bce087397834aa6e1927621e | [
"BSD-3-Clause"
] | 30 | 2015-04-24T12:40:24.000Z | 2020-10-06T11:20:01.000Z | ddt_request_history/panels/request_history.py | djsutho/django-debug-toolbar-request-history | df23cf96f7106453bce087397834aa6e1927621e | [
"BSD-3-Clause"
] | 26 | 2015-03-13T09:33:58.000Z | 2022-02-26T12:52:27.000Z | from __future__ import absolute_import, unicode_literals
import json
import logging
import os
import re
import sys
import threading
import uuid
import debug_toolbar
from collections import OrderedDict
from datetime import datetime
from distutils.version import LooseVersion
from django.conf import settings
from django.template import Template
from django.template.backends.django import DjangoTemplates
from django.template.context import Context
from django.utils.translation import gettext_lazy as _
from debug_toolbar.panels import Panel
from debug_toolbar.settings import get_config
from debug_toolbar.toolbar import DebugToolbar
try:
from collections.abc import Callable
except ImportError: # Python < 3.3
from collections import Callable
try:
toolbar_version = LooseVersion(debug_toolbar.VERSION)
except:
toolbar_version = LooseVersion('0')
logger = logging.getLogger(__name__)
DEBUG_TOOLBAR_URL_PREFIX = getattr(settings, 'DEBUG_TOOLBAR_URL_PREFIX', '/__debug__')
_original_middleware_call = None
def patched_middleware_call(self, request):
# Decide whether the toolbar is active for this request.
show_toolbar = debug_toolbar.middleware.get_show_toolbar()
if not show_toolbar(request):
return self.get_response(request)
toolbar = DebugToolbar(request, self.get_response)
# Activate instrumentation ie. monkey-patch.
for panel in toolbar.enabled_panels:
panel.enable_instrumentation()
try:
# Run panels like Django middleware.
response = toolbar.process_request(request)
finally:
# Deactivate instrumentation ie. monkey-unpatch. This must run
# regardless of the response. Keep 'return' clauses below.
for panel in reversed(toolbar.enabled_panels):
panel.disable_instrumentation()
# When the toolbar will be inserted for sure, generate the stats.
for panel in reversed(toolbar.enabled_panels):
panel.generate_stats(request, response)
panel.generate_server_timing(request, response)
response = self.generate_server_timing_header(
response, toolbar.enabled_panels
)
# Check for responses where the toolbar can't be inserted.
content_encoding = response.get("Content-Encoding", "")
content_type = response.get("Content-Type", "").split(";")[0]
if any(
(
getattr(response, "streaming", False),
"gzip" in content_encoding,
content_type not in debug_toolbar.middleware._HTML_TYPES,
)
):
return response
# Collapse the toolbar by default if SHOW_COLLAPSED is set.
if toolbar.config["SHOW_COLLAPSED"] and "djdt" not in request.COOKIES:
response.set_cookie("djdt", "hide", 864000)
# Insert the toolbar in the response.
content = response.content.decode(response.charset)
insert_before = get_config()["INSERT_BEFORE"]
pattern = re.escape(insert_before)
bits = re.split(pattern, content, flags=re.IGNORECASE)
if len(bits) > 1:
bits[-2] += toolbar.render_toolbar()
response.content = insert_before.join(bits)
if response.get("Content-Length", None):
response["Content-Length"] = len(response.content)
return response
def patch_middleware():
if not this_module.middleware_patched:
try:
from debug_toolbar.middleware import DebugToolbarMiddleware
this_module._original_middleware_call = DebugToolbarMiddleware.__call__
DebugToolbarMiddleware.__call__ = patched_middleware_call
except ImportError:
return
this_module.middleware_patched = True
middleware_patched = False
template = None
this_module = sys.modules[__name__]
# XXX: need to call this as early as possible but we have circular imports when
# running with gunicorn so also try a second later
patch_middleware()
threading.Timer(1.0, patch_middleware, ()).start()
def get_template():
if this_module.template is None:
template_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'request_history.html'
)
with open(template_path) as template_file:
this_module.template = Template(
template_file.read(),
engine=DjangoTemplates({'NAME': 'rh', 'DIRS': [], 'APP_DIRS': False, 'OPTIONS': {}}).engine
)
return this_module.template
def allow_ajax(request):
"""
Default function to determine whether to show the toolbar on a given page.
"""
if request.META.get('REMOTE_ADDR', None) not in settings.INTERNAL_IPS:
return False
return bool(settings.DEBUG)
def patched_store(self):
if self.store_id: # don't save if already have
return
self.store_id = uuid.uuid4().hex
cls = type(self)
cls._store[self.store_id] = self
store_size = get_config().get('RESULTS_CACHE_SIZE', get_config().get('RESULTS_STORE_SIZE', 100))
for dummy in range(len(cls._store) - store_size):
try:
# collections.OrderedDict
cls._store.popitem(last=False)
except TypeError:
# django.utils.datastructures.SortedDict
del cls._store[cls._store.keyOrder[0]]
def patched_fetch(cls, store_id):
return cls._store.get(store_id)
DebugToolbar.store = patched_store
DebugToolbar.fetch = classmethod(patched_fetch)
class RequestHistoryPanel(Panel):
""" A panel to display Request History """
title = _("Request History")
template = 'request_history.html'
@property
def nav_subtitle(self):
return self.get_stats().get('request_url', '')
def generate_stats(self, request, response):
self.record_stats({
'request_url': request.get_full_path(),
'request_method': request.method,
'post': json.dumps(request.POST, sort_keys=True, indent=4),
'time': datetime.now(),
})
def process_request(self, request):
self.record_stats({
'request_url': request.get_full_path(),
'request_method': request.method,
'post': json.dumps(request.POST, sort_keys=True, indent=4),
'time': datetime.now(),
})
return super().process_request(request)
@property
def content(self):
""" Content of the panel when it's displayed in full screen. """
toolbars = OrderedDict()
for id, toolbar in DebugToolbar._store.items():
content = {}
for panel in toolbar.panels:
panel_id = None
nav_title = ''
nav_subtitle = ''
try:
panel_id = panel.panel_id
nav_title = panel.nav_title
nav_subtitle = panel.nav_subtitle() if isinstance(
panel.nav_subtitle, Callable) else panel.nav_subtitle
except Exception:
logger.debug('Error parsing panel info:', exc_info=True)
if panel_id is not None:
content.update({
panel_id: {
'panel_id': panel_id,
'nav_title': nav_title,
'nav_subtitle': nav_subtitle,
}
})
toolbars[id] = {
'toolbar': toolbar,
'content': content
}
return get_template().render(Context({
'toolbars': OrderedDict(reversed(list(toolbars.items()))),
'trunc_length': get_config().get('RH_POST_TRUNC_LENGTH', 0)
}))
def disable_instrumentation(self):
request_panel = self.toolbar.stats.get(self.panel_id)
if request_panel and not request_panel.get('request_url', '').startswith(DEBUG_TOOLBAR_URL_PREFIX):
self.toolbar.store()
| 33.587234 | 107 | 0.652857 |
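A hedged configuration sketch for the request-history panel defined above. The dotted panel path follows this record's file layout, and DEBUG_TOOLBAR_PANELS / DEBUG_TOOLBAR_CONFIG / SHOW_TOOLBAR_CALLBACK are django-debug-toolbar's usual registration hooks; treat the exact keys as assumptions and check the package README rather than this sketch.

# settings.py (sketch, assuming a standard django-debug-toolbar installation)
DEBUG = True
INTERNAL_IPS = ['127.0.0.1']

DEBUG_TOOLBAR_PANELS = [
    'ddt_request_history.panels.request_history.RequestHistoryPanel',  # panel from this module
    # ... the default debug_toolbar panels follow ...
]

DEBUG_TOOLBAR_CONFIG = {
    # allow_ajax (defined above) keeps the toolbar available for AJAX requests
    'SHOW_TOOLBAR_CALLBACK': 'ddt_request_history.panels.request_history.allow_ajax',
    'RESULTS_CACHE_SIZE': 100,  # read by the patched store() above
}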
cdc39b4ff2f8ddeafe3d8d19349bf0a145af9acb | 3,412 | py | Python | utils.py | ksquarekumar/facial-keypoints | b422a1954665c27fd254312c7499bb2773f4ecf1 | [
"MIT"
] | 1 | 2019-03-29T16:52:03.000Z | 2019-03-29T16:52:03.000Z | utils.py | planetnoob/facial-keypoints | b422a1954665c27fd254312c7499bb2773f4ecf1 | [
"MIT"
] | null | null | null | utils.py | planetnoob/facial-keypoints | b422a1954665c27fd254312c7499bb2773f4ecf1 | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
def load_data(test=False):
"""
Loads data from FTEST if *test* is True, otherwise from FTRAIN.
    The CSV files must be located in a `data` directory.
"""
FTRAIN = 'data/training.csv'
FTEST = 'data/test.csv'
fname = FTEST if test else FTRAIN
df = read_csv(os.path.expanduser(fname)) # load dataframes
# The Image column has pixel values separated by space; convert
# the values to numpy arrays:
df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))
df = df.dropna() # drop all rows that have missing values in them
X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]
X = X.astype(np.float32)
X = X.reshape(-1, 96, 96, 1) # return each images as 96 x 96 x 1
if not test: # only FTRAIN has target columns
y = df[df.columns[:-1]].values
y = (y - 48) / 48 # scale target coordinates to [-1, 1]
X, y = shuffle(X, y, random_state=42) # shuffle train data
y = y.astype(np.float32)
else:
y = None
return X, y
def plot_data(img, landmark1, axis, color1='c', landmark2=None, color2='r'):
"""
Plot image (img), along with normalized facial keypoints (landmarks)
"""
axis.imshow(np.squeeze(img), cmap='gray') # plot the image
    landmark1 = landmark1 * 48 + 48  # undo the [-1, 1] scaling back to pixel coordinates
# Plot the keypoints
axis.scatter(landmark1[0::2],
landmark1[1::2],
marker='o',
c=color1,
s=40)
if landmark2 is not None:
landmark2 = landmark2 * 48 + 48
# Plot the keypoints
axis.scatter(landmark2[0::2],
landmark2[1::2],
marker='x',
c=color2,
s=40)
def plot_keypoints(img_path,
face_cascade=cv2.CascadeClassifier('haarcascade_frontalface_alt.xml'),
model_path='my_model.h5'):
# TODO: write a function that plots keypoints on arbitrary image containing human
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray)
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
ax.imshow(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB))
if len(faces) == 0:
plt.title('no faces detected')
elif len(faces) > 1:
plt.title('too many faces detected')
for (x,y,w,h) in faces:
rectangle = cv2.rectangle(img,(x,y),(x+w,y+h),(255,255,0),2)
ax.imshow(cv2.cvtColor(rectangle, cv2.COLOR_BGR2RGB))
elif len(faces) == 1:
plt.title('one face detected')
x,y,w,h = faces[0]
bgr_crop = img[y:y+h, x:x+w]
orig_shape_crop = bgr_crop.shape
gray_crop = cv2.cvtColor(bgr_crop, cv2.COLOR_BGR2GRAY)
resize_gray_crop = cv2.resize(gray_crop, (96, 96)) / 255.
model = load_model(model_path)
landmarks = np.squeeze(model.predict(
np.expand_dims(np.expand_dims(resize_gray_crop, axis=-1), axis=0)))
ax.scatter(((landmarks[0::2] * 48 + 48)*orig_shape_crop[0]/96)+x,
((landmarks[1::2] * 48 + 48)*orig_shape_crop[1]/96)+y,
marker='o', c='c', s=40)
plt.show()
| 36.688172 | 88 | 0.607855 |
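A short usage sketch for the helpers above, assuming the Kaggle facial-keypoints CSV files sit under data/ as load_data() requires and that the snippet runs next to the module (so load_data and plot_data are in scope); the 2x2 grid is purely illustrative.

import matplotlib.pyplot as plt

if __name__ == '__main__':
    X, y = load_data()  # images of shape (N, 96, 96, 1) and keypoints scaled to [-1, 1]
    fig = plt.figure(figsize=(6, 6))
    for i in range(4):  # show the first four faces with their keypoints
        ax = fig.add_subplot(2, 2, i + 1, xticks=[], yticks=[])
        plot_data(X[i], y[i], ax)
    plt.show()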
29637c631e3fa04c7764e344111597dccdd39099 | 1,806 | py | Python | 001a_minimal_frame_temporal_separation.py | ofgulban/minimalist_psychopy_examples | 71864ca7f829f4846d1aa002754117565b6549ba | [
"Unlicense"
] | 1 | 2019-01-29T17:04:08.000Z | 2019-01-29T17:04:08.000Z | 001a_minimal_frame_temporal_separation.py | ofgulban/minimalist_psychopy_examples | 71864ca7f829f4846d1aa002754117565b6549ba | [
"Unlicense"
] | null | null | null | 001a_minimal_frame_temporal_separation.py | ofgulban/minimalist_psychopy_examples | 71864ca7f829f4846d1aa002754117565b6549ba | [
"Unlicense"
] | null | null | null | """Minimal frame for two stimuli separated in time."""
from psychopy import core, event, monitors, visual
# =============================================================================
# MONITOR
# Set monitor information used in the experimental setup
moni = monitors.Monitor('testMonitor', width=8.2, distance=60) # in cm
# Set screen (make 'fullscr = True' for fullscreen)
mywin = visual.Window(size=(800, 600), screen=0, winType='pyglet',
allowGUI=True, fullscr=False, monitor=moni,
color='grey', colorSpace='rgb', units='cm')
# =============================================================================
# STIMULUS
# Squares
stim_1 = visual.GratingStim(win=mywin, tex=None, units='deg',
size=(1, 1), color='red')
stim_2 = visual.GratingStim(win=mywin, tex=None, units='deg',
size=(2, 2), color='green')
# Text
text = visual.TextStim(win=mywin, color='black', height=0.4)
# =============================================================================
# TIME
# Parameters
total_time = 10.
block_time = 6.
# Give the system time to settle
core.wait(0.5)
# Create a clock
clock = core.Clock()
clock.reset()
# =============================================================================
# RENDER LOOP
while clock.getTime() < total_time:
t = clock.getTime()
# Determine block
if t < block_time:
stim_1.draw()
elif t >= block_time:
stim_2.draw()
text.text = t
text.draw()
mywin.flip()
# Handle key presses each frame
for keys in event.getKeys():
if keys[0] in ['escape', 'q']:
mywin.close()
core.quit()
mywin.close()
core.quit()
| 25.8 | 80 | 0.477852 |
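The render loop above hard-codes two blocks; the sketch below generalises the "determine block from elapsed time" step to any list of block durations. It is plain Python with no PsychoPy calls, and the durations are made up.

from itertools import accumulate

def block_index(t, durations):
    """Return the index of the block that elapsed time t falls into, or None when done."""
    for i, edge in enumerate(accumulate(durations)):
        if t < edge:
            return i
    return None

if __name__ == '__main__':
    durations = [6.0, 4.0]  # e.g. stim_1 for 6 s, then stim_2 for 4 s
    assert block_index(1.0, durations) == 0
    assert block_index(7.5, durations) == 1
    assert block_index(11.0, durations) is None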
01cb9556c92f1cd681faa3e26de49131e4d87606 | 146 | py | Python | output/models/sun_data/schema/annotations/annotations00101m/annotations00101m4_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/sun_data/schema/annotations/annotations00101m/annotations00101m4_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/sun_data/schema/annotations/annotations00101m/annotations00101m4_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.sun_data.schema.annotations.annotations00101m.annotations00101m4_xsd.annotations00101m4 import Root
__all__ = [
"Root",
]
| 24.333333 | 118 | 0.815068 |
3a5946abebc00b2b69cdbc61222a4b2f4338f651 | 225 | py | Python | examples/plugin/setup.py | lasta/preacher | 5e50f8eb930fac72a788e7614eb5a85903f7bde6 | [
"MIT"
] | null | null | null | examples/plugin/setup.py | lasta/preacher | 5e50f8eb930fac72a788e7614eb5a85903f7bde6 | [
"MIT"
] | null | null | null | examples/plugin/setup.py | lasta/preacher | 5e50f8eb930fac72a788e7614eb5a85903f7bde6 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='preacher-plugin-example',
version='0.0.0',
py_modules=['custom_matcher'],
entry_points={'preacher': 'matcher = custom_matcher'},
install_requires=['preacher'],
)
| 22.5 | 58 | 0.684444 |
5298d8de30d884783f1af9fd734f6b5da70c10fd | 3,256 | py | Python | demo/demo.py | dt3310321/cos-python-sdk-v5 | dc337d33898012621ac9bb5c0574ee2278af9e27 | [
"MIT"
] | null | null | null | demo/demo.py | dt3310321/cos-python-sdk-v5 | dc337d33898012621ac9bb5c0574ee2278af9e27 | [
"MIT"
] | null | null | null | demo/demo.py | dt3310321/cos-python-sdk-v5 | dc337d33898012621ac9bb5c0574ee2278af9e27 | [
"MIT"
] | null | null | null | # -*- coding=utf-8
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
from qcloud_cos import CosServiceError
from qcloud_cos import CosClientError
import sys
import logging
# Tencent Cloud COS V5 Python SDK; currently supports Python 2.6, 2.7 and 3.x
# pip install guide: pip install -U cos-python-sdk-v5
# For the latest available COS regions, see https://www.qcloud.com/document/product/436/6224
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
# Set user attributes: secret_id, secret_key, region
# appid has been removed from the config; carry it in the Bucket parameter instead. Bucket is composed of bucketname-appid
secret_id = 'secret_id'     # replace with your secret_id
secret_key = 'secret_key'   # replace with your secret_key
region = 'ap-beijing'       # replace with your region
token = None                # pass a Token when using temporary credentials; defaults to None and may be omitted
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token)  # build the config object
client = CosS3Client(config)
# Simple upload from a file stream
file_name = 'test.txt'
with open('test.txt', 'rb') as fp:
response = client.put_object(
        Bucket='test04-123456789',  # Bucket is composed of bucketname-appid
Body=fp,
Key=file_name,
StorageClass='STANDARD',
ContentType='text/html; charset=utf-8'
)
print(response['ETag'])
# Simple upload from a bytes payload
response = client.put_object(
Bucket='test04-123456789',
Body=b'abcdefg',
Key=file_name
)
print(response['ETag'])
# Simple upload from a local file path
response = client.put_object_from_local_file(
Bucket='test04-123456789',
LocalFilePath='local.txt',
Key=file_name,
)
print(response['ETag'])
# Simple upload with HTTP headers set
response = client.put_object(
Bucket='test04-123456789',
Body=b'test',
Key=file_name,
ContentType='text/html; charset=utf-8'
)
print(response['ETag'])
# Simple upload with custom metadata headers
response = client.put_object(
Bucket='test04-123456789',
Body=b'test',
Key=file_name,
Metadata={
'x-cos-meta-key1': 'value1',
'x-cos-meta-key2': 'value2'
}
)
print(response['ETag'])
# Advanced upload interface (recommended)
response = client.upload_file(
Bucket='test04-123456789',
LocalFilePath='local.txt',
Key=file_name,
PartSize=10,
MAXThread=10
)
print(response['ETag'])
# Download: save the object to a local file
response = client.get_object(
Bucket='test04-123456789',
Key=file_name,
)
response['Body'].get_stream_to_file('output.txt')
# Download: read the object as a stream
response = client.get_object(
Bucket='test04-123456789',
Key=file_name,
)
fp = response['Body'].get_raw_stream()
print(fp.read(2))
# Download: set response HTTP headers
response = client.get_object(
Bucket='test04-123456789',
Key=file_name,
ResponseContentType='text/html; charset=utf-8'
)
print(response['Content-Type'])
fp = response['Body'].get_raw_stream()
print(fp.read(2))
# Download: request a byte range
response = client.get_object(
Bucket='test04-123456789',
Key=file_name,
Range='bytes=0-10'
)
fp = response['Body'].get_raw_stream()
print(fp.read())
# Download: catch exceptions
try:
response = client.get_object(
Bucket='test04-123456789',
Key='not_exist.txt',
)
fp = response['Body'].get_raw_stream()
print(fp.read(2))
except CosServiceError as e:
print(e.get_origin_msg())
print(e.get_digest_msg())
print(e.get_status_code())
print(e.get_error_code())
print(e.get_error_msg())
print(e.get_resource_location())
print(e.get_trace_id())
print(e.get_request_id())
| 23.766423 | 98 | 0.696253 |
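The demo above imports CosServiceError and CosClientError but only catches them in the last download; wrapping uploads in a small retry helper is a common pattern. The sketch below only reuses client.put_object as shown above; the retry counts, backoff and bucket name are illustrative.

import time
from qcloud_cos import CosClientError, CosServiceError

def put_with_retry(client, bucket, key, body, retries=3, backoff=1.0):
    """Retry a simple upload a few times before giving up (sketch)."""
    for attempt in range(1, retries + 1):
        try:
            return client.put_object(Bucket=bucket, Body=body, Key=key)
        except (CosClientError, CosServiceError):
            if attempt == retries:
                raise
            time.sleep(backoff * attempt)  # simple linear backoff

# Example, assuming the `client` created above and an existing bucket:
# put_with_retry(client, 'test04-123456789', 'retry.txt', b'payload')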
fcab5286621c19626cb87d56fe316ab567ae04f6 | 13,312 | py | Python | sparse/coo/indexing.py | nimroha/sparse | 975a202fb68452dc3cc4421c18f415bc7d5033e8 | [
"BSD-3-Clause"
] | 2 | 2018-06-12T18:48:48.000Z | 2018-07-01T18:09:33.000Z | sparse/coo/indexing.py | nimroha/sparse | 975a202fb68452dc3cc4421c18f415bc7d5033e8 | [
"BSD-3-Clause"
] | null | null | null | sparse/coo/indexing.py | nimroha/sparse | 975a202fb68452dc3cc4421c18f415bc7d5033e8 | [
"BSD-3-Clause"
] | 2 | 2018-06-11T20:52:16.000Z | 2018-06-12T18:48:59.000Z | from collections import Iterable
from numbers import Integral
import numba
import numpy as np
from ..compatibility import range, zip_longest
from ..slicing import normalize_index
from ..utils import _zero_of_dtype
def getitem(x, index):
"""
This function implements the indexing functionality for COO.
The overall algorithm has three steps:
1. Normalize the index to canonical form. Function: normalize_index
2. Get the mask, which is a list of integers corresponding to
the indices in coords/data for the output data. Function: _mask
3. Transform the coordinates to what they will be in the output.
Parameters
----------
x : COO
The array to apply the indexing operation on.
index : {tuple, str}
The index into the array.
"""
from .core import COO
# If string, this is an index into an np.void
# Custom dtype.
if isinstance(index, str):
data = x.data[index]
idx = np.where(data)
coords = list(x.coords[:, idx[0]])
coords.extend(idx[1:])
return COO(coords, data[idx].flatten(),
shape=x.shape + x.data.dtype[index].shape,
has_duplicates=False,
sorted=True)
# Otherwise, convert into a tuple.
if not isinstance(index, tuple):
index = (index,)
# Check if the last index is an ellipsis.
last_ellipsis = len(index) > 0 and index[-1] is Ellipsis
# Normalize the index into canonical form.
index = normalize_index(index, x.shape)
# zip_longest so things like x[..., None] are picked up.
if len(index) != 0 and all(ind == slice(0, dim, 1) for ind, dim in zip_longest(index, x.shape)):
return x
# Get the mask
mask = _mask(x.coords, index, x.shape)
# Get the length of the mask
if isinstance(mask, slice):
n = len(range(mask.start, mask.stop, mask.step))
else:
n = len(mask)
coords = []
shape = []
i = 0
for ind in index:
# Nothing is added to shape or coords if the index is an integer.
if isinstance(ind, Integral):
i += 1
continue
# Add to the shape and transform the coords in the case of a slice.
elif isinstance(ind, slice):
shape.append(len(range(ind.start, ind.stop, ind.step)))
dt = np.min_scalar_type(min(-(dim - 1) if dim != 0 else -1 for dim in shape))
coords.append((x.coords[i, mask].astype(dt) - ind.start) // ind.step)
i += 1
elif isinstance(ind, Iterable):
raise NotImplementedError('Advanced indexing is not yet supported.')
# Add a dimension for None.
elif ind is None:
coords.append(np.zeros(n))
shape.append(1)
# Join all the transformed coords.
if coords:
coords = np.stack(coords, axis=0)
else:
# If index result is a scalar, return a 0-d COO or
# a scalar depending on whether the last index is an ellipsis.
if last_ellipsis:
coords = np.empty((0, n), dtype=np.uint8)
else:
if n != 0:
return x.data[mask][0]
else:
return _zero_of_dtype(x.dtype)[()]
shape = tuple(shape)
data = x.data[mask]
return COO(coords, data, shape=shape,
has_duplicates=False,
sorted=True)
def _mask(coords, indices, shape):
indices = _prune_indices(indices, shape)
ind_ar = np.empty((len(indices), 3), dtype=np.intp)
for i, idx in enumerate(indices):
if isinstance(idx, slice):
ind_ar[i] = [idx.start, idx.stop, idx.step]
else: # idx is an integer
ind_ar[i] = [idx, idx + 1, 1]
mask, is_slice = _compute_mask(coords, ind_ar)
if is_slice:
return slice(mask[0], mask[1], 1)
else:
return mask
def _prune_indices(indices, shape, prune_none=True):
"""
Gets rid of the indices that do not contribute to the
overall mask, e.g. None and full slices.
Parameters
----------
indices : tuple
The indices to the array.
shape : tuple[int]
The shape of the array.
Returns
-------
indices : tuple
The filtered indices.
Examples
--------
>>> _prune_indices((None, 5), (10,)) # None won't affect the mask
[5]
>>> _prune_indices((slice(0, 10, 1),), (10,)) # Full slices don't affect the mask
[]
"""
if prune_none:
indices = [idx for idx in indices if idx is not None]
i = 0
for idx, l in zip(indices[::-1], shape[::-1]):
if not isinstance(idx, slice):
break
if idx.start == 0 and idx.stop == l and idx.step == 1:
i += 1
continue
if idx.start == l - 1 and idx.stop == -1 and idx.step == -1:
i += 1
continue
break
if i != 0:
indices = indices[:-i]
return indices
@numba.jit(nopython=True)
def _compute_mask(coords, indices): # pragma: no cover
"""
Gets the mask for the coords given the indices in slice format.
Works with either start-stop ranges of matching indices into coords
called "pairs" (start-stop pairs) or filters the mask directly, based
on which is faster.
Exploits the structure in sorted coords, which is that for a constant
value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted.
Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]``
is always sorted. It uses this sortedness to find sub-pairs for each
dimension given the previous, and so on. This is efficient for small
slices or ints, but not for large ones.
After it detects that working with pairs is rather inefficient (or after
going through each possible index), it constructs a filtered mask from the
start-stop pairs.
Parameters
----------
coords : np.ndarray
The coordinates of the array.
indices : np.ndarray
The indices in the form of slices such that indices[:, 0] are starts,
indices[:, 1] are stops and indices[:, 2] are steps.
Returns
-------
mask : np.ndarray
The starts and stops in the mask.
is_slice : bool
Whether or not the array represents a continuous slice.
Examples
--------
Let's create some mock coords and indices
>>> import numpy as np
>>> coords = np.array([[0, 0, 1, 1, 2, 2]])
>>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2)
Now let's get the mask. Notice that the indices of ``0`` and ``2`` are matched.
>>> _compute_mask(coords, indices)
(array([0, 1, 4, 5]), False)
Now, let's try with a more "continuous" slice. Matches ``0`` and ``1``.
>>> indices = np.array([[0, 2, 1]])
>>> _compute_mask(coords, indices)
(array([0, 4]), True)
This is equivalent to mask being ``slice(0, 4, 1)``.
"""
# Set the initial mask to be the entire range of coordinates.
starts = [0]
stops = [coords.shape[1]]
n_matches = coords.shape[1]
i = 0
while i < len(indices):
# Guesstimate whether working with pairs is more efficient or
# working with the mask directly.
# One side is the estimate of time taken for binary searches
# (n_searches * log(avg_length))
# The other is an estimated time of a linear filter for the mask.
n_pairs = len(starts)
n_current_slices = _get_slice_len(indices[i]) * n_pairs + 2
if n_current_slices * np.log(n_current_slices / max(n_pairs, 1)) > \
n_matches + n_pairs:
break
# For each of the pairs, search inside the coordinates for other
# matching sub-pairs.
# This gets the start-end coordinates in coords for each 'sub-array'
# Which would come out of indexing a single integer.
starts, stops, n_matches = _get_mask_pairs(starts, stops, coords[i], indices[i])
i += 1
# Combine adjacent pairs
starts, stops = _join_adjacent_pairs(starts, stops)
# If just one pair is left over, treat it as a slice.
if i == len(indices) and len(starts) == 1:
return np.array([starts[0], stops[0]]), True
# Convert start-stop pairs into mask, filtering by remaining
# coordinates.
mask = _filter_pairs(starts, stops, coords[i:], indices[i:])
return np.array(mask, dtype=np.intp), False
@numba.jit(nopython=True)
def _get_mask_pairs(starts_old, stops_old, c, idx): # pragma: no cover
"""
Gets the pairs for a following dimension given the pairs for
a dimension.
For each pair, it searches in the following dimension for
matching coords and returns those.
The total combined length of all pairs is returned to
help with the performance guesstimate.
Parameters
----------
starts_old, stops_old : list[int]
The starts and stops from the previous index.
c : np.ndarray
The coords for this index's dimension.
idx : np.ndarray
The index in the form of a slice.
idx[0], idx[1], idx[2] = start, stop, step
Returns
-------
starts, stops: list
The starts and stops after applying the current index.
n_matches : int
The sum of elements in all ranges.
Examples
--------
>>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2])
>>> starts_old = [4]
>>> stops_old = [8]
>>> idx = np.array([1, 2, 1])
>>> _get_mask_pairs(starts_old, stops_old, c, idx)
([4], [6], 2)
"""
starts = []
stops = []
n_matches = 0
for j in range(len(starts_old)):
# For each matching "integer" in the slice, search within the "sub-coords"
# Using binary search.
for p_match in range(idx[0], idx[1], idx[2]):
start = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match) + starts_old[j]
stop = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match + 1) + starts_old[j]
if start != stop:
starts.append(start)
stops.append(stop)
n_matches += stop - start
return starts, stops, n_matches
@numba.jit(nopython=True)
def _get_slice_len(idx): # pragma: no cover
"""
Get the number of elements in a slice.
Parameters
----------
idx : np.ndarray
A (3,) shaped array containing start, stop, step
Returns
-------
n : int
The length of the slice.
Examples
--------
>>> idx = np.array([5, 15, 5])
>>> _get_slice_len(idx)
2
"""
start, stop, step = idx[0], idx[1], idx[2]
if step > 0:
return (stop - start + step - 1) // step
else:
return (start - stop - step - 1) // (-step)
@numba.jit(nopython=True)
def _filter_pairs(starts, stops, coords, indices): # pragma: no cover
"""
Converts all the pairs into a single integer mask, additionally filtering
by the indices.
Parameters
----------
starts, stops : list[int]
The starts and stops to convert into an array.
coords : np.ndarray
The coordinates to filter by.
indices : np.ndarray
The indices in the form of slices such that indices[:, 0] are starts,
indices[:, 1] are stops and indices[:, 2] are steps.
Returns
-------
mask : list
The output integer mask.
Examples
--------
>>> import numpy as np
>>> starts = [2]
>>> stops = [7]
>>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])
>>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs
>>> _filter_pairs(starts, stops, coords, indices)
[2, 4, 6]
"""
mask = []
# For each pair,
for i in range(len(starts)):
# For each element match within the pair range
for j in range(starts[i], stops[i]):
match = True
# Check if it matches all indices
for k in range(len(indices)):
idx = indices[k]
elem = coords[k, j]
match &= ((elem - idx[0]) % idx[2] == 0 and
((idx[2] > 0 and idx[0] <= elem < idx[1])
or (idx[2] < 0 and idx[0] >= elem > idx[1])))
# and append to the mask if so.
if match:
mask.append(j)
return mask
@numba.jit(nopython=True)
def _join_adjacent_pairs(starts_old, stops_old): # pragma: no cover
"""
Joins adjacent pairs into one. For example, 2-5 and 5-7
will reduce to 2-7 (a single pair). This may help in
returning a slice in the end which could be faster.
Parameters
----------
starts_old, stops_old : list[int]
The input starts and stops
Returns
-------
starts, stops : list[int]
The reduced starts and stops.
Examples
--------
>>> starts = [2, 5]
>>> stops = [5, 7]
>>> _join_adjacent_pairs(starts, stops)
([2], [7])
"""
if len(starts_old) <= 1:
return starts_old, stops_old
starts = [starts_old[0]]
stops = []
for i in range(1, len(starts_old)):
if starts_old[i] != stops_old[i - 1]:
starts.append(starts_old[i])
stops.append(stops_old[i - 1])
stops.append(stops_old[-1])
return starts, stops
| 29.51663 | 100 | 0.583459 |
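A small usage sketch of the indexing path implemented above, driven through the public COO class. It assumes the sparse package that ships this module is importable and that COO.from_numpy and todense behave as usual; the array values are arbitrary.

import numpy as np
import sparse

if __name__ == '__main__':
    x = np.zeros((4, 5))
    x[0, 2] = 1.0
    x[3, 1] = 2.0
    s = sparse.COO.from_numpy(x)

    # Basic slicing goes through getitem() above: normalize the index,
    # build the coordinate mask, then remap the surviving coordinates.
    sub = s[1:4, ::2]
    assert np.array_equal(sub.todense(), x[1:4, ::2])

    # Integer indexing drops the dimension, matching NumPy semantics.
    assert np.array_equal(s[3].todense(), x[3])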
0aba7fec95a40317fb1568ee7b30d677fe49d2b7 | 12,061 | py | Python | fairness_teaching/in_progress/rl_a2c/train.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | fairness_teaching/in_progress/rl_a2c/train.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | fairness_teaching/in_progress/rl_a2c/train.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import numpy as np
import tensorflow as tf
import data
import model
# pylint: skip-file
# from data import *
# from architecture import *
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')
parser.add_argument('--real_path', default='/localscratch/wliu328/att/resize128')
# parser.add_argument('--real_path', default='/localscratch/wliu328/att/big/data')
parser.add_argument('--fake_path', default='/localscratch/wliu328/att/output/AttGAN_128/samples_training_2')
parser.add_argument('--train_label', default='/localscratch/wliu328/att/annotations/train_label.txt')
parser.add_argument('--test_label', default='/localscratch/wliu328/att/annotations/test_label.txt')
parser.add_argument('--valid_label', default='/localscratch/wliu328/att/annotations/val_label.txt')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--n_episode', type=int, default=500, help='Epoch to run [default: 50]')
parser.add_argument('--batch_size', type=int, default=64, help='Batch size during training [default: 64]')
parser.add_argument('--n_class', type=int, default=2, help='Number of class [default: 2]')
parser.add_argument('--n_action', type=int, default=2, help='Number of action [default: 2]')
parser.add_argument('--lr', type=float, default=0.1, help='Initial learning rate [default: 0.1]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='momentum', help='adam or momentum [default: momentum]')
parser.add_argument('--random_seed', type=int, default=100,help='random seed')
FLAGS = parser.parse_args()
##################### config #####################
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.gpu
tf.set_random_seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
REAL_PATH = FLAGS.real_path
FAKE_PATH = FLAGS.fake_path
TRAIN_LABEL = FLAGS.train_label
TEST_LABEL = FLAGS.test_label
VALID_LABEL = FLAGS.valid_label
BATCH_SIZE = FLAGS.batch_size
N_EPISODE = FLAGS.n_episode
N_CLASS = FLAGS.n_class
N_ACTION = FLAGS.n_action
LR = FLAGS.lr
MOMENTUM = FLAGS.momentum
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
LOG_PATH = os.path.join(ROOT_PATH, FLAGS.log_dir)
if not os.path.exists(LOG_PATH): os.mkdir(LOG_PATH)
acc_count = 0
while True:
if os.path.exists(os.path.join(LOG_PATH, 'log_%02d.txt' % acc_count)): acc_count += 1
else: break
LOG_FNAME = 'log_%02d.txt' % acc_count
LOG_FOUT = open(os.path.join(LOG_PATH, LOG_FNAME), 'w')
(train_images, train_labels, train_att), train_iters = data.data_train(REAL_PATH, TRAIN_LABEL, BATCH_SIZE)
(fake_images, fake_labels, fake_att), fake_iters = data.data_train(FAKE_PATH, TRAIN_LABEL, BATCH_SIZE)
(valid_images, valid_labels, valid_att), valid_iters = data.data_test(REAL_PATH, VALID_LABEL, BATCH_SIZE*10)
(test_images, test_labels, test_att), test_iters = data.data_test(REAL_PATH, TEST_LABEL, BATCH_SIZE)
####################################################
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def choose_action(prob_actions):
actions = []
for i in range(prob_actions.shape[0]):
action = np.random.choice(range(prob_actions.shape[1]), p=prob_actions[i])
actions.append(action)
return np.array(actions)
def vgg_graph(sess, phs):
VGG = model.VGG()
Y_score = VGG.build(phs['batch_images'], N_CLASS, phs['is_training_ph'])
Y_hat = tf.nn.softmax(Y_score)
Y_pred = tf.argmax(Y_hat, 1)
Y_label = tf.to_float(tf.one_hot(phs['batch_labels'], N_CLASS))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = Y_score, labels = Y_label)
loss_op = tf.reduce_mean(cross_entropy)
correct_prediction = tf.equal(tf.argmax(Y_hat, 1), tf.argmax(Y_label, 1))
acc_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
update_op = tf.train.MomentumOptimizer(LR, MOMENTUM).minimize(loss_op, var_list=VGG.vars)
return loss_op, acc_op, cross_entropy, Y_hat, update_op, Y_pred, VGG.vars
def rl_graph(sess, phrl):
Actor = model.Actor()
Y_score = Actor.build(phrl['states_rl'], N_ACTION, phrl['is_training_rl'])
Y_prob =tf.nn.softmax(Y_score)
entropy = tf.reduce_sum(tf.reduce_mean(Y_prob)*tf.math.log(Y_prob))
neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = Y_score, labels = phrl['actions_rl'])
loss_op = tf.reduce_mean(neg_log_prob*phrl['values_rl'])
reg_loss = tf.reduce_sum(Actor.reg_loss)
loss_op += reg_loss
loss_op += 1e-3 * entropy
# update_op = tf.train.MomentumOptimizer(LR, MOMENTUM).minimize(loss_op, var_list=Actor.vars)
update_op = tf.train.AdamOptimizer(1e-4).minimize(loss_op, var_list=Actor.vars)
return loss_op, Y_prob, update_op, Actor.vars
def c_graph(sess, phc):
Critic = model.Critic()
Y_value = Critic.build(phc['states_c'], phc['is_training_c'])
loss_op = tf.reduce_mean(tf.square(Y_value-phc['values_c']))
reg_loss = tf.reduce_sum(Critic.reg_loss)
loss_op += reg_loss
# update_op = tf.train.MomentumOptimizer(LR, MOMENTUM).minimize(loss_op, var_list=Critic.vars)
update_op = tf.train.AdamOptimizer(1e-3).minimize(loss_op, var_list=Critic.vars)
return loss_op, Y_value, update_op, Critic.vars
def train():
batch_images = tf.placeholder(tf.float32,[None,128,128,3])
batch_labels = tf.placeholder(tf.int32,[None,])
is_training_ph = tf.placeholder(tf.bool)
lr_ph = tf.placeholder(tf.float32)
states_rl = tf.placeholder(tf.float32,[None,2])
actions_rl = tf.placeholder(tf.int32,[None,])
values_rl = tf.placeholder(tf.float32,[None,])
is_training_rl = tf.placeholder(tf.bool)
lr_rl = tf.placeholder(tf.float32)
states_c = tf.placeholder(tf.float32,[None,7])
values_c = tf.placeholder(tf.float32,[None,])
is_training_c = tf.placeholder(tf.bool)
lr_c= tf.placeholder(tf.float32)
phs = {'batch_images': batch_images,
'batch_labels': batch_labels,
'is_training_ph': is_training_ph,
'lr_ph': lr_ph}
phrl = {'states_rl': states_rl,
'actions_rl': actions_rl,
'values_rl': values_rl,
'is_training_rl': is_training_rl,
'lr_rl': lr_rl}
phc = {'states_c': states_c,
'values_c': values_c,
'is_training_c': is_training_c,
'lr_c': lr_c}
with tf.Session() as sess:
vgg_loss, vgg_acc, vgg_ce, vgg_prob, vgg_update, vgg_pred, vgg_vars = vgg_graph(sess, phs)
rl_loss, rl_prob, rl_update, rl_vars = rl_graph(sess, phrl)
c_loss, c_value, c_update, c_vars = c_graph(sess, phc)
vgg_init = tf.variables_initializer(var_list=vgg_vars)
saver = tf.train.Saver(vgg_vars)
all_saver = tf.train.Saver()
init = tf.global_variables_initializer()
sess.run(init)
# for epoch in range(4):
# for t in range(train_iters):
# if t % 50==0: print("pretrain:", t)
# tr_images, tr_labels = sess.run([train_images,train_labels])
# pre_dict = {phs['batch_images']: tr_images,
# phs['batch_labels']: tr_labels,
# phs['is_training_ph']: True}
# sess.run(vgg_update, feed_dict=pre_dict)
# saver.save(sess,LOG_PATH+'/vgg.ckpt')
# valid_acc = 0.0
# y_pred =[]
# y_label = []
# y_att = []
# for k in range(valid_iters):
# va_images, va_labels, va_att = sess.run([valid_images, valid_labels, valid_att])
# valid_dict = {phs['batch_images']: va_images,
# phs['batch_labels']: va_labels,
# phs['is_training_ph']: False}
# batch_acc, batch_pred = sess.run([vgg_acc,vgg_pred], feed_dict=valid_dict)
# valid_acc += batch_acc
# y_pred += batch_pred.tolist()
# y_label += va_labels.tolist()
# y_att += va_att.tolist()
# valid_acc = valid_acc / float(valid_iters)
# valid_eo = data.cal_eo(y_att, y_label, y_pred)
# log_string('====pretrain: valid_acc=%.4f, valid_eo=%.4f' % (valid_acc, valid_eo[-1]))
# print(valid_eo)
va_images, va_labels, va_att = sess.run([valid_images, valid_labels, valid_att])
for i in range(N_EPISODE):
sess.run(vgg_init)
# saver.restore(sess,LOG_PATH+'/vgg.ckpt')
train_loss = []
for j in range(train_iters*20):
tr_images, tr_labels, tr_att = sess.run([train_images,train_labels, train_att])
fa_images, fa_labels, fa_att = sess.run([fake_images,fake_labels, fake_att])
train_dict = {phs['batch_images']: tr_images,
phs['batch_labels']: tr_labels,
phs['is_training_ph']: False}
ce, acc, prob, pred = sess.run([vgg_ce, vgg_acc, vgg_prob, vgg_pred], feed_dict=train_dict)
ce = np.clip(ce, 0, 10)/10.0
train_loss.append(np.mean(ce))
model_stat = list(data.cal_eo(tr_att, tr_labels, pred)) #shape [5,]
model_stat.append(np.mean(ce))
model_stat.append(j/(train_iters*20))
# model_stat.append(np.mean(train_loss))
c_state = np.array(model_stat)[np.newaxis,:]
# model_stat = np.tile(model_stat,(BATCH_SIZE,1))
state = np.concatenate((tr_labels[:, np.newaxis], tr_att[:, np.newaxis]), axis=1)
rl_dict = {phrl['states_rl']: state,
phrl['is_training_rl']: False}
action = choose_action(sess.run(rl_prob, feed_dict=rl_dict))
c_dict = {phc['states_c']: c_state,
phc['is_training_c']: False}
base = sess.run(c_value, feed_dict=c_dict)
bool_train = list(map(bool,action))
bool_fake = list(map(bool,1-action))
co_images = np.concatenate((tr_images[bool_train],fa_images[bool_fake]),axis=0)
co_labels = np.concatenate((tr_labels[bool_train],fa_labels[bool_fake]),axis=0)
update_dict = {phs['batch_images']: co_images,
phs['batch_labels']: co_labels,
phs['is_training_ph']: True}
_, ce, acc = sess.run([vgg_update, vgg_ce, vgg_acc], feed_dict=update_dict)
valid_dict = {phs['batch_images']: va_images,
phs['batch_labels']: va_labels,
phs['is_training_ph']: False}
valid_acc, y_pred = sess.run([vgg_acc,vgg_pred], feed_dict=valid_dict)
valid_eo = data.cal_eo(va_att, va_labels, y_pred)
if valid_eo[-1]<=0.05:
value = -2
else:
value = -np.log(valid_eo[-1])
reward = value-base[0]
c_dict = {phc['states_c']: c_state,
phc['values_c']: [value],
phc['is_training_c']: True}
_, cri_loss = sess.run([c_update, c_loss], feed_dict=c_dict)
final_reward = np.repeat(reward, BATCH_SIZE)
learn_dict = {phrl['states_rl']: state,
phrl['actions_rl']: action,
phrl['values_rl']: final_reward,
phrl['is_training_rl']: True}
sess.run(rl_update, feed_dict=learn_dict)
if j % 10 == 0:
log_string('====epoch_%d====iter_%d: student_loss=%.4f, train_acc=%.4f' % (i, j, np.mean(ce), acc))
log_string('===============: critic_loss=%.4f, reward=%.4f, valid_acc=%.4f, valid_eo=%.4f' % (cri_loss, reward, valid_acc, valid_eo[-1]))
print('eo: ',valid_eo[0],valid_eo[1])
print('eo: ',valid_eo[2],valid_eo[3])
print(action, np.sum(action))
all_saver.save(sess,LOG_PATH+'/all.ckpt')
# """
if __name__ == "__main__":
train()
LOG_FOUT.close()
| 40.337793 | 147 | 0.679546 |
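A tiny, framework-free check of the choose_action helper used by the training loop above: it samples one action per row from a row-wise probability matrix. The helper body is copied here only so the sketch runs on its own; the probabilities are made up and the seed is fixed just for repeatability.

import numpy as np

def choose_action(prob_actions):
    # Mirrors the helper defined in the training script above.
    actions = []
    for i in range(prob_actions.shape[0]):
        action = np.random.choice(range(prob_actions.shape[1]), p=prob_actions[i])
        actions.append(action)
    return np.array(actions)

if __name__ == '__main__':
    np.random.seed(0)
    prob_actions = np.array([[0.9, 0.1],
                             [0.2, 0.8]])  # made-up policy outputs for two samples
    actions = choose_action(prob_actions)
    assert actions.shape == (2,)
    assert set(actions.tolist()) <= {0, 1}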
216cf4dadaacac7ace779cfdaa6ed01560cf44c8 | 2,769 | py | Python | test/integration/rmq_2_sysmon/run_program.py | mjpernot/rmq-sysmon | 728f696a451b6c870feb22e53be09bfcd7ed3c77 | [
"MIT"
] | null | null | null | test/integration/rmq_2_sysmon/run_program.py | mjpernot/rmq-sysmon | 728f696a451b6c870feb22e53be09bfcd7ed3c77 | [
"MIT"
] | null | null | null | test/integration/rmq_2_sysmon/run_program.py | mjpernot/rmq-sysmon | 728f696a451b6c870feb22e53be09bfcd7ed3c77 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: run_program.py
Description: Integration testing of run_program in rmq_2_sysmon.py.
Usage:
test/integration/rmq_2_sysmon/run_program.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import rmq_2_sysmon
import rmq_cleanup
import lib.gen_libs as gen_libs
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_run_program
tearDown
"""
def setUp(self):
"""Function: setUp
Description: Initialization for integration testing.
Arguments:
"""
self.base_dir = "test/integration/rmq_2_sysmon"
self.test_path = os.path.join(os.getcwd(), self.base_dir)
self.config_path = os.path.join(self.test_path, "config")
self.cfg = gen_libs.load_module("rabbitmq", self.config_path)
log_path = os.path.join(self.test_path, self.cfg.log_dir)
self.cfg.log_file = os.path.join(log_path, self.cfg.log_file)
self.cfg.message_dir = os.path.join(self.test_path,
self.cfg.message_dir)
self.cfg.queue_list[0]["directory"] = os.path.join(
self.test_path, self.cfg.queue_list[0]["directory"])
self.connect_true = "Connected to RabbitMQ node"
self.args_array = {"-M": True, "-c": "rabbitmq", "-d": "config"}
self.func_dict = {"-M": rmq_2_sysmon.monitor_queue}
@mock.patch("rmq_2_sysmon.gen_libs.get_base_dir")
@mock.patch("rmq_2_sysmon.gen_libs.load_module")
@mock.patch("rmq_2_sysmon.rabbitmq_class.RabbitMQCon.consume")
def test_run_program(self, mock_consume, mock_cfg, mock_base):
"""Function: test_run_program
Description: Test of test_run_program function.
Arguments:
"""
mock_consume.return_value = "RabbitMQ_Tag"
mock_cfg.return_value = self.cfg
mock_base.return_value = self.test_path
rmq_2_sysmon.run_program(self.args_array, self.func_dict)
self.assertTrue(self.connect_true in open(self.cfg.log_file).read())
def tearDown(self):
"""Function: tearDown
Description: Clean up of integration testing.
Arguments:
"""
os.remove(self.cfg.log_file)
rmq_cleanup.rmq_cleanup(self.cfg, self.cfg.queue_list[0]["queue"],
True)
if __name__ == "__main__":
unittest.main()
| 24.504425 | 76 | 0.649693 |
712974cb3d989fe2a43b804a0724c95593cb0022 | 671 | py | Python | tests/test_hash.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 2,382 | 2015-01-04T03:16:59.000Z | 2021-12-10T15:48:56.000Z | tests/test_hash.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 1,009 | 2015-01-03T23:44:02.000Z | 2021-12-10T16:02:42.000Z | tests/test_hash.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 467 | 2015-01-19T23:18:33.000Z | 2021-12-09T18:31:28.000Z | from shapely.geometry import Point, MultiPoint, Polygon, GeometryCollection
def test_point():
g = Point(0, 0)
try:
assert hash(g)
return False
except TypeError:
return True
def test_multipoint():
g = MultiPoint([(0, 0)])
try:
assert hash(g)
return False
except TypeError:
return True
def test_polygon():
g = Point(0, 0).buffer(1.0)
try:
assert hash(g)
return False
except TypeError:
return True
def test_collection():
g = GeometryCollection([Point(0, 0)])
try:
assert hash(g)
return False
except TypeError:
return True
| 17.657895 | 75 | 0.584203 |
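The tests above signal their outcome by returning True or False, which pytest ignores; if the intent is that these geometry types are unhashable, as the try/except blocks imply, the same check reads more idiomatically with pytest.raises. This is a hedged alternative formulation, not a replacement taken from the Shapely project.

import pytest
from shapely.geometry import Point, MultiPoint, GeometryCollection

@pytest.mark.parametrize("geom", [
    Point(0, 0),
    MultiPoint([(0, 0)]),
    Point(0, 0).buffer(1.0),
    GeometryCollection([Point(0, 0)]),
])
def test_geometries_are_unhashable(geom):
    with pytest.raises(TypeError):
        hash(geom)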
27e26c0b26c7f50eca8a8e8c144a7dbfd9fb0afb | 9,482 | py | Python | tests/test_modeling_tf_electra.py | malteos/transformers | cafa6a9e29f3e99c67a1028f8ca779d439bc0689 | [
"Apache-2.0"
] | 23 | 2020-10-26T11:10:30.000Z | 2022-03-21T10:18:08.000Z | tests/test_modeling_tf_electra.py | malteos/transformers | cafa6a9e29f3e99c67a1028f8ca779d439bc0689 | [
"Apache-2.0"
] | 2 | 2020-10-29T07:59:57.000Z | 2021-09-08T14:49:44.000Z | tests/test_modeling_tf_electra.py | malteos/transformers | cafa6a9e29f3e99c67a1028f8ca779d439bc0689 | [
"Apache-2.0"
] | 8 | 2020-12-31T03:30:57.000Z | 2022-03-21T08:12:54.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import ElectraConfig, is_tf_available
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import require_tf, slow
if is_tf_available():
from transformers.modeling_tf_electra import (
TFElectraModel,
TFElectraForMaskedLM,
TFElectraForPreTraining,
TFElectraForTokenClassification,
)
@require_tf
class TFElectraModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFElectraModel, TFElectraForMaskedLM, TFElectraForPreTraining, TFElectraForTokenClassification,)
if is_tf_available()
else ()
)
class TFElectraModelTester(object):
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = ElectraConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_electra_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFElectraModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
(sequence_output,) = model(inputs)
inputs = [input_ids, input_mask]
(sequence_output,) = model(inputs)
(sequence_output,) = model(input_ids)
result = {
"sequence_output": sequence_output.numpy(),
}
self.parent.assertListEqual(
list(result["sequence_output"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
def create_and_check_electra_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFElectraForMaskedLM(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
(prediction_scores,) = model(inputs)
result = {
"prediction_scores": prediction_scores.numpy(),
}
self.parent.assertListEqual(
list(result["prediction_scores"].shape), [self.batch_size, self.seq_length, self.vocab_size]
)
def create_and_check_electra_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFElectraForPreTraining(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
(prediction_scores,) = model(inputs)
result = {
"prediction_scores": prediction_scores.numpy(),
}
self.parent.assertListEqual(list(result["prediction_scores"].shape), [self.batch_size, self.seq_length])
def create_and_check_electra_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFElectraForTokenClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
(logits,) = model(inputs)
result = {
"logits": logits.numpy(),
}
self.parent.assertListEqual(
list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels]
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def setUp(self):
self.model_tester = TFElectraModelTest.TFElectraModelTester(self)
self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_electra_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_masked_lm(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_pretraining(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
# for model_name in list(TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in ["electra-small-discriminator"]:
model = TFElectraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| 41.587719 | 116 | 0.656507 |
9d30c3d8729e194656a32992221dbee31ce855fa | 1,267 | py | Python | tools/pylightnet/pylightnet/context.py | SiyuanMa0316/LightNet | 931479d28695f3e871ba5eeae60a8ef4d84de72d | ["MIT"] | null | null | null | tools/pylightnet/pylightnet/context.py | SiyuanMa0316/LightNet | 931479d28695f3e871ba5eeae60a8ef4d84de72d | ["MIT"] | 1 | 2019-03-08T13:18:36.000Z | 2019-03-08T13:18:36.000Z | tools/pylightnet/pylightnet/context.py | SiyuanMa0316/LightNet | 931479d28695f3e871ba5eeae60a8ef4d84de72d | ["MIT"] | null | null | null |
from ctypes import *
import lib
def create():
lib.libln.ln_context_create.restype = c_void_p
return lib.libln.ln_context_create()
def init(ctx, source):
lib.libln.ln_context_init(ctx, source)
def cleanup(ctx):
lib.libln.ln_context_cleanup(ctx)
def free(ctx):
lib.libln.ln_context_free(ctx)
def compile(ctx, target):
lib.libln.ln_context_compile(ctx, target)
def Print(ctx, outfile):
lib.libln.ln_context_print(ctx, outfile)
def load(ctx, datafile):
lib.libln.ln_context_load(ctx, datafile)
def set_data(ctx, tname, data):
lib.libln.ln_context_set_data(ctx, tname, data)
def get_data(ctx, tname, data):
lib.libln.ln_context_get_data.restype = c_void_p
return lib.libln.ln_context_get_data(ctx, tname, data)
def data_size(ctx, tname):
lib.libln.ln_context_data_size.restype = c_size_t
return lib.libln.ln_context_data_size(ctx, tname)
def set_param(ctx, opname, pname, *args):
if len(args) == 1:
lib.libln.ln_context_set_param(ctx, opname, pname, args[0])
    elif len(args) == 2:
lib.libln.ln_context_set_param(ctx, opname, pname, args[0], args[1])
else:
assert False;
def run(ctx):
lib.libln.ln_context_run(ctx)
def unload(ctx):
lib.libln.ln_context_unload(ctx)
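# --- Usage sketch (added illustration, not part of the original module) ---
# The call order below is an assumption about the intended lifecycle, and the
# file names ('model.json', 'model.wts') and the 'cpu' target are placeholders.
if __name__ == '__main__':
    ctx = create()
    init(ctx, 'model.json')   # hypothetical source file
    compile(ctx, 'cpu')       # hypothetical compilation target
    load(ctx, 'model.wts')    # hypothetical tensor data file
    run(ctx)
    unload(ctx)
    cleanup(ctx)
    free(ctx)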
| 25.34 | 76 | 0.715075 |
991bae49a484ef06027e4de9d26f0335ac4df207 | 9,959 | py | Python | contrib/spendfrom/spendfrom.py | mxdum/Mxdum | 813de58604a5dc0936e61c440af8b768cb35f055 | ["MIT"] | null | null | null | contrib/spendfrom/spendfrom.py | mxdum/Mxdum | 813de58604a5dc0936e61c440af8b768cb35f055 | ["MIT"] | 1 | 2019-02-23T21:39:25.000Z | 2019-02-23T21:39:25.000Z | contrib/spendfrom/spendfrom.py | mxdum/Mxdum | 813de58604a5dc0936e61c440af8b768cb35f055 | ["MIT"] | 1 | 2019-02-23T21:34:31.000Z | 2019-02-23T21:34:31.000Z |
#!/usr/bin/env python
#
# Use the raw transactions API to spend MDMs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a mxdumd or mxdum-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the mxdum data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Mxdum/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Mxdum")
return os.path.expanduser("~/.mxdum")
def read_bitcoin_config(dbdir):
"""Read the mxdum.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "mxdum.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a mxdum JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 16393 if testnet else 6393
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the mxdumd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(mxdumd):
info = mxdumd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
mxdumd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = mxdumd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(mxdumd):
address_summary = dict()
address_to_account = dict()
for info in mxdumd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = mxdumd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = mxdumd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-mxdum-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
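# Worked example of the greedy selection above (hypothetical values, kept as
# comments so the script's behaviour is unchanged):
#   inputs = [{"txid": "aa", "vout": 0, "amount": Decimal("1.0")},
#             {"txid": "bb", "vout": 1, "amount": Decimal("2.0")},
#             {"txid": "cc", "vout": 0, "amount": Decimal("5.0")}]
#   outputs, change = select_coins(Decimal("2.5"), inputs)
#   # outputs == [{"txid": "aa", "vout": 0}, {"txid": "bb", "vout": 1}]
#   # change  == Decimal("0.5")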
def create_tx(mxdumd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(mxdumd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to mxdumd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = mxdumd.createrawtransaction(inputs, outputs)
signed_rawtx = mxdumd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(mxdumd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = mxdumd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(mxdumd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = mxdumd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(mxdumd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        fee = total_in - total_out  # the fee this transaction actually pays
        if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get MDMs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send MDMs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of mxdum.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
mxdumd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(mxdumd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(mxdumd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(mxdumd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(mxdumd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = mxdumd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| 37.160448 | 111 | 0.629782 |
c4a9e0870c2568093f0a2209f5ffc0b78ef03ef6 | 5,080 | py | Python | wb/main/utils/dev_cloud_http_service.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | ["Apache-2.0"] | 23 | 2022-03-17T12:24:09.000Z | 2022-03-31T09:13:30.000Z | wb/main/utils/dev_cloud_http_service.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | ["Apache-2.0"] | 18 | 2022-03-21T08:17:44.000Z | 2022-03-30T12:42:30.000Z | wb/main/utils/dev_cloud_http_service.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | ["Apache-2.0"] | 16 | 2022-03-17T12:24:14.000Z | 2022-03-31T12:15:12.000Z |
"""
OpenVINO DL Workbench
Class for working with DevCloud Service HTTP API
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import enum
import requests
from typing_extensions import TypedDict
from config.constants import CLOUD_SERVICE_HOST, CLOUD_SERVICE_PORT, REQUEST_TIMEOUT_SECONDS, CLOUD_SERVICE_API_PREFIX
from wb.error.dev_cloud_errors import (DevCloudNotRunningError, DevCloudHandshakeHTTPError, DevCloudDevicesHTTPError,
DevCloudRemoteJobHTTPError)
class DevCloudApiEndpointsEnum(enum.Enum):
sync = 'sync'
devices = 'devices'
remote_job = 'remote-job' # old way with sharing artifacts via HTTP
remote_job_trigger = 'remote-job/trigger' # old way with sharing artifacts via shared folder
class HandshakePayload(TypedDict):
wbURL: str
class HandshakeResponse(TypedDict):
dcUser: str
dcFileSystemPrefix: str
class TriggerRemoteJobPayload(TypedDict):
platformTag: str
wbPipelineId: int
remoteJobType: str
class TriggerSharedFolderRemoteJobPayload(TriggerRemoteJobPayload):
wbSetupBundlePath: str
wbJobBundlePath: str
class TriggerNetworkRemotePipelinePayload(TriggerRemoteJobPayload):
wbSetupBundleId: int
wbJobBundleId: int
class RemoteJobStatusResponse(TypedDict):
wbPipelineId: int
status: str
class DevCloudHttpService:
_api_url = f'{CLOUD_SERVICE_HOST}:{CLOUD_SERVICE_PORT}/{CLOUD_SERVICE_API_PREFIX}'
@staticmethod
def is_url_set() -> bool:
return CLOUD_SERVICE_HOST and CLOUD_SERVICE_PORT
@staticmethod
def start_handshake(payload: HandshakePayload) -> HandshakeResponse:
url = f'{DevCloudHttpService._api_url}/{DevCloudApiEndpointsEnum.sync.value}'
try:
response = requests.post(url=url, json=payload, timeout=REQUEST_TIMEOUT_SECONDS)
except requests.exceptions.ConnectionError:
raise DevCloudNotRunningError('DevCloud service is not running')
if response.status_code != requests.codes['ok']:
raise DevCloudHandshakeHTTPError('Handshake with DevCloud failed', response=response)
return response.json()
@staticmethod
def get_devices() -> dict:
url = f'{DevCloudHttpService._api_url}/{DevCloudApiEndpointsEnum.devices.value}'
try:
response = requests.get(url=url, timeout=REQUEST_TIMEOUT_SECONDS)
except requests.exceptions.ConnectionError:
raise DevCloudNotRunningError('DevCloud service is not running')
if response.status_code != requests.codes['ok']:
raise DevCloudDevicesHTTPError('Unable to fetch DevCloud devices', response=response)
return response.json()
@staticmethod
def trigger_network_remote_pipeline(payload: TriggerRemoteJobPayload) -> dict:
url = f'{DevCloudHttpService._api_url}/{DevCloudApiEndpointsEnum.remote_job.value}'
return DevCloudHttpService._trigger_remote_job(url, payload)
@staticmethod
def trigger_shared_folder_remote_pipeline(payload: TriggerRemoteJobPayload) -> dict:
url = f'{DevCloudHttpService._api_url}/{DevCloudApiEndpointsEnum.remote_job_trigger.value}'
return DevCloudHttpService._trigger_remote_job(url, payload)
@staticmethod
def _trigger_remote_job(url: str, payload: TriggerRemoteJobPayload):
response = requests.post(url=url, json=payload, timeout=REQUEST_TIMEOUT_SECONDS)
if response.status_code != requests.codes['ok']:
raise DevCloudRemoteJobHTTPError('Unable to trigger DevCloud remote job', response=response)
return response.json()
@staticmethod
def get_remote_job_status(wb_pipeline_id: int) -> RemoteJobStatusResponse:
url = f'{DevCloudHttpService._api_url}/{DevCloudApiEndpointsEnum.remote_job.value}/{wb_pipeline_id}'
response = requests.get(url=url, timeout=REQUEST_TIMEOUT_SECONDS)
if response.status_code != requests.codes['ok']:
raise DevCloudRemoteJobHTTPError('Unable to get DevCloud remote job status', response=response)
return response.json()
@staticmethod
def cancel_remote_job(wb_pipeline_id: int) -> RemoteJobStatusResponse:
url = f'{DevCloudHttpService._api_url}/{DevCloudApiEndpointsEnum.remote_job.value}/{wb_pipeline_id}'
response = requests.delete(url=url, timeout=REQUEST_TIMEOUT_SECONDS)
if response.status_code != requests.codes['ok']:
raise DevCloudRemoteJobHTTPError('Unable to cancel DevCloud remote job', response=response)
return response.json()
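# --- Usage sketch (added illustration, not part of the original module) ---
# The workbench URL, platform tag, job type and bundle ids below are made-up
# values; the __main__ guard keeps the sketch from running on import.
if __name__ == '__main__':
    if DevCloudHttpService.is_url_set():
        handshake = DevCloudHttpService.start_handshake(
            HandshakePayload(wbURL='http://workbench.example:5665'))
        devices = DevCloudHttpService.get_devices()
        job = DevCloudHttpService.trigger_network_remote_pipeline(
            TriggerNetworkRemotePipelinePayload(
                platformTag='example-platform-tag',
                wbPipelineId=1,
                remoteJobType='profiling',
                wbSetupBundleId=10,
                wbJobBundleId=11,
            )
        )
        print(handshake, devices, job)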
| 40.64 | 118 | 0.745866 |
96b71b7c8a6df14b5b3bb53c8506f7443088a7bc | 3,936 | py | Python | envoy/tests/test_envoy.py | jstoja/integrations-core | 704fa31f7db642c6df9e0969b3b2778fc6144fa7 | ["BSD-3-Clause"] | null | null | null | envoy/tests/test_envoy.py | jstoja/integrations-core | 704fa31f7db642c6df9e0969b3b2778fc6144fa7 | ["BSD-3-Clause"] | null | null | null | envoy/tests/test_envoy.py | jstoja/integrations-core | 704fa31f7db642c6df9e0969b3b2778fc6144fa7 | ["BSD-3-Clause"] | 1 | 2019-12-23T13:35:17.000Z | 2019-12-23T13:35:17.000Z |
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from copy import deepcopy
import mock
import pytest
from datadog_checks.envoy import Envoy
from datadog_checks.envoy.metrics import METRIC_PREFIX, METRICS
from .common import HOST, INSTANCES, response
CHECK_NAME = 'envoy'
@pytest.mark.usefixtures('dd_environment')
def test_success(aggregator):
instance = INSTANCES['main']
c = Envoy(CHECK_NAME, {}, [instance])
c.check(instance)
metrics_collected = 0
for metric in METRICS:
metrics_collected += len(aggregator.metrics(METRIC_PREFIX + metric))
assert metrics_collected >= 250
def test_success_fixture(aggregator):
instance = INSTANCES['main']
c = Envoy(CHECK_NAME, {}, [instance])
with mock.patch('requests.get', return_value=response('multiple_services')):
c.check(instance)
metrics_collected = 0
for metric in METRICS:
metrics_collected += len(aggregator.metrics(METRIC_PREFIX + metric))
num_metrics = len(response('multiple_services').content.decode().splitlines())
num_metrics -= sum(c.unknown_metrics.values()) + sum(c.unknown_tags.values())
assert 4215 <= metrics_collected == num_metrics
def test_success_fixture_whitelist(aggregator):
instance = INSTANCES['whitelist']
c = Envoy(CHECK_NAME, {}, [instance])
with mock.patch('requests.get', return_value=response('multiple_services')):
c.check(instance)
for metric in aggregator.metric_names:
assert metric.startswith('envoy.cluster.')
def test_success_fixture_blacklist(aggregator):
instance = INSTANCES['blacklist']
c = Envoy(CHECK_NAME, {}, [instance])
with mock.patch('requests.get', return_value=response('multiple_services')):
c.check(instance)
for metric in aggregator.metric_names:
assert not metric.startswith('envoy.cluster.')
def test_success_fixture_whitelist_blacklist(aggregator):
instance = INSTANCES['whitelist_blacklist']
c = Envoy(CHECK_NAME, {}, [instance])
with mock.patch('requests.get', return_value=response('multiple_services')):
c.check(instance)
for metric in aggregator.metric_names:
assert metric.startswith("envoy.cluster.") and not metric.startswith("envoy.cluster.out")
def test_service_check(aggregator):
instance = INSTANCES['main']
c = Envoy(CHECK_NAME, {}, [instance])
with mock.patch('requests.get', return_value=response('multiple_services')):
c.check(instance)
assert aggregator.service_checks(Envoy.SERVICE_CHECK_NAME)[0].status == Envoy.OK
def test_unknown():
instance = INSTANCES['main']
c = Envoy(CHECK_NAME, {}, [instance])
with mock.patch('requests.get', return_value=response('unknown_metrics')):
c.check(instance)
assert sum(c.unknown_metrics.values()) == 5
@pytest.mark.parametrize(
'test_case, extra_config, expected_http_kwargs',
[
("new auth config", {'username': 'new_foo', 'password': 'new_bar'}, {'auth': ('new_foo', 'new_bar')}),
("legacy ssl config True", {'verify_ssl': True}, {'verify': True}),
("legacy ssl config False", {'verify_ssl': False}, {'verify': False}),
("legacy ssl config unset", {}, {'verify': True}),
],
)
def test_config(test_case, extra_config, expected_http_kwargs):
instance = deepcopy(INSTANCES['main'])
instance.update(extra_config)
check = Envoy(CHECK_NAME, {}, instances=[instance])
with mock.patch('datadog_checks.base.utils.http.requests') as r:
r.get.return_value = mock.MagicMock(status_code=200)
check.check(instance)
http_wargs = dict(
auth=mock.ANY, cert=mock.ANY, headers=mock.ANY, proxies=mock.ANY, timeout=mock.ANY, verify=mock.ANY
)
http_wargs.update(expected_http_kwargs)
r.get.assert_called_with('http://{}:8001/stats'.format(HOST), **http_wargs)
| 32 | 111 | 0.694868 |
43744659fe23074da5bcbc98c10be8bee3cf36b4 | 2,526 | py | Python | tests/test_physt_histogram.py | artemis-analytics/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | ["Apache-2.0"] | 4 | 2020-02-29T15:02:05.000Z | 2021-05-13T18:50:58.000Z | tests/test_physt_histogram.py | artemis-analytics/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | ["Apache-2.0"] | 25 | 2020-02-25T19:29:21.000Z | 2020-04-03T15:06:59.000Z | tests/test_physt_histogram.py | ryanmwhitephd/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | ["Apache-2.0"] | 2 | 2021-08-12T09:40:51.000Z | 2021-08-12T09:42:09.000Z |
# Copyright © Her Majesty the Queen in Right of Canada, as represented
# by the Minister of Statistics Canada, 2019.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module includes code from the Physt Project
#
# (C) Jan Pipek, 2016-9, MIT licence
# See https://github.com/janpipek/physt
import sys
import os
sys.path = [os.path.join(os.path.dirname(__file__), "..")] + sys.path
# from physt.histogram1d import Histogram1D
from artemis.externals.physt import histogram
import numpy as np
import unittest
class TestNumpyBins(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_nbin(self):
arr = np.random.rand(100)
hist = histogram(arr, bins=15)
assert hist.bin_count == 15
assert np.isclose(hist.bin_right_edges[-1], arr.max())
assert np.isclose(hist.bin_left_edges[0], arr.min())
def test_edges(self):
arr = np.arange(0, 1, 0.01)
hist = histogram(arr, np.arange(0.1, 0.8001, 0.1))
assert np.allclose(hist.numpy_bins, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
assert hist.underflow == 10
assert hist.overflow == 19
def test_range(self):
arr = np.arange(0, 1.00, 0.01)
hist = histogram(arr, 10, range=(0.5, 1.0))
assert hist.bin_count == 10
assert hist.bin_left_edges[0] == 0.5
assert hist.bin_right_edges[-1] == 1.0
assert hist.overflow == 0
assert hist.underflow == 50
assert hist.total == 50
hist = histogram(arr, bins=10, range=(0.5, 1.0), keep_missed=False)
assert hist.total == 50
assert np.isnan(hist.underflow)
assert np.isnan(hist.overflow)
def test_metadata(self):
arr = np.arange(0, 1.00, 0.01)
hist = histogram(arr, name="name", title="title", axis_name="axis_name")
assert hist.name == "name"
assert hist.title == "title"
assert hist.axis_names == ("axis_name",)
if __name__ == "__main__":
unittest.main()
| 33.236842 | 85 | 0.651623 |
d6f8119b02f3f14f7bb4809e6183d0fa67ff8096 | 57,753 | py | Python | Python-3.5.10/Lib/test/test_email/test_headerregistry.py | AtriCZE23/POe-full | 89be2fda5747e44764a62ba5e358d8c9309fbf0a | ["MIT", "CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 6 | 2018-02-23T08:52:04.000Z | 2021-08-19T12:01:50.000Z | Python-3.5.10/Lib/test/test_email/test_headerregistry.py | AtriCZE23/Atri | 34ed092852b49daeafeb9c94adf3bfba42819b37 | ["MIT", "CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 5 | 2021-12-14T20:56:36.000Z | 2021-12-20T14:45:34.000Z | Python-3.5.10/Lib/test/test_email/test_headerregistry.py | AtriCZE23/POe-full | 89be2fda5747e44764a62ba5e358d8c9309fbf0a | ["MIT", "CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 1 | 2019-05-06T14:36:47.000Z | 2019-05-06T14:36:47.000Z |
import datetime
import textwrap
import unittest
import types
from email import errors
from email import policy
from email.message import Message
from test.test_email import TestEmailBase, parameterize
from email import headerregistry
from email.headerregistry import Address, Group
DITTO = object()
class TestHeaderRegistry(TestEmailBase):
def test_arbitrary_name_unstructured(self):
factory = headerregistry.HeaderRegistry()
h = factory('foobar', 'test')
self.assertIsInstance(h, headerregistry.BaseHeader)
self.assertIsInstance(h, headerregistry.UnstructuredHeader)
def test_name_case_ignored(self):
factory = headerregistry.HeaderRegistry()
# Whitebox check that test is valid
self.assertNotIn('Subject', factory.registry)
h = factory('Subject', 'test')
self.assertIsInstance(h, headerregistry.BaseHeader)
self.assertIsInstance(h, headerregistry.UniqueUnstructuredHeader)
class FooBase:
def __init__(self, *args, **kw):
pass
def test_override_default_base_class(self):
factory = headerregistry.HeaderRegistry(base_class=self.FooBase)
h = factory('foobar', 'test')
self.assertIsInstance(h, self.FooBase)
self.assertIsInstance(h, headerregistry.UnstructuredHeader)
class FooDefault:
parse = headerregistry.UnstructuredHeader.parse
def test_override_default_class(self):
factory = headerregistry.HeaderRegistry(default_class=self.FooDefault)
h = factory('foobar', 'test')
self.assertIsInstance(h, headerregistry.BaseHeader)
self.assertIsInstance(h, self.FooDefault)
def test_override_default_class_only_overrides_default(self):
factory = headerregistry.HeaderRegistry(default_class=self.FooDefault)
h = factory('subject', 'test')
self.assertIsInstance(h, headerregistry.BaseHeader)
self.assertIsInstance(h, headerregistry.UniqueUnstructuredHeader)
def test_dont_use_default_map(self):
factory = headerregistry.HeaderRegistry(use_default_map=False)
h = factory('subject', 'test')
self.assertIsInstance(h, headerregistry.BaseHeader)
self.assertIsInstance(h, headerregistry.UnstructuredHeader)
def test_map_to_type(self):
factory = headerregistry.HeaderRegistry()
h1 = factory('foobar', 'test')
factory.map_to_type('foobar', headerregistry.UniqueUnstructuredHeader)
h2 = factory('foobar', 'test')
self.assertIsInstance(h1, headerregistry.BaseHeader)
self.assertIsInstance(h1, headerregistry.UnstructuredHeader)
self.assertIsInstance(h2, headerregistry.BaseHeader)
self.assertIsInstance(h2, headerregistry.UniqueUnstructuredHeader)
class TestHeaderBase(TestEmailBase):
factory = headerregistry.HeaderRegistry()
def make_header(self, name, value):
return self.factory(name, value)
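class TestHeaderRegistrySketch(TestHeaderBase):
    # Added illustration (not one of the original stdlib cases): the registry
    # acts as a factory keyed by header name, and the object it returns is
    # both a plain str and a structured header with parsed attributes.
    def test_factory_returns_structured_str_sketch(self):
        h = self.make_header('Date', 'Sun, 23 Sep 2001 20:10:55 -0700')
        self.assertIsInstance(h, str)
        self.assertEqual(h.name, 'Date')
        self.assertEqual(h.datetime.year, 2001)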
class TestBaseHeaderFeatures(TestHeaderBase):
def test_str(self):
h = self.make_header('subject', 'this is a test')
self.assertIsInstance(h, str)
self.assertEqual(h, 'this is a test')
self.assertEqual(str(h), 'this is a test')
def test_substr(self):
h = self.make_header('subject', 'this is a test')
self.assertEqual(h[5:7], 'is')
def test_has_name(self):
h = self.make_header('subject', 'this is a test')
self.assertEqual(h.name, 'subject')
def _test_attr_ro(self, attr):
h = self.make_header('subject', 'this is a test')
with self.assertRaises(AttributeError):
setattr(h, attr, 'foo')
def test_name_read_only(self):
self._test_attr_ro('name')
def test_defects_read_only(self):
self._test_attr_ro('defects')
def test_defects_is_tuple(self):
h = self.make_header('subject', 'this is a test')
self.assertEqual(len(h.defects), 0)
self.assertIsInstance(h.defects, tuple)
# Make sure it is still true when there are defects.
h = self.make_header('date', '')
self.assertEqual(len(h.defects), 1)
self.assertIsInstance(h.defects, tuple)
# XXX: FIXME
#def test_CR_in_value(self):
# # XXX: this also re-raises the issue of embedded headers,
# # need test and solution for that.
# value = '\r'.join(['this is', ' a test'])
# h = self.make_header('subject', value)
# self.assertEqual(h, value)
# self.assertDefectsEqual(h.defects, [errors.ObsoleteHeaderDefect])
@parameterize
class TestUnstructuredHeader(TestHeaderBase):
def string_as_value(self,
source,
decoded,
*args):
l = len(args)
defects = args[0] if l>0 else []
header = 'Subject:' + (' ' if source else '')
folded = header + (args[1] if l>1 else source) + '\n'
h = self.make_header('Subject', source)
self.assertEqual(h, decoded)
self.assertDefectsEqual(h.defects, defects)
self.assertEqual(h.fold(policy=policy.default), folded)
string_params = {
'rfc2047_simple_quopri': (
'=?utf-8?q?this_is_a_test?=',
'this is a test',
[],
'this is a test'),
'rfc2047_gb2312_base64': (
'=?gb2312?b?1eLKx9bQzsSy4srUo6E=?=',
'\u8fd9\u662f\u4e2d\u6587\u6d4b\u8bd5\uff01',
[],
'=?utf-8?b?6L+Z5piv5Lit5paH5rWL6K+V77yB?='),
'rfc2047_simple_nonascii_quopri': (
'=?utf-8?q?=C3=89ric?=',
'Éric'),
'rfc2047_quopri_with_regular_text': (
'The =?utf-8?q?=C3=89ric=2C?= Himself',
'The Éric, Himself'),
}
@parameterize
class TestDateHeader(TestHeaderBase):
datestring = 'Sun, 23 Sep 2001 20:10:55 -0700'
utcoffset = datetime.timedelta(hours=-7)
tz = datetime.timezone(utcoffset)
dt = datetime.datetime(2001, 9, 23, 20, 10, 55, tzinfo=tz)
def test_parse_date(self):
h = self.make_header('date', self.datestring)
self.assertEqual(h, self.datestring)
self.assertEqual(h.datetime, self.dt)
self.assertEqual(h.datetime.utcoffset(), self.utcoffset)
self.assertEqual(h.defects, ())
def test_set_from_datetime(self):
h = self.make_header('date', self.dt)
self.assertEqual(h, self.datestring)
self.assertEqual(h.datetime, self.dt)
self.assertEqual(h.defects, ())
def test_date_header_properties(self):
h = self.make_header('date', self.datestring)
self.assertIsInstance(h, headerregistry.UniqueDateHeader)
self.assertEqual(h.max_count, 1)
self.assertEqual(h.defects, ())
def test_resent_date_header_properties(self):
h = self.make_header('resent-date', self.datestring)
self.assertIsInstance(h, headerregistry.DateHeader)
self.assertEqual(h.max_count, None)
self.assertEqual(h.defects, ())
def test_no_value_is_defect(self):
h = self.make_header('date', '')
self.assertEqual(len(h.defects), 1)
self.assertIsInstance(h.defects[0], errors.HeaderMissingRequiredValue)
def test_datetime_read_only(self):
h = self.make_header('date', self.datestring)
with self.assertRaises(AttributeError):
h.datetime = 'foo'
def test_set_date_header_from_datetime(self):
m = Message(policy=policy.default)
m['Date'] = self.dt
self.assertEqual(m['Date'], self.datestring)
self.assertEqual(m['Date'].datetime, self.dt)
@parameterize
class TestContentTypeHeader(TestHeaderBase):
def content_type_as_value(self,
source,
content_type,
maintype,
subtype,
*args):
l = len(args)
parmdict = args[0] if l>0 else {}
defects = args[1] if l>1 else []
decoded = args[2] if l>2 and args[2] is not DITTO else source
header = 'Content-Type:' + ' ' if source else ''
folded = args[3] if l>3 else header + source + '\n'
h = self.make_header('Content-Type', source)
self.assertEqual(h.content_type, content_type)
self.assertEqual(h.maintype, maintype)
self.assertEqual(h.subtype, subtype)
self.assertEqual(h.params, parmdict)
with self.assertRaises(TypeError):
h.params['abc'] = 'xyz' # params is read-only.
self.assertDefectsEqual(h.defects, defects)
self.assertEqual(h, decoded)
self.assertEqual(h.fold(policy=policy.default), folded)
content_type_params = {
# Examples from RFC 2045.
'RFC_2045_1': (
'text/plain; charset=us-ascii (Plain text)',
'text/plain',
'text',
'plain',
{'charset': 'us-ascii'},
[],
'text/plain; charset="us-ascii"'),
'RFC_2045_2': (
'text/plain; charset=us-ascii',
'text/plain',
'text',
'plain',
{'charset': 'us-ascii'},
[],
'text/plain; charset="us-ascii"'),
'RFC_2045_3': (
'text/plain; charset="us-ascii"',
'text/plain',
'text',
'plain',
{'charset': 'us-ascii'}),
# RFC 2045 5.2 says syntactically invalid values are to be treated as
# text/plain.
'no_subtype_in_content_type': (
'text/',
'text/plain',
'text',
'plain',
{},
[errors.InvalidHeaderDefect]),
'no_slash_in_content_type': (
'foo',
'text/plain',
'text',
'plain',
{},
[errors.InvalidHeaderDefect]),
'junk_text_in_content_type': (
'<crazy "stuff">',
'text/plain',
'text',
'plain',
{},
[errors.InvalidHeaderDefect]),
'too_many_slashes_in_content_type': (
'image/jpeg/foo',
'text/plain',
'text',
'plain',
{},
[errors.InvalidHeaderDefect]),
# But unknown names are OK. We could make non-IANA names a defect, but
# by not doing so we make ourselves future proof. The fact that they
# are unknown will be detectable by the fact that they don't appear in
# the mime_registry...and the application is free to extend that list
# to handle them even if the core library doesn't.
'unknown_content_type': (
'bad/names',
'bad/names',
'bad',
'names'),
# The content type is case insensitive, and CFWS is ignored.
'mixed_case_content_type': (
'ImAge/JPeg',
'image/jpeg',
'image',
'jpeg'),
'spaces_in_content_type': (
' text / plain ',
'text/plain',
'text',
'plain'),
'cfws_in_content_type': (
'(foo) text (bar)/(baz)plain(stuff)',
'text/plain',
'text',
'plain'),
# test some parameters (more tests could be added for parameters
# associated with other content types, but since parameter parsing is
# generic they would be redundant for the current implementation).
'charset_param': (
'text/plain; charset="utf-8"',
'text/plain',
'text',
'plain',
{'charset': 'utf-8'}),
'capitalized_charset': (
'text/plain; charset="US-ASCII"',
'text/plain',
'text',
'plain',
{'charset': 'US-ASCII'}),
'unknown_charset': (
'text/plain; charset="fOo"',
'text/plain',
'text',
'plain',
{'charset': 'fOo'}),
'capitalized_charset_param_name_and_comment': (
'text/plain; (interjection) Charset="utf-8"',
'text/plain',
'text',
'plain',
{'charset': 'utf-8'},
[],
# Should the parameter name be lowercased here?
'text/plain; Charset="utf-8"'),
# Since this is pretty much the ur-mimeheader, we'll put all the tests
# that exercise the parameter parsing and formatting here.
#
# XXX: question: is minimal quoting preferred?
'unquoted_param_value': (
'text/plain; title=foo',
'text/plain',
'text',
'plain',
{'title': 'foo'},
[],
'text/plain; title="foo"'),
'param_value_with_tspecials': (
'text/plain; title="(bar)foo blue"',
'text/plain',
'text',
'plain',
{'title': '(bar)foo blue'}),
'param_with_extra_quoted_whitespace': (
'text/plain; title=" a loong way \t home "',
'text/plain',
'text',
'plain',
{'title': ' a loong way \t home '}),
'bad_params': (
'blarg; baz; boo',
'text/plain',
'text',
'plain',
{'baz': '', 'boo': ''},
[errors.InvalidHeaderDefect]*3),
'spaces_around_param_equals': (
'Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"',
'multipart/mixed',
'multipart',
'mixed',
{'boundary': 'CPIMSSMTPC06p5f3tG'},
[],
'Multipart/mixed; boundary="CPIMSSMTPC06p5f3tG"'),
'spaces_around_semis': (
('image/jpeg; name="wibble.JPG" ; x-mac-type="4A504547" ; '
'x-mac-creator="474B4F4E"'),
'image/jpeg',
'image',
'jpeg',
{'name': 'wibble.JPG',
'x-mac-type': '4A504547',
'x-mac-creator': '474B4F4E'},
[],
('image/jpeg; name="wibble.JPG"; x-mac-type="4A504547"; '
'x-mac-creator="474B4F4E"'),
# XXX: it could be that we will eventually prefer to fold starting
# from the decoded value, in which case these spaces and similar
# spaces in other tests will be wrong.
('Content-Type: image/jpeg; name="wibble.JPG" ; '
'x-mac-type="4A504547" ;\n'
' x-mac-creator="474B4F4E"\n'),
),
'semis_inside_quotes': (
'image/jpeg; name="Jim&&Jill"',
'image/jpeg',
'image',
'jpeg',
{'name': 'Jim&&Jill'}),
'single_quotes_inside_quotes': (
'image/jpeg; name="Jim \'Bob\' Jill"',
'image/jpeg',
'image',
'jpeg',
{'name': "Jim 'Bob' Jill"}),
'double_quotes_inside_quotes': (
r'image/jpeg; name="Jim \"Bob\" Jill"',
'image/jpeg',
'image',
'jpeg',
{'name': 'Jim "Bob" Jill'},
[],
r'image/jpeg; name="Jim \"Bob\" Jill"'),
# XXX: This test works except for the refolding of the header. I'll
# deal with that bug when I deal with the other folding bugs.
#'non_ascii_in_params': (
# ('foo\xa7/bar; b\xa7r=two; '
# 'baz=thr\xa7e'.encode('latin-1').decode('us-ascii',
# 'surrogateescape')),
# 'foo\uFFFD/bar',
# 'foo\uFFFD',
# 'bar',
# {'b\uFFFDr': 'two', 'baz': 'thr\uFFFDe'},
# [errors.UndecodableBytesDefect]*3,
# 'foo�/bar; b�r="two"; baz="thr�e"',
# ),
# RFC 2231 parameter tests.
'rfc2231_segmented_normal_values': (
'image/jpeg; name*0="abc"; name*1=".html"',
'image/jpeg',
'image',
'jpeg',
{'name': "abc.html"},
[],
'image/jpeg; name="abc.html"'),
'quotes_inside_rfc2231_value': (
r'image/jpeg; bar*0="baz\"foobar"; bar*1="\"baz"',
'image/jpeg',
'image',
'jpeg',
{'bar': 'baz"foobar"baz'},
[],
r'image/jpeg; bar="baz\"foobar\"baz"'),
# XXX: This test works except for the refolding of the header. I'll
# deal with that bug when I deal with the other folding bugs.
#'non_ascii_rfc2231_value': (
# ('text/plain; charset=us-ascii; '
# "title*=us-ascii'en'This%20is%20"
# 'not%20f\xa7n').encode('latin-1').decode('us-ascii',
# 'surrogateescape'),
# 'text/plain',
# 'text',
# 'plain',
# {'charset': 'us-ascii', 'title': 'This is not f\uFFFDn'},
# [errors.UndecodableBytesDefect],
# 'text/plain; charset="us-ascii"; title="This is not f�n"'),
'rfc2231_encoded_charset': (
'text/plain; charset*=ansi-x3.4-1968\'\'us-ascii',
'text/plain',
'text',
'plain',
{'charset': 'us-ascii'},
[],
'text/plain; charset="us-ascii"'),
# This follows the RFC: no double quotes around encoded values.
'rfc2231_encoded_no_double_quotes': (
("text/plain;"
"\tname*0*=''This%20is%20;"
"\tname*1*=%2A%2A%2Afun%2A%2A%2A%20;"
'\tname*2="is it not.pdf"'),
'text/plain',
'text',
'plain',
{'name': 'This is ***fun*** is it not.pdf'},
[],
'text/plain; name="This is ***fun*** is it not.pdf"',
('Content-Type: text/plain;\tname*0*=\'\'This%20is%20;\n'
'\tname*1*=%2A%2A%2Afun%2A%2A%2A%20;\tname*2="is it not.pdf"\n'),
),
# Make sure we also handle it if there are spurious double quotes.
'rfc2231_encoded_with_double_quotes': (
("text/plain;"
'\tname*0*="us-ascii\'\'This%20is%20even%20more%20";'
'\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";'
'\tname*2="is it not.pdf"'),
'text/plain',
'text',
'plain',
{'name': 'This is even more ***fun*** is it not.pdf'},
[errors.InvalidHeaderDefect]*2,
'text/plain; name="This is even more ***fun*** is it not.pdf"',
('Content-Type: text/plain;\t'
'name*0*="us-ascii\'\'This%20is%20even%20more%20";\n'
'\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";\tname*2="is it not.pdf"\n'),
),
'rfc2231_single_quote_inside_double_quotes': (
('text/plain; charset=us-ascii;'
'\ttitle*0*="us-ascii\'en\'This%20is%20really%20";'
'\ttitle*1*="%2A%2A%2Afun%2A%2A%2A%20";'
'\ttitle*2="isn\'t it!"'),
'text/plain',
'text',
'plain',
{'charset': 'us-ascii', 'title': "This is really ***fun*** isn't it!"},
[errors.InvalidHeaderDefect]*2,
('text/plain; charset="us-ascii"; '
'title="This is really ***fun*** isn\'t it!"'),
('Content-Type: text/plain; charset=us-ascii;\n'
'\ttitle*0*="us-ascii\'en\'This%20is%20really%20";\n'
'\ttitle*1*="%2A%2A%2Afun%2A%2A%2A%20";\ttitle*2="isn\'t it!"\n'),
),
'rfc2231_single_quote_in_value_with_charset_and_lang': (
('application/x-foo;'
"\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\""),
'application/x-foo',
'application',
'x-foo',
{'name': "Frank's Document"},
[errors.InvalidHeaderDefect]*2,
'application/x-foo; name="Frank\'s Document"',
('Content-Type: application/x-foo;\t'
'name*0*="us-ascii\'en-us\'Frank\'s";\n'
' name*1*=" Document"\n'),
),
'rfc2231_single_quote_in_non_encoded_value': (
('application/x-foo;'
"\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\""),
'application/x-foo',
'application',
'x-foo',
{'name': "us-ascii'en-us'Frank's Document"},
[],
'application/x-foo; name="us-ascii\'en-us\'Frank\'s Document"',
('Content-Type: application/x-foo;\t'
'name*0="us-ascii\'en-us\'Frank\'s";\n'
' name*1=" Document"\n'),
),
'rfc2231_no_language_or_charset': (
'text/plain; NAME*0*=english_is_the_default.html',
'text/plain',
'text',
'plain',
{'name': 'english_is_the_default.html'},
[errors.InvalidHeaderDefect],
'text/plain; NAME="english_is_the_default.html"'),
'rfc2231_encoded_no_charset': (
("text/plain;"
'\tname*0*="\'\'This%20is%20even%20more%20";'
'\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";'
'\tname*2="is it.pdf"'),
'text/plain',
'text',
'plain',
{'name': 'This is even more ***fun*** is it.pdf'},
[errors.InvalidHeaderDefect]*2,
'text/plain; name="This is even more ***fun*** is it.pdf"',
('Content-Type: text/plain;\t'
'name*0*="\'\'This%20is%20even%20more%20";\n'
'\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";\tname*2="is it.pdf"\n'),
),
# XXX: see below...the first name line here should be *0 not *0*.
'rfc2231_partly_encoded': (
("text/plain;"
'\tname*0*="\'\'This%20is%20even%20more%20";'
'\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";'
'\tname*2="is it.pdf"'),
'text/plain',
'text',
'plain',
{'name': 'This is even more ***fun*** is it.pdf'},
[errors.InvalidHeaderDefect]*2,
'text/plain; name="This is even more ***fun*** is it.pdf"',
('Content-Type: text/plain;\t'
'name*0*="\'\'This%20is%20even%20more%20";\n'
'\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";\tname*2="is it.pdf"\n'),
),
'rfc2231_partly_encoded_2': (
("text/plain;"
'\tname*0*="\'\'This%20is%20even%20more%20";'
'\tname*1="%2A%2A%2Afun%2A%2A%2A%20";'
'\tname*2="is it.pdf"'),
'text/plain',
'text',
'plain',
{'name': 'This is even more %2A%2A%2Afun%2A%2A%2A%20is it.pdf'},
[errors.InvalidHeaderDefect],
'text/plain; name="This is even more %2A%2A%2Afun%2A%2A%2A%20is it.pdf"',
('Content-Type: text/plain;\t'
'name*0*="\'\'This%20is%20even%20more%20";\n'
'\tname*1="%2A%2A%2Afun%2A%2A%2A%20";\tname*2="is it.pdf"\n'),
),
'rfc2231_unknown_charset_treated_as_ascii': (
"text/plain; name*0*=bogus'xx'ascii_is_the_default",
'text/plain',
'text',
'plain',
{'name': 'ascii_is_the_default'},
[],
'text/plain; name="ascii_is_the_default"'),
'rfc2231_bad_character_in_charset_parameter_value': (
"text/plain; charset*=ascii''utf-8%F1%F2%F3",
'text/plain',
'text',
'plain',
{'charset': 'utf-8\uFFFD\uFFFD\uFFFD'},
[errors.UndecodableBytesDefect],
'text/plain; charset="utf-8\uFFFD\uFFFD\uFFFD"'),
'rfc2231_utf_8_in_supposedly_ascii_charset_parameter_value': (
"text/plain; charset*=ascii''utf-8%E2%80%9D",
'text/plain',
'text',
'plain',
{'charset': 'utf-8”'},
[errors.UndecodableBytesDefect],
'text/plain; charset="utf-8”"',
),
# XXX: if the above were *re*folded, it would get tagged as utf-8
# instead of ascii in the param, since it now contains non-ASCII.
'rfc2231_encoded_then_unencoded_segments': (
('application/x-foo;'
'\tname*0*="us-ascii\'en-us\'My";'
'\tname*1=" Document";'
'\tname*2=" For You"'),
'application/x-foo',
'application',
'x-foo',
{'name': 'My Document For You'},
[errors.InvalidHeaderDefect],
'application/x-foo; name="My Document For You"',
('Content-Type: application/x-foo;\t'
'name*0*="us-ascii\'en-us\'My";\n'
'\tname*1=" Document";\tname*2=" For You"\n'),
),
# My reading of the RFC is that this is an invalid header. The RFC
# says that if charset and language information is given, the first
# segment *must* be encoded.
'rfc2231_unencoded_then_encoded_segments': (
('application/x-foo;'
'\tname*0=us-ascii\'en-us\'My;'
'\tname*1*=" Document";'
'\tname*2*=" For You"'),
'application/x-foo',
'application',
'x-foo',
{'name': 'My Document For You'},
[errors.InvalidHeaderDefect]*3,
'application/x-foo; name="My Document For You"',
("Content-Type: application/x-foo;\tname*0=us-ascii'en-us'My;\t"
# XXX: the newline is in the wrong place, come back and fix
# this when the rest of tests pass.
'name*1*=" Document"\n;'
'\tname*2*=" For You"\n'),
),
# XXX: I would say this one should default to ascii/en for the
# "encoded" segment, since the first segment is not encoded and is
# in double quotes, making the value a valid non-encoded string. The
# old parser decodes this just like the previous case, which may be the
# better Postel rule, but could equally result in borking headers that
# intentionally have quoted quotes in them. We could get this 98%
# right if we treat it as a quoted string *unless* it matches the
# charset'lang'value pattern exactly *and* there is at least one
# encoded segment. Implementing that algorithm will require some
# refactoring, so I haven't done it (yet).
        'rfc2231_quoted_unencoded_then_encoded_segments': (
('application/x-foo;'
'\tname*0="us-ascii\'en-us\'My";'
'\tname*1*=" Document";'
'\tname*2*=" For You"'),
'application/x-foo',
'application',
'x-foo',
{'name': "us-ascii'en-us'My Document For You"},
[errors.InvalidHeaderDefect]*2,
'application/x-foo; name="us-ascii\'en-us\'My Document For You"',
('Content-Type: application/x-foo;\t'
'name*0="us-ascii\'en-us\'My";\n'
'\tname*1*=" Document";\tname*2*=" For You"\n'),
),
}
@parameterize
class TestContentTransferEncoding(TestHeaderBase):
def cte_as_value(self,
source,
cte,
*args):
l = len(args)
defects = args[0] if l>0 else []
decoded = args[1] if l>1 and args[1] is not DITTO else source
header = 'Content-Transfer-Encoding:' + ' ' if source else ''
folded = args[2] if l>2 else header + source + '\n'
h = self.make_header('Content-Transfer-Encoding', source)
self.assertEqual(h.cte, cte)
self.assertDefectsEqual(h.defects, defects)
self.assertEqual(h, decoded)
self.assertEqual(h.fold(policy=policy.default), folded)
cte_params = {
'RFC_2183_1': (
'base64',
'base64',),
'no_value': (
'',
'7bit',
[errors.HeaderMissingRequiredValue],
'',
'Content-Transfer-Encoding:\n',
),
'junk_after_cte': (
'7bit and a bunch more',
'7bit',
[errors.InvalidHeaderDefect]),
}
@parameterize
class TestContentDisposition(TestHeaderBase):
def content_disp_as_value(self,
source,
content_disposition,
*args):
l = len(args)
parmdict = args[0] if l>0 else {}
defects = args[1] if l>1 else []
decoded = args[2] if l>2 and args[2] is not DITTO else source
header = 'Content-Disposition:' + ' ' if source else ''
folded = args[3] if l>3 else header + source + '\n'
h = self.make_header('Content-Disposition', source)
self.assertEqual(h.content_disposition, content_disposition)
self.assertEqual(h.params, parmdict)
self.assertDefectsEqual(h.defects, defects)
self.assertEqual(h, decoded)
self.assertEqual(h.fold(policy=policy.default), folded)
content_disp_params = {
# Examples from RFC 2183.
'RFC_2183_1': (
'inline',
'inline',),
'RFC_2183_2': (
('attachment; filename=genome.jpeg;'
' modification-date="Wed, 12 Feb 1997 16:29:51 -0500";'),
'attachment',
{'filename': 'genome.jpeg',
'modification-date': 'Wed, 12 Feb 1997 16:29:51 -0500'},
[],
('attachment; filename="genome.jpeg"; '
'modification-date="Wed, 12 Feb 1997 16:29:51 -0500"'),
('Content-Disposition: attachment; filename=genome.jpeg;\n'
' modification-date="Wed, 12 Feb 1997 16:29:51 -0500";\n'),
),
'no_value': (
'',
None,
{},
[errors.HeaderMissingRequiredValue],
'',
'Content-Disposition:\n'),
'invalid_value': (
'ab./k',
'ab.',
{},
[errors.InvalidHeaderDefect]),
'invalid_value_with_params': (
'ab./k; filename="foo"',
'ab.',
{'filename': 'foo'},
[errors.InvalidHeaderDefect]),
}
@parameterize
class TestMIMEVersionHeader(TestHeaderBase):
def version_string_as_MIME_Version(self,
source,
decoded,
version,
major,
minor,
defects):
h = self.make_header('MIME-Version', source)
self.assertEqual(h, decoded)
self.assertEqual(h.version, version)
self.assertEqual(h.major, major)
self.assertEqual(h.minor, minor)
self.assertDefectsEqual(h.defects, defects)
if source:
source = ' ' + source
self.assertEqual(h.fold(policy=policy.default),
'MIME-Version:' + source + '\n')
version_string_params = {
# Examples from the RFC.
'RFC_2045_1': (
'1.0',
'1.0',
'1.0',
1,
0,
[]),
'RFC_2045_2': (
'1.0 (produced by MetaSend Vx.x)',
'1.0 (produced by MetaSend Vx.x)',
'1.0',
1,
0,
[]),
'RFC_2045_3': (
'(produced by MetaSend Vx.x) 1.0',
'(produced by MetaSend Vx.x) 1.0',
'1.0',
1,
0,
[]),
'RFC_2045_4': (
'1.(produced by MetaSend Vx.x)0',
'1.(produced by MetaSend Vx.x)0',
'1.0',
1,
0,
[]),
# Other valid values.
'1_1': (
'1.1',
'1.1',
'1.1',
1,
1,
[]),
'2_1': (
'2.1',
'2.1',
'2.1',
2,
1,
[]),
'whitespace': (
'1 .0',
'1 .0',
'1.0',
1,
0,
[]),
'leading_trailing_whitespace_ignored': (
' 1.0 ',
' 1.0 ',
'1.0',
1,
0,
[]),
# Recoverable invalid values. We can recover here only because we
# already have a valid value by the time we encounter the garbage.
# Anywhere else, and we don't know where the garbage ends.
'non_comment_garbage_after': (
'1.0 <abc>',
'1.0 <abc>',
'1.0',
1,
0,
[errors.InvalidHeaderDefect]),
# Unrecoverable invalid values. We *could* apply more heuristics to
# get something out of the first two, but doing so is not worth the
# effort.
'non_comment_garbage_before': (
'<abc> 1.0',
'<abc> 1.0',
None,
None,
None,
[errors.InvalidHeaderDefect]),
'non_comment_garbage_inside': (
'1.<abc>0',
'1.<abc>0',
None,
None,
None,
[errors.InvalidHeaderDefect]),
'two_periods': (
'1..0',
'1..0',
None,
None,
None,
[errors.InvalidHeaderDefect]),
'2_x': (
'2.x',
'2.x',
None, # This could be 2, but it seems safer to make it None.
None,
None,
[errors.InvalidHeaderDefect]),
'foo': (
'foo',
'foo',
None,
None,
None,
[errors.InvalidHeaderDefect]),
'missing': (
'',
'',
None,
None,
None,
[errors.HeaderMissingRequiredValue]),
}
@parameterize
class TestAddressHeader(TestHeaderBase):
example_params = {
'empty':
('<>',
[errors.InvalidHeaderDefect],
'<>',
'',
'<>',
'',
'',
None),
'address_only':
('zippy@pinhead.com',
[],
'zippy@pinhead.com',
'',
'zippy@pinhead.com',
'zippy',
'pinhead.com',
None),
'name_and_address':
('Zaphrod Beblebrux <zippy@pinhead.com>',
[],
'Zaphrod Beblebrux <zippy@pinhead.com>',
'Zaphrod Beblebrux',
'zippy@pinhead.com',
'zippy',
'pinhead.com',
None),
'quoted_local_part':
('Zaphrod Beblebrux <"foo bar"@pinhead.com>',
[],
'Zaphrod Beblebrux <"foo bar"@pinhead.com>',
'Zaphrod Beblebrux',
'"foo bar"@pinhead.com',
'foo bar',
'pinhead.com',
None),
'quoted_parens_in_name':
(r'"A \(Special\) Person" <person@dom.ain>',
[],
'"A (Special) Person" <person@dom.ain>',
'A (Special) Person',
'person@dom.ain',
'person',
'dom.ain',
None),
'quoted_backslashes_in_name':
(r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>',
[],
r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>',
r'Arthur \Backslash\ Foobar',
'person@dom.ain',
'person',
'dom.ain',
None),
'name_with_dot':
('John X. Doe <jxd@example.com>',
[errors.ObsoleteHeaderDefect],
'"John X. Doe" <jxd@example.com>',
'John X. Doe',
'jxd@example.com',
'jxd',
'example.com',
None),
'quoted_strings_in_local_part':
('""example" example"@example.com',
[errors.InvalidHeaderDefect]*3,
'"example example"@example.com',
'',
'"example example"@example.com',
'example example',
'example.com',
None),
'escaped_quoted_strings_in_local_part':
(r'"\"example\" example"@example.com',
[],
r'"\"example\" example"@example.com',
'',
r'"\"example\" example"@example.com',
r'"example" example',
'example.com',
None),
'escaped_escapes_in_local_part':
(r'"\\"example\\" example"@example.com',
[errors.InvalidHeaderDefect]*5,
r'"\\example\\\\ example"@example.com',
'',
r'"\\example\\\\ example"@example.com',
r'\example\\ example',
'example.com',
None),
'spaces_in_unquoted_local_part_collapsed':
('merwok wok @example.com',
[errors.InvalidHeaderDefect]*2,
'"merwok wok"@example.com',
'',
'"merwok wok"@example.com',
'merwok wok',
'example.com',
None),
'spaces_around_dots_in_local_part_removed':
('merwok. wok . wok@example.com',
[errors.ObsoleteHeaderDefect],
'merwok.wok.wok@example.com',
'',
'merwok.wok.wok@example.com',
'merwok.wok.wok',
'example.com',
None),
'rfc2047_atom_is_decoded':
('=?utf-8?q?=C3=89ric?= <foo@example.com>',
[],
'Éric <foo@example.com>',
'Éric',
'foo@example.com',
'foo',
'example.com',
None),
'rfc2047_atom_in_phrase_is_decoded':
('The =?utf-8?q?=C3=89ric=2C?= Himself <foo@example.com>',
[],
'"The Éric, Himself" <foo@example.com>',
'The Éric, Himself',
'foo@example.com',
'foo',
'example.com',
None),
'rfc2047_atom_in_quoted_string_is_decoded':
('"=?utf-8?q?=C3=89ric?=" <foo@example.com>',
[errors.InvalidHeaderDefect],
'Éric <foo@example.com>',
'Éric',
'foo@example.com',
'foo',
'example.com',
None),
}
# XXX: Need many more examples, and in particular some with names in
# trailing comments, which aren't currently handled. comments in
# general are not handled yet.
def example_as_address(self, source, defects, decoded, display_name,
addr_spec, username, domain, comment):
h = self.make_header('sender', source)
self.assertEqual(h, decoded)
self.assertDefectsEqual(h.defects, defects)
a = h.address
self.assertEqual(str(a), decoded)
self.assertEqual(len(h.groups), 1)
self.assertEqual([a], list(h.groups[0].addresses))
self.assertEqual([a], list(h.addresses))
self.assertEqual(a.display_name, display_name)
self.assertEqual(a.addr_spec, addr_spec)
self.assertEqual(a.username, username)
self.assertEqual(a.domain, domain)
# XXX: we have no comment support yet.
#self.assertEqual(a.comment, comment)
def example_as_group(self, source, defects, decoded, display_name,
addr_spec, username, domain, comment):
source = 'foo: {};'.format(source)
gdecoded = 'foo: {};'.format(decoded) if decoded else 'foo:;'
h = self.make_header('to', source)
self.assertEqual(h, gdecoded)
self.assertDefectsEqual(h.defects, defects)
self.assertEqual(h.groups[0].addresses, h.addresses)
self.assertEqual(len(h.groups), 1)
self.assertEqual(len(h.addresses), 1)
a = h.addresses[0]
self.assertEqual(str(a), decoded)
self.assertEqual(a.display_name, display_name)
self.assertEqual(a.addr_spec, addr_spec)
self.assertEqual(a.username, username)
self.assertEqual(a.domain, domain)
def test_simple_address_list(self):
value = ('Fred <dinsdale@python.org>, foo@example.com, '
'"Harry W. Hastings" <hasty@example.com>')
h = self.make_header('to', value)
self.assertEqual(h, value)
self.assertEqual(len(h.groups), 3)
self.assertEqual(len(h.addresses), 3)
for i in range(3):
self.assertEqual(h.groups[i].addresses[0], h.addresses[i])
self.assertEqual(str(h.addresses[0]), 'Fred <dinsdale@python.org>')
self.assertEqual(str(h.addresses[1]), 'foo@example.com')
self.assertEqual(str(h.addresses[2]),
'"Harry W. Hastings" <hasty@example.com>')
self.assertEqual(h.addresses[2].display_name,
'Harry W. Hastings')
def test_complex_address_list(self):
examples = list(self.example_params.values())
source = ('dummy list:;, another: (empty);,' +
', '.join([x[0] for x in examples[:4]]) + ', ' +
r'"A \"list\"": ' +
', '.join([x[0] for x in examples[4:6]]) + ';,' +
', '.join([x[0] for x in examples[6:]])
)
# XXX: the fact that (empty) disappears here is a potential API design
# bug. We don't currently have a way to preserve comments.
expected = ('dummy list:;, another:;, ' +
', '.join([x[2] for x in examples[:4]]) + ', ' +
r'"A \"list\"": ' +
', '.join([x[2] for x in examples[4:6]]) + ';, ' +
', '.join([x[2] for x in examples[6:]])
)
h = self.make_header('to', source)
self.assertEqual(h.split(','), expected.split(','))
self.assertEqual(h, expected)
self.assertEqual(len(h.groups), 7 + len(examples) - 6)
self.assertEqual(h.groups[0].display_name, 'dummy list')
self.assertEqual(h.groups[1].display_name, 'another')
self.assertEqual(h.groups[6].display_name, 'A "list"')
self.assertEqual(len(h.addresses), len(examples))
for i in range(4):
self.assertIsNone(h.groups[i+2].display_name)
self.assertEqual(str(h.groups[i+2].addresses[0]), examples[i][2])
for i in range(7, 7 + len(examples) - 6):
self.assertIsNone(h.groups[i].display_name)
self.assertEqual(str(h.groups[i].addresses[0]), examples[i-1][2])
for i in range(len(examples)):
self.assertEqual(str(h.addresses[i]), examples[i][2])
self.assertEqual(h.addresses[i].addr_spec, examples[i][4])
def test_address_read_only(self):
h = self.make_header('sender', 'abc@xyz.com')
with self.assertRaises(AttributeError):
h.address = 'foo'
def test_addresses_read_only(self):
h = self.make_header('sender', 'abc@xyz.com')
with self.assertRaises(AttributeError):
h.addresses = 'foo'
def test_groups_read_only(self):
h = self.make_header('sender', 'abc@xyz.com')
with self.assertRaises(AttributeError):
h.groups = 'foo'
def test_addresses_types(self):
source = 'me <who@example.com>'
h = self.make_header('to', source)
self.assertIsInstance(h.addresses, tuple)
self.assertIsInstance(h.addresses[0], Address)
def test_groups_types(self):
source = 'me <who@example.com>'
h = self.make_header('to', source)
self.assertIsInstance(h.groups, tuple)
self.assertIsInstance(h.groups[0], Group)
def test_set_from_Address(self):
h = self.make_header('to', Address('me', 'foo', 'example.com'))
self.assertEqual(h, 'me <foo@example.com>')
def test_set_from_Address_list(self):
h = self.make_header('to', [Address('me', 'foo', 'example.com'),
Address('you', 'bar', 'example.com')])
self.assertEqual(h, 'me <foo@example.com>, you <bar@example.com>')
def test_set_from_Address_and_Group_list(self):
h = self.make_header('to', [Address('me', 'foo', 'example.com'),
Group('bing', [Address('fiz', 'z', 'b.com'),
Address('zif', 'f', 'c.com')]),
Address('you', 'bar', 'example.com')])
self.assertEqual(h, 'me <foo@example.com>, bing: fiz <z@b.com>, '
'zif <f@c.com>;, you <bar@example.com>')
self.assertEqual(h.fold(policy=policy.default.clone(max_line_length=40)),
'to: me <foo@example.com>,\n'
' bing: fiz <z@b.com>, zif <f@c.com>;,\n'
' you <bar@example.com>\n')
def test_set_from_Group_list(self):
h = self.make_header('to', [Group('bing', [Address('fiz', 'z', 'b.com'),
Address('zif', 'f', 'c.com')])])
self.assertEqual(h, 'bing: fiz <z@b.com>, zif <f@c.com>;')
class TestAddressAndGroup(TestEmailBase):
def _test_attr_ro(self, obj, attr):
with self.assertRaises(AttributeError):
setattr(obj, attr, 'foo')
def test_address_display_name_ro(self):
self._test_attr_ro(Address('foo', 'bar', 'baz'), 'display_name')
def test_address_username_ro(self):
self._test_attr_ro(Address('foo', 'bar', 'baz'), 'username')
def test_address_domain_ro(self):
self._test_attr_ro(Address('foo', 'bar', 'baz'), 'domain')
def test_group_display_name_ro(self):
self._test_attr_ro(Group('foo'), 'display_name')
def test_group_addresses_ro(self):
self._test_attr_ro(Group('foo'), 'addresses')
def test_address_from_username_domain(self):
a = Address('foo', 'bar', 'baz')
self.assertEqual(a.display_name, 'foo')
self.assertEqual(a.username, 'bar')
self.assertEqual(a.domain, 'baz')
self.assertEqual(a.addr_spec, 'bar@baz')
self.assertEqual(str(a), 'foo <bar@baz>')
def test_address_from_addr_spec(self):
a = Address('foo', addr_spec='bar@baz')
self.assertEqual(a.display_name, 'foo')
self.assertEqual(a.username, 'bar')
self.assertEqual(a.domain, 'baz')
self.assertEqual(a.addr_spec, 'bar@baz')
self.assertEqual(str(a), 'foo <bar@baz>')
def test_address_with_no_display_name(self):
a = Address(addr_spec='bar@baz')
self.assertEqual(a.display_name, '')
self.assertEqual(a.username, 'bar')
self.assertEqual(a.domain, 'baz')
self.assertEqual(a.addr_spec, 'bar@baz')
self.assertEqual(str(a), 'bar@baz')
def test_null_address(self):
a = Address()
self.assertEqual(a.display_name, '')
self.assertEqual(a.username, '')
self.assertEqual(a.domain, '')
self.assertEqual(a.addr_spec, '<>')
self.assertEqual(str(a), '<>')
def test_domain_only(self):
# This isn't really a valid address.
a = Address(domain='buzz')
self.assertEqual(a.display_name, '')
self.assertEqual(a.username, '')
self.assertEqual(a.domain, 'buzz')
self.assertEqual(a.addr_spec, '@buzz')
self.assertEqual(str(a), '@buzz')
def test_username_only(self):
# This isn't really a valid address.
a = Address(username='buzz')
self.assertEqual(a.display_name, '')
self.assertEqual(a.username, 'buzz')
self.assertEqual(a.domain, '')
self.assertEqual(a.addr_spec, 'buzz')
self.assertEqual(str(a), 'buzz')
def test_display_name_only(self):
a = Address('buzz')
self.assertEqual(a.display_name, 'buzz')
self.assertEqual(a.username, '')
self.assertEqual(a.domain, '')
self.assertEqual(a.addr_spec, '<>')
self.assertEqual(str(a), 'buzz <>')
def test_quoting(self):
# Ideally we'd check every special individually, but I'm not up for
# writing that many tests.
a = Address('Sara J.', 'bad name', 'example.com')
self.assertEqual(a.display_name, 'Sara J.')
self.assertEqual(a.username, 'bad name')
self.assertEqual(a.domain, 'example.com')
self.assertEqual(a.addr_spec, '"bad name"@example.com')
self.assertEqual(str(a), '"Sara J." <"bad name"@example.com>')
def test_il8n(self):
a = Address('Éric', 'wok', 'exàmple.com')
self.assertEqual(a.display_name, 'Éric')
self.assertEqual(a.username, 'wok')
self.assertEqual(a.domain, 'exàmple.com')
self.assertEqual(a.addr_spec, 'wok@exàmple.com')
self.assertEqual(str(a), 'Éric <wok@exàmple.com>')
# XXX: there is an API design issue that needs to be solved here.
#def test_non_ascii_username_raises(self):
# with self.assertRaises(ValueError):
# Address('foo', 'wők', 'example.com')
def test_crlf_in_constructor_args_raises(self):
cases = (
dict(display_name='foo\r'),
dict(display_name='foo\n'),
dict(display_name='foo\r\n'),
dict(domain='example.com\r'),
dict(domain='example.com\n'),
dict(domain='example.com\r\n'),
dict(username='wok\r'),
dict(username='wok\n'),
dict(username='wok\r\n'),
dict(addr_spec='wok@example.com\r'),
dict(addr_spec='wok@example.com\n'),
dict(addr_spec='wok@example.com\r\n')
)
for kwargs in cases:
with self.subTest(kwargs=kwargs), self.assertRaisesRegex(ValueError, "invalid arguments"):
Address(**kwargs)
def test_non_ascii_username_in_addr_spec_raises(self):
with self.assertRaises(ValueError):
Address('foo', addr_spec='wők@example.com')
def test_address_addr_spec_and_username_raises(self):
with self.assertRaises(TypeError):
Address('foo', username='bing', addr_spec='bar@baz')
def test_address_addr_spec_and_domain_raises(self):
with self.assertRaises(TypeError):
Address('foo', domain='bing', addr_spec='bar@baz')
def test_address_addr_spec_and_username_and_domain_raises(self):
with self.assertRaises(TypeError):
Address('foo', username='bong', domain='bing', addr_spec='bar@baz')
def test_space_in_addr_spec_username_raises(self):
with self.assertRaises(ValueError):
Address('foo', addr_spec="bad name@example.com")
    def test_bad_addr_spec_raises(self):
with self.assertRaises(ValueError):
Address('foo', addr_spec="name@ex[]ample.com")
def test_empty_group(self):
g = Group('foo')
self.assertEqual(g.display_name, 'foo')
self.assertEqual(g.addresses, tuple())
self.assertEqual(str(g), 'foo:;')
def test_empty_group_list(self):
g = Group('foo', addresses=[])
self.assertEqual(g.display_name, 'foo')
self.assertEqual(g.addresses, tuple())
self.assertEqual(str(g), 'foo:;')
def test_null_group(self):
g = Group()
self.assertIsNone(g.display_name)
self.assertEqual(g.addresses, tuple())
self.assertEqual(str(g), 'None:;')
def test_group_with_addresses(self):
addrs = [Address('b', 'b', 'c'), Address('a', 'b','c')]
g = Group('foo', addrs)
self.assertEqual(g.display_name, 'foo')
self.assertEqual(g.addresses, tuple(addrs))
self.assertEqual(str(g), 'foo: b <b@c>, a <b@c>;')
def test_group_with_addresses_no_display_name(self):
addrs = [Address('b', 'b', 'c'), Address('a', 'b','c')]
g = Group(addresses=addrs)
self.assertIsNone(g.display_name)
self.assertEqual(g.addresses, tuple(addrs))
self.assertEqual(str(g), 'None: b <b@c>, a <b@c>;')
def test_group_with_one_address_no_display_name(self):
addrs = [Address('b', 'b', 'c')]
g = Group(addresses=addrs)
self.assertIsNone(g.display_name)
self.assertEqual(g.addresses, tuple(addrs))
self.assertEqual(str(g), 'b <b@c>')
def test_display_name_quoting(self):
g = Group('foo.bar')
self.assertEqual(g.display_name, 'foo.bar')
self.assertEqual(g.addresses, tuple())
self.assertEqual(str(g), '"foo.bar":;')
def test_display_name_blanks_not_quoted(self):
g = Group('foo bar')
self.assertEqual(g.display_name, 'foo bar')
self.assertEqual(g.addresses, tuple())
self.assertEqual(str(g), 'foo bar:;')
def test_set_message_header_from_address(self):
a = Address('foo', 'bar', 'example.com')
m = Message(policy=policy.default)
m['To'] = a
self.assertEqual(m['to'], 'foo <bar@example.com>')
self.assertEqual(m['to'].addresses, (a,))
def test_set_message_header_from_group(self):
g = Group('foo bar')
m = Message(policy=policy.default)
m['To'] = g
self.assertEqual(m['to'], 'foo bar:;')
self.assertEqual(m['to'].addresses, g.addresses)
class TestFolding(TestHeaderBase):
def test_short_unstructured(self):
h = self.make_header('subject', 'this is a test')
self.assertEqual(h.fold(policy=policy.default),
'subject: this is a test\n')
def test_long_unstructured(self):
h = self.make_header('Subject', 'This is a long header '
'line that will need to be folded into two lines '
'and will demonstrate basic folding')
self.assertEqual(h.fold(policy=policy.default),
'Subject: This is a long header line that will '
'need to be folded into two lines\n'
' and will demonstrate basic folding\n')
def test_unstructured_short_max_line_length(self):
h = self.make_header('Subject', 'this is a short header '
'that will be folded anyway')
self.assertEqual(
h.fold(policy=policy.default.clone(max_line_length=20)),
textwrap.dedent("""\
Subject: this is a
short header that
will be folded
anyway
"""))
def test_fold_unstructured_single_word(self):
h = self.make_header('Subject', 'test')
self.assertEqual(h.fold(policy=policy.default), 'Subject: test\n')
def test_fold_unstructured_short(self):
h = self.make_header('Subject', 'test test test')
self.assertEqual(h.fold(policy=policy.default),
'Subject: test test test\n')
def test_fold_unstructured_with_overlong_word(self):
h = self.make_header('Subject', 'thisisaverylonglineconsistingofa'
'singlewordthatwontfit')
self.assertEqual(
h.fold(policy=policy.default.clone(max_line_length=20)),
'Subject: thisisaverylonglineconsistingofasinglewordthatwontfit\n')
def test_fold_unstructured_with_two_overlong_words(self):
h = self.make_header('Subject', 'thisisaverylonglineconsistingofa'
'singlewordthatwontfit plusanotherverylongwordthatwontfit')
self.assertEqual(
h.fold(policy=policy.default.clone(max_line_length=20)),
'Subject: thisisaverylonglineconsistingofasinglewordthatwontfit\n'
' plusanotherverylongwordthatwontfit\n')
def test_fold_unstructured_with_slightly_long_word(self):
h = self.make_header('Subject', 'thislongwordislessthanmaxlinelen')
self.assertEqual(
h.fold(policy=policy.default.clone(max_line_length=35)),
'Subject:\n thislongwordislessthanmaxlinelen\n')
def test_fold_unstructured_with_commas(self):
# The old wrapper would fold this at the commas.
h = self.make_header('Subject', "This header is intended to "
"demonstrate, in a fairly succinct way, that we now do "
"not give a , special treatment in unstructured headers.")
self.assertEqual(
h.fold(policy=policy.default.clone(max_line_length=60)),
textwrap.dedent("""\
Subject: This header is intended to demonstrate, in a fairly
succinct way, that we now do not give a , special treatment
in unstructured headers.
"""))
def test_fold_address_list(self):
h = self.make_header('To', '"Theodore H. Perfect" <yes@man.com>, '
'"My address is very long because my name is long" <foo@bar.com>, '
'"Only A. Friend" <no@yes.com>')
self.assertEqual(h.fold(policy=policy.default), textwrap.dedent("""\
To: "Theodore H. Perfect" <yes@man.com>,
"My address is very long because my name is long" <foo@bar.com>,
"Only A. Friend" <no@yes.com>
"""))
def test_fold_date_header(self):
h = self.make_header('Date', 'Sat, 2 Feb 2002 17:00:06 -0800')
self.assertEqual(h.fold(policy=policy.default),
'Date: Sat, 02 Feb 2002 17:00:06 -0800\n')
if __name__ == '__main__':
unittest.main()
| 35.716141 | 102 | 0.53218 |
dc58f3dabae186dfd83fc40be1fe185f19e7aa2f | 661 | py | Python | setup.py | narbutas/pgbackup | 2bc65dc9c4cdba135e0ae68c71d034de50fddda8 | [
"Apache-2.0"
] | null | null | null | setup.py | narbutas/pgbackup | 2bc65dc9c4cdba135e0ae68c71d034de50fddda8 | [
"Apache-2.0"
] | null | null | null | setup.py | narbutas/pgbackup | 2bc65dc9c4cdba135e0ae68c71d034de50fddda8 | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages, setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='pgbackup',
version='1.0.0',
author='Tadas Narbutas',
author_email='narbutas.tadas@gmail.com',
    description='CLI tool for PostgreSQL backup',
license='Apache License 2.0',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/narbutas/pgbackup',
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=['boto3'],
python_requires='>=3.6',
entry_points={
        # console_scripts entries use the "name = module:callable" syntax
        'console_scripts': [
            'pgbackup=pgbackup.cli:main'
]
}
) | 25.423077 | 47 | 0.727685 |
c53f09e699508f98c067f871551fa75bda29b7e3 | 1,160 | py | Python | examples/hlapi/v3arch/asyncore/sync/agent/ntforg/v2c-trap-with-notification-objects.py | RKinsey/pysnmp | 96b5cf31e2f5d19f34d0dd1075014c488f6a5789 | [
"BSD-2-Clause"
] | 492 | 2016-03-13T11:03:13.000Z | 2022-03-21T02:52:57.000Z | examples/hlapi/v3arch/asyncore/sync/agent/ntforg/v2c-trap-with-notification-objects.py | bartomo/pysnmp | becd15c79c9a6b5696928ecd50bf5cca8b1770a1 | [
"BSD-2-Clause"
] | 372 | 2016-03-29T22:42:05.000Z | 2022-03-26T10:28:25.000Z | examples/hlapi/v3arch/asyncore/sync/agent/ntforg/v2c-trap-with-notification-objects.py | bartomo/pysnmp | becd15c79c9a6b5696928ecd50bf5cca8b1770a1 | [
"BSD-2-Clause"
] | 197 | 2016-03-13T11:01:54.000Z | 2022-03-07T19:52:15.000Z | """
SNMPv2c TRAP via NOTIFICATION-TYPE
++++++++++++++++++++++++++++++++++
Initialize TRAP message contents from variables specified
in *NOTIFICATION-TYPE* SMI macro.
* SNMPv2c
* with community name 'public'
* over IPv4/UDP
* send TRAP notification
* with TRAP ID 'linkUp' specified as a MIB symbol
* include values for managed objects implicitly added to notification
(via NOTIFICATION-TYPE->OBJECTS)
Functionally similar to:
| $ snmptrap -v2c -c public demo.snmplabs.com 0 1.3.6.1.6.3.1.1.5.1 1.3.6.1.2.1.2.2.1.1.123 i 123 1.3.6.1.2.1.2.2.1.7.123 i 1 1.3.6.1.2.1.2.2.1.8.123 i 1
"""#
from pysnmp.hlapi import *
iterator = sendNotification(
SnmpEngine(),
CommunityData('public'),
UdpTransportTarget(('demo.snmplabs.com', 162)),
ContextData(),
'trap',
NotificationType(
ObjectIdentity('IF-MIB', 'linkUp'),
instanceIndex=(123,),
objects={
('IF-MIB', 'ifIndex'): 123,
('IF-MIB', 'ifAdminStatus'): 'up',
('IF-MIB', 'ifOperStatus'): 'up'
}
)
)
errorIndication, errorStatus, errorIndex, varBinds = next(iterator)
if errorIndication:
print(errorIndication)
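# A common follow-up for this kind of example (an assumption here, not shown above):
# also branch on errorStatus and print the returned varBinds, e.g.
#
#     elif errorStatus:
#         print('%s at %s' % (errorStatus.prettyPrint(),
#                             errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
#     else:
#         for varBind in varBinds:
#             print(' = '.join([x.prettyPrint() for x in varBind]))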
| 26.363636 | 153 | 0.636207 |
52ef6aef55266aa4b938d5c4b2af97e00c2a9e7f | 344 | py | Python | Structured Programming/Lesson 03 - Basic Operators & Input/ex_5.py | Raffaelus/Computer-Science-Graduation---UNIP | 05fd10d05efb63d9b6024f73947846f9ec805c25 | [
"MIT"
] | null | null | null | Structured Programming/Lesson 03 - Basic Operators & Input/ex_5.py | Raffaelus/Computer-Science-Graduation---UNIP | 05fd10d05efb63d9b6024f73947846f9ec805c25 | [
"MIT"
] | null | null | null | Structured Programming/Lesson 03 - Basic Operators & Input/ex_5.py | Raffaelus/Computer-Science-Graduation---UNIP | 05fd10d05efb63d9b6024f73947846f9ec805c25 | [
"MIT"
] | null | null | null | nome = input("Digite o nome do aluno: ")
notas = list(range(3))
notas[0] = float(input("Digite a nota 1: "))
notas[1] = float(input("Digite a nota 2: "))
notas[2] = float(input("Digite a nota 3: "))
soma_notas = 0
for nota in notas:
soma_notas += nota
else:
media = soma_notas / len(notas)
print(nome, "sua média foi de:", media) | 24.571429 | 44 | 0.636628 |
a8fb8ceea9aa8192bd5b9922d4f87941ebbe7a71 | 1,819 | py | Python | convoy/api/endpoint.py | frain-dev/convoy-python | 7607a6b65615cc83c38bfb7dba4ad6ed564860bc | [
"MIT"
] | null | null | null | convoy/api/endpoint.py | frain-dev/convoy-python | 7607a6b65615cc83c38bfb7dba4ad6ed564860bc | [
"MIT"
] | null | null | null | convoy/api/endpoint.py | frain-dev/convoy-python | 7607a6b65615cc83c38bfb7dba4ad6ed564860bc | [
"MIT"
] | null | null | null | from convoy.client import Client
class Endpoint():
"""Initializes an Endpoint object to make calls to the /endpoints endpoint.
Parameters
----------
config : dict of config values
"""
def __init__(self, config):
self.client = Client(config)
def all(self, appId, query):
'''
Get all endpoints for an application.
'''
response = self.client.httpGet("/applications/%s/endpoints" % appId, query)
return response
def create(self, appId, query, data):
'''
Create a new endpoint.
Parameters
----------
data = {
"url": "",
"description": "",
"secret": "",
"events": [],
}
'''
response = self.client.httpPost("/applications/%s/endpoints" % appId, query, data)
return response
def find(self, appId, endpointId, query):
'''
Find a particular application.
'''
response = self.client.httpGet("/applications/%s/endpoints/%s" % (appId, endpointId), query)
return response
def update(self, appId, endpointId, query, data):
'''
        Update an endpoint.
Parameters
----------
data = {
"url": "",
"description": "",
"secret": "",
"events": [],
}
'''
response = self.client.httpPut("/applications/%s/endpoints/%s" % (appId, endpointId), query, data)
return response
def delete(self, appId, endpointId, query, data):
'''
        Delete an endpoint.
'''
response = self.client.httpDelete("/applications/%s/endpoints/%s" % (appId, endpointId), query, data)
return response
| 28.421875 | 109 | 0.509621 |
a0f327d757c776d182b964fa9710677cc26cdcb4 | 677 | py | Python | stockTickerAnalysis/scrapers/scrape.py | raghuveermadala/stockTickerAnalysis | a74bb7c854d91c2f61fa1f9f3b0c586304005b04 | [
"MIT"
] | null | null | null | stockTickerAnalysis/scrapers/scrape.py | raghuveermadala/stockTickerAnalysis | a74bb7c854d91c2f61fa1f9f3b0c586304005b04 | [
"MIT"
] | null | null | null | stockTickerAnalysis/scrapers/scrape.py | raghuveermadala/stockTickerAnalysis | a74bb7c854d91c2f61fa1f9f3b0c586304005b04 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from scrapers import cache
from requests import get
from lxml import html
def scrape_page(url):
"""
Scrapes a web page given a url and returns an HTML tree representation.
    This will check for a cached version of the page before scraping and will
    attempt to cache the page after scraping.
:param url: A string of the url of the requested web page
:return: html tree structure based on the html markup of the scraped website
"""
cached_page = cache.get(url)
if cached_page:
return html.fromstring(cached_page)
else:
page = get(url)
cache.set(url, page.text)
return html.fromstring(page.text)
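if __name__ == "__main__":
    # Minimal manual check of scrape_page; the URL is a placeholder and this
    # assumes the cache backend behind `scrapers.cache` is configured.
    tree = scrape_page("https://example.com")
    print(tree.findtext(".//title"))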
| 27.08 | 95 | 0.695716 |
a14a367f70a25251241e9b66122179bf71bfd5e1 | 2,184 | py | Python | nomadgram/images/models.py | TaeHyoungKwon/nomadgram | 338a2aaa0d8fd957e41f10ae114611e438f6f408 | [
"MIT"
] | null | null | null | nomadgram/images/models.py | TaeHyoungKwon/nomadgram | 338a2aaa0d8fd957e41f10ae114611e438f6f408 | [
"MIT"
] | null | null | null | nomadgram/images/models.py | TaeHyoungKwon/nomadgram | 338a2aaa0d8fd957e41f10ae114611e438f6f408 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.functional import cached_property
from taggit.managers import TaggableManager
from nomadgram.users import models as user_models
class TimeStampedModel(models.Model):
'''
    Abstracts the created_at and updated_at fields that are common to
    all models into an abstract base class.
'''
created_at = models.DateTimeField(("Created Date"), auto_now_add=True)
updated_at = models.DateTimeField(("Updated Date"), auto_now=True)
    # Mark this model as abstract
class Meta:
abstract = True
class Image(TimeStampedModel):
'''
Image Model
'''
file = models.ImageField(("이미지"))
location = models.CharField(("장소"), max_length=50)
caption = models.TextField(("내용"))
    # Each creator can create multiple images.
creator = models.ForeignKey(
user_models.User,
verbose_name=("image_작성자"),
on_delete=models.CASCADE, null=True)
tags = TaggableManager()
def __str__(self):
return str("{} - {}".format(self.location, self.caption))
class Meta:
ordering=['-created_at']
@cached_property
def like_count(self):
return self.like_set.all().count()
@cached_property
def comment_count(self):
return self.comment_set.all().count()
class Comment(TimeStampedModel):
'''
Comment Model
'''
message = models.TextField(("메세지"))
    # A creator can write multiple comments.
creator = models.ForeignKey(user_models.User, verbose_name=("comment_작성자"), on_delete=models.CASCADE, null=True)
    # Each image can have multiple comments.
image = models.ForeignKey(Image, verbose_name=("comment_이미지"), on_delete=models.CASCADE, null=True)
def __str__(self):
return str('{} : {}'.format("Comment", self.message))
class Like(TimeStampedModel):
'''
Like Model
'''
    # Each user can have multiple likes.
creator = models.ForeignKey(user_models.User, verbose_name=("like_작성자"), on_delete=models.CASCADE, null=True)
    # Each image can have multiple likes.
image = models.ForeignKey(Image, verbose_name=("like_이미지"), on_delete=models.CASCADE, null=True)
def __str__(self):
return str('User: {} - Image Caption: {}'.format(self.creator.username, self.image.caption))
| 29.12 | 116 | 0.666667 |
f2e9743ae7580c70f0ada53651fe8183b863e978 | 5,886 | py | Python | nltk/align/gdfa.py | dmcc/nltk | 33c193d2de3876ca89fb08140557e16f01c79c6f | [
"Apache-2.0"
] | 1 | 2015-01-25T19:20:11.000Z | 2015-01-25T19:20:11.000Z | nltk/align/gdfa.py | dmcc/nltk | 33c193d2de3876ca89fb08140557e16f01c79c6f | [
"Apache-2.0"
] | null | null | null | nltk/align/gdfa.py | dmcc/nltk | 33c193d2de3876ca89fb08140557e16f01c79c6f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Natural Language Toolkit: GDFA word alignment symmetrization
#
# Copyright (C) 2001-2015 NLTK Project
# Authors: Liling Tan
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import codecs
from collections import defaultdict
def grow_diag_final_and(srclen, trglen, e2f, f2e):
"""
    This function symmetrizes the source-to-target and target-to-source
    word alignment outputs using the grow-diag-final-and (GDFA) heuristic
    (Koehn, 2005).
Step 1: Find the intersection of the bidirectional alignment.
Step 2: Search for additional neighbor alignment points to be added, given
these criteria: (i) neighbor alignments points are not in the
intersection and (ii) neighbor alignments are in the union.
    Step 3: Add all other alignment points that are not in the intersection and
            not in the neighboring alignments that met the criteria, but are in
            the original forward/backward alignment outputs.
>>> forw = ('0-0 2-1 9-2 21-3 10-4 7-5 11-6 9-7 12-8 1-9 3-10 '
... '4-11 17-12 17-13 25-14 13-15 24-16 11-17 28-18')
>>> back = ('0-0 1-9 2-9 3-10 4-11 5-12 6-6 7-5 8-6 9-7 10-4 '
... '11-6 12-8 13-12 15-12 17-13 18-13 19-12 20-13 '
... '21-3 22-12 23-14 24-17 25-15 26-17 27-18 28-18')
>>> srctext = ("この よう な ハロー 白色 わい 星 の L 関数 "
... "は L と 共 に 不連続 に 増加 する こと が "
... "期待 さ れる こと を 示し た 。")
>>> trgtext = ("Therefore , we expect that the luminosity function "
... "of such halo white dwarfs increases discontinuously "
... "with the luminosity .")
>>> srclen = len(srctext.split())
>>> trglen = len(trgtext.split())
>>>
>>> gdfa = grow_diag_final_and(srclen, trglen, forw, back)
>>> gdfa == set([(28, 18), (6, 6), (24, 17), (2, 1), (15, 12), (13, 12),
... (2, 9), (3, 10), (26, 17), (25, 15), (8, 6), (9, 7), (20,
... 13), (18, 13), (0, 0), (10, 4), (13, 15), (23, 14), (7, 5),
... (25, 14), (1, 9), (17, 13), (4, 11), (11, 17), (9, 2), (22,
... 12), (27, 18), (24, 16), (21, 3), (19, 12), (17, 12), (5,
... 12), (11, 6), (12, 8)])
True
References:
Koehn, P., A. Axelrod, A. Birch, C. Callison, M. Osborne, and D. Talbot.
2005. Edinburgh System Description for the 2005 IWSLT Speech
Translation Evaluation. In MT Eval Workshop.
:type srclen: int
:param srclen: the number of tokens in the source language
:type trglen: int
:param trglen: the number of tokens in the target language
:type e2f: str
:param e2f: the forward word alignment outputs from source-to-target
language (in pharaoh output format)
:type f2e: str
:param f2e: the backward word alignment outputs from target-to-source
language (in pharaoh output format)
:rtype: set(tuple(int))
:return: the symmetrized alignment points from the GDFA algorithm
"""
# Converts pharaoh text format into list of tuples.
e2f = [tuple(map(int,a.split('-'))) for a in e2f.split()]
f2e = [tuple(map(int,a.split('-'))) for a in f2e.split()]
neighbors = [(-1,0),(0,-1),(1,0),(0,1),(-1,-1),(-1,1),(1,-1),(1,1)]
alignment = set(e2f).intersection(set(f2e)) # Find the intersection.
union = set(e2f).union(set(f2e))
# *aligned* is used to check if neighbors are aligned in grow_diag()
aligned = defaultdict(set)
for i,j in alignment:
aligned['e'].add(i)
        aligned['f'].add(j)
def grow_diag():
"""
        Search for neighboring points and add them to the intersected alignment
        points if the criteria are met.
"""
prev_len = len(alignment) - 1
# iterate until no new points added
while prev_len < len(alignment):
# for english word e = 0 ... en
for e in range(srclen):
# for foreign word f = 0 ... fn
for f in range(trglen):
# if ( e aligned with f)
if (e,f) in alignment:
# for each neighboring point (e-new, f-new)
for neighbor in neighbors:
neighbor = tuple(i+j for i,j in zip((e,f),neighbor))
e_new, f_new = neighbor
# if ( ( e-new not aligned and f-new not aligned)
# and (e-new, f-new in union(e2f, f2e) )
if (e_new not in aligned and f_new not in aligned)\
and neighbor in union:
alignment.add(neighbor)
aligned['e'].add(e_new); aligned['f'].add(f_new)
prev_len+=1
def final_and(a):
"""
        Add the remaining points that are not in the intersection and not in the
        neighboring alignments, but are in the original *e2f* and *f2e* alignments.
"""
# for english word e = 0 ... en
for e_new in range(srclen):
# for foreign word f = 0 ... fn
for f_new in range(trglen):
# if ( ( e-new not aligned and f-new not aligned)
# and (e-new, f-new in union(e2f, f2e) )
if (e_new not in aligned
and f_new not in aligned
and (e_new, f_new) in a):
alignment.add((e_new, f_new))
aligned['e'].add(e_new); aligned['f'].add(f_new)
grow_diag()
final_and(e2f)
final_and(f2e)
return alignment
# run doctests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43.279412 | 80 | 0.526673 |
6d9b3fd63f0b7a0144844f80438b717e03ff45f7 | 930 | py | Python | Tr4PrFnPredLib/postprocess/DeepGoPostProcess.py | Tr4PrFnPred/TransformerProteinFunctionPredLib | 115dd4105631f96e05409fd9e7f6186ecb5df898 | [
"MIT"
] | 1 | 2021-04-13T21:03:10.000Z | 2021-04-13T21:03:10.000Z | Tr4PrFnPredLib/postprocess/DeepGoPostProcess.py | Tr4PrFnPred/TransformerProteinFunctionPredLib | 115dd4105631f96e05409fd9e7f6186ecb5df898 | [
"MIT"
] | 1 | 2021-04-13T22:27:45.000Z | 2021-04-13T22:27:45.000Z | Tr4PrFnPredLib/postprocess/DeepGoPostProcess.py | Tr4PrFnPred/Tr4PrFnPredLib | 115dd4105631f96e05409fd9e7f6186ecb5df898 | [
"MIT"
] | null | null | null | from .PostProcess import PostProcess
class DeepGoPostProcess(PostProcess):
def __init__(self, terms):
self.terms = terms
def postprocess(self, predictions, **kwargs):
ids = kwargs["ids"]
prot_ids = kwargs["prot_ids"]
deep_preds = {}
for i, j in enumerate(ids):
prot_id = prot_ids[j]
if prot_id not in deep_preds:
deep_preds[prot_id] = {}
for l in range(len(self.terms)):
if predictions[i, l] >= 0.01: # Filter out very low scores
if self.terms.iloc[l][0] not in deep_preds[prot_id]:
deep_preds[prot_id][self.terms.iloc[l][0]] = predictions[i, l]
else:
deep_preds[prot_id][self.terms.iloc[l][0]] = max(
deep_preds[prot_id][self.terms.iloc[l][0]], predictions[i, l])
return deep_preds
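    # Hypothetical usage sketch (array shapes and names below are assumptions,
    # not part of this class):
    #
    #     import numpy as np, pandas as pd
    #     terms = pd.DataFrame({"terms": ["GO:0008150", "GO:0003674"]})
    #     post = DeepGoPostProcess(terms)
    #     preds = np.array([[0.90, 0.005]])   # one sequence scored against two terms
    #     post.postprocess(preds, ids=[0], prot_ids=["P12345"])
    #     # -> {"P12345": {"GO:0008150": 0.9}}  (scores below 0.01 are dropped)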
| 34.444444 | 90 | 0.53871 |
8550d7566cefc016697e7b66a23973817ea23be8 | 7,109 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/files/patch.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/ansible/modules/files/patch.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/ansible/modules/files/patch.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Luis Alberto Perez Lazaro <luisperlazaro@gmail.com>
# Copyright: (c) 2015, Jakub Jirutka <jakub@jirutka.cz>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: patch
author:
- Jakub Jirutka (@jirutka)
- Luis Alberto Perez Lazaro (@luisperlaz)
version_added: '1.9'
description:
- Apply patch files using the GNU patch tool.
short_description: Apply patch files using the GNU patch tool
options:
basedir:
description:
- Path of a base directory in which the patch file will be applied.
- May be omitted when C(dest) option is specified, otherwise required.
type: path
dest:
description:
- Path of the file on the remote machine to be patched.
- The names of the files to be patched are usually taken from the patch
file, but if there's just one file to be patched it can specified with
this option.
type: path
aliases: [ originalfile ]
src:
description:
- Path of the patch file as accepted by the GNU patch tool. If
C(remote_src) is 'no', the patch source file is looked up from the
module's I(files) directory.
type: path
required: true
aliases: [ patchfile ]
state:
description:
- Whether the patch should be applied or reverted.
type: str
choices: [ absent, present ]
default: present
version_added: "2.6"
remote_src:
description:
- If C(no), it will search for src at originating/master machine, if C(yes) it will
go to the remote/target machine for the C(src).
type: bool
default: no
strip:
description:
- Number that indicates the smallest prefix containing leading slashes
that will be stripped from each file name found in the patch file.
- For more information see the strip parameter of the GNU patch tool.
type: int
default: 0
backup:
version_added: "2.0"
description:
- Passes C(--backup --version-control=numbered) to patch, producing numbered backup copies.
type: bool
default: no
binary:
version_added: "2.0"
description:
- Setting to C(yes) will disable patch's heuristic for transforming CRLF
line endings into LF.
- Line endings of src and dest must match.
- If set to C(no), C(patch) will replace CRLF in C(src) files on POSIX.
type: bool
default: no
notes:
- This module requires GNU I(patch) utility to be installed on the remote host.
'''
EXAMPLES = r'''
- name: Apply patch to one file
patch:
src: /tmp/index.html.patch
dest: /var/www/index.html
- name: Apply patch to multiple files under basedir
patch:
src: /tmp/customize.patch
basedir: /var/www
strip: 1
- name: Revert patch to one file
patch:
src: /tmp/index.html.patch
dest: /var/www/index.html
state: absent
'''
import os
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils._text import to_native
class PatchError(Exception):
pass
def add_dry_run_option(opts):
# Older versions of FreeBSD, OpenBSD and NetBSD support the --check option only.
if get_platform().lower() in ['openbsd', 'netbsd', 'freebsd']:
opts.append('--check')
else:
opts.append('--dry-run')
def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, state='present'):
opts = ['--quiet', '--forward',
"--strip=%s" % strip, "--directory='%s'" % basedir,
"--input='%s'" % patch_file]
add_dry_run_option(opts)
if binary:
opts.append('--binary')
if dest_file:
opts.append("'%s'" % dest_file)
if state == 'present':
opts.append('--reverse')
(rc, _, _) = patch_func(opts)
return rc == 0
def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False, state='present'):
opts = ['--quiet', '--forward', '--batch', '--reject-file=-',
"--strip=%s" % strip, "--directory='%s'" % basedir,
"--input='%s'" % patch_file]
if dry_run:
add_dry_run_option(opts)
if binary:
opts.append('--binary')
if dest_file:
opts.append("'%s'" % dest_file)
if backup:
opts.append('--backup --version-control=numbered')
if state == 'absent':
opts.append('--reverse')
(rc, out, err) = patch_func(opts)
if rc != 0:
msg = err or out
raise PatchError(msg)
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='path', required=True, aliases=['patchfile']),
dest=dict(type='path', aliases=['originalfile']),
basedir=dict(type='path'),
strip=dict(type='int', default=0),
remote_src=dict(type='bool', default=False),
# NB: for 'backup' parameter, semantics is slightly different from standard
# since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
backup=dict(type='bool', default=False),
binary=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
required_one_of=[['dest', 'basedir']],
supports_check_mode=True,
)
# Create type object as namespace for module params
p = type('Params', (), module.params)
if not os.access(p.src, os.R_OK):
module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))
if p.dest and not os.access(p.dest, os.W_OK):
module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))
if p.basedir and not os.path.exists(p.basedir):
module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
if not p.basedir:
p.basedir = os.path.dirname(p.dest)
patch_bin = module.get_bin_path('patch')
if patch_bin is None:
module.fail_json(msg="patch command not found")
def patch_func(opts):
return module.run_command('%s %s' % (patch_bin, ' '.join(opts)))
# patch need an absolute file name
p.src = os.path.abspath(p.src)
changed = False
if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, state=p.state):
try:
apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
dry_run=module.check_mode, backup=p.backup, state=p.state)
changed = True
except PatchError as e:
module.fail_json(msg=to_native(e), exception=format_exc())
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| 32.610092 | 134 | 0.636658 |
7658ee5d7640d5f4924e1adc305bd8792e904cbc | 15,464 | py | Python | cunt/cmds/init_funcs.py | CallMeBrado/cunt-blockchain | 9b140b7e5541f3baffabe02a55b75d9aeb889999 | [
"Apache-2.0"
] | 7 | 2021-08-09T19:01:51.000Z | 2021-12-09T04:32:09.000Z | cunt/cmds/init_funcs.py | CallMeBrado/cunt-blockchain | 9b140b7e5541f3baffabe02a55b75d9aeb889999 | [
"Apache-2.0"
] | 22 | 2021-08-17T04:12:11.000Z | 2022-03-29T04:10:38.000Z | cunt/cmds/init_funcs.py | CallMeBrado/cunt-blockchain | 9b140b7e5541f3baffabe02a55b75d9aeb889999 | [
"Apache-2.0"
] | 4 | 2021-09-05T12:04:51.000Z | 2022-03-15T08:44:32.000Z | import os
import shutil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import yaml
from cunt import __version__
from cunt.consensus.coinbase import create_puzzlehash_for_pk
from cunt.ssl.create_ssl import (
ensure_ssl_dirs,
generate_ca_signed_cert,
get_cunt_ca_crt_key,
make_ca_cert,
write_ssl_cert_and_key,
)
from cunt.util.bech32m import encode_puzzle_hash
from cunt.util.config import (
create_default_cunt_config,
initial_config_file,
load_config,
save_config,
unflatten_properties,
)
from cunt.util.ints import uint32
from cunt.util.keychain import Keychain
from cunt.util.path import mkdir
from cunt.util.ssl_check import (
DEFAULT_PERMISSIONS_CERT_FILE,
DEFAULT_PERMISSIONS_KEY_FILE,
RESTRICT_MASK_CERT_FILE,
RESTRICT_MASK_KEY_FILE,
check_and_fix_permissions_for_ssl_file,
fix_ssl,
)
from cunt.wallet.derive_keys import master_sk_to_pool_sk, master_sk_to_wallet_sk
from cunt.cmds.configure import configure
private_node_names = {"full_node", "wallet", "farmer", "harvester", "timelord", "daemon"}
public_node_names = {"full_node", "wallet", "farmer", "introducer", "timelord"}
def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]):
for k in do_not_migrate_keys:
if k in updated and do_not_migrate_keys[k] == "":
updated.pop(k)
for k, v in default.items():
ignore = False
if k in do_not_migrate_keys:
do_not_data = do_not_migrate_keys[k]
if isinstance(do_not_data, dict):
ignore = False
else:
ignore = True
if isinstance(v, dict) and k in updated and ignore is False:
# If there is an intermediate key with empty string value, do not migrate all descendants
if do_not_migrate_keys.get(k, None) == "":
do_not_migrate_keys[k] = v
dict_add_new_default(updated[k], default[k], do_not_migrate_keys.get(k, {}))
elif k not in updated or ignore is True:
updated[k] = v
def check_keys(new_root: Path, keychain: Optional[Keychain] = None) -> None:
if keychain is None:
keychain = Keychain()
all_sks = keychain.get_all_private_keys()
if len(all_sks) == 0:
print("No keys are present in the keychain. Generate them with 'cunt keys generate'")
return None
config: Dict = load_config(new_root, "config.yaml")
pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks]
all_targets = []
stop_searching_for_farmer = "vag_target_address" not in config["farmer"]
stop_searching_for_pool = "vag_target_address" not in config["pool"]
number_of_ph_to_search = 500
selected = config["selected_network"]
prefix = config["network_overrides"]["config"][selected]["address_prefix"]
for i in range(number_of_ph_to_search):
if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
break
for sk, _ in all_sks:
all_targets.append(
encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix)
)
if all_targets[-1] == config["farmer"].get("vag_target_address"):
stop_searching_for_farmer = True
if all_targets[-1] == config["pool"].get("vag_target_address"):
stop_searching_for_pool = True
# Set the destinations, if necessary
updated_target: bool = False
if "vag_target_address" not in config["farmer"]:
print(
f"Setting the vag destination for the farmer reward (1/8 plus fees, solo and pooling) to {all_targets[0]}"
)
config["farmer"]["vag_target_address"] = all_targets[0]
updated_target = True
elif config["farmer"]["vag_target_address"] not in all_targets:
print(
f"WARNING: using a farmer address which we don't have the private"
f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
f"{config['farmer']['vag_target_address']} with {all_targets[0]}"
)
if "pool" not in config:
config["pool"] = {}
if "vag_target_address" not in config["pool"]:
print(f"Setting the vag destination address for pool reward (7/8 for solo only) to {all_targets[0]}")
config["pool"]["vag_target_address"] = all_targets[0]
updated_target = True
elif config["pool"]["vag_target_address"] not in all_targets:
print(
f"WARNING: using a pool address which we don't have the private"
f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
f"{config['pool']['vag_target_address']} with {all_targets[0]}"
)
if updated_target:
print(
f"To change the VAG destination addresses, edit the `vag_target_address` entries in"
f" {(new_root / 'config' / 'config.yaml').absolute()}."
)
# Set the pool pks in the farmer
pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys)
if "pool_public_keys" in config["farmer"]:
for pk_hex in config["farmer"]["pool_public_keys"]:
# Add original ones in config
pool_pubkeys_hex.add(pk_hex)
config["farmer"]["pool_public_keys"] = pool_pubkeys_hex
save_config(new_root, "config.yaml", config)
def copy_files_rec(old_path: Path, new_path: Path):
if old_path.is_file():
print(f"{new_path}")
mkdir(new_path.parent)
shutil.copy(old_path, new_path)
elif old_path.is_dir():
for old_path_child in old_path.iterdir():
new_path_child = new_path / old_path_child.name
copy_files_rec(old_path_child, new_path_child)
def migrate_from(
old_root: Path,
new_root: Path,
manifest: List[str],
do_not_migrate_settings: List[str],
):
"""
Copy all the files in "manifest" to the new config directory.
"""
if old_root == new_root:
print("same as new path, exiting")
return 1
if not old_root.is_dir():
print(f"{old_root} not found - this is ok if you did not install this version")
return 0
print(f"\n{old_root} found")
print(f"Copying files from {old_root} to {new_root}\n")
for f in manifest:
old_path = old_root / f
new_path = new_root / f
copy_files_rec(old_path, new_path)
# update config yaml with new keys
config: Dict = load_config(new_root, "config.yaml")
config_str: str = initial_config_file("config.yaml")
default_config: Dict = yaml.safe_load(config_str)
flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings})
dict_add_new_default(config, default_config, flattened_keys)
save_config(new_root, "config.yaml", config)
create_all_ssl(new_root)
return 1
def create_all_ssl(root_path: Path):
# remove old key and crt
config_dir = root_path / "config"
old_key_path = config_dir / "trusted.key"
old_crt_path = config_dir / "trusted.crt"
if old_key_path.exists():
print(f"Old key not needed anymore, deleting {old_key_path}")
os.remove(old_key_path)
if old_crt_path.exists():
print(f"Old crt not needed anymore, deleting {old_crt_path}")
os.remove(old_crt_path)
ssl_dir = config_dir / "ssl"
ca_dir = ssl_dir / "ca"
ensure_ssl_dirs([ssl_dir, ca_dir])
private_ca_key_path = ca_dir / "private_ca.key"
private_ca_crt_path = ca_dir / "private_ca.crt"
cunt_ca_crt, cunt_ca_key = get_cunt_ca_crt_key()
cunt_ca_crt_path = ca_dir / "cunt_ca.crt"
cunt_ca_key_path = ca_dir / "cunt_ca.key"
write_ssl_cert_and_key(cunt_ca_crt_path, cunt_ca_crt, cunt_ca_key_path, cunt_ca_key)
if not private_ca_key_path.exists() or not private_ca_crt_path.exists():
# Create private CA
print(f"Can't find private CA, creating a new one in {root_path} to generate TLS certificates")
make_ca_cert(private_ca_crt_path, private_ca_key_path)
# Create private certs for each node
ca_key = private_ca_key_path.read_bytes()
ca_crt = private_ca_crt_path.read_bytes()
generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True)
else:
# This is entered when user copied over private CA
print(f"Found private CA in {root_path}, using it to generate TLS certificates")
ca_key = private_ca_key_path.read_bytes()
ca_crt = private_ca_crt_path.read_bytes()
generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True)
cunt_ca_crt, cunt_ca_key = get_cunt_ca_crt_key()
generate_ssl_for_nodes(ssl_dir, cunt_ca_crt, cunt_ca_key, False, overwrite=False)
def generate_ssl_for_nodes(ssl_dir: Path, ca_crt: bytes, ca_key: bytes, private: bool, overwrite=True):
if private:
names = private_node_names
else:
names = public_node_names
for node_name in names:
node_dir = ssl_dir / node_name
ensure_ssl_dirs([node_dir])
if private:
prefix = "private"
else:
prefix = "public"
key_path = node_dir / f"{prefix}_{node_name}.key"
crt_path = node_dir / f"{prefix}_{node_name}.crt"
if key_path.exists() and crt_path.exists() and overwrite is False:
continue
generate_ca_signed_cert(ca_crt, ca_key, crt_path, key_path)
def copy_cert_files(cert_path: Path, new_path: Path):
for old_path_child in cert_path.glob("*.crt"):
new_path_child = new_path / old_path_child.name
copy_files_rec(old_path_child, new_path_child)
check_and_fix_permissions_for_ssl_file(new_path_child, RESTRICT_MASK_CERT_FILE, DEFAULT_PERMISSIONS_CERT_FILE)
for old_path_child in cert_path.glob("*.key"):
new_path_child = new_path / old_path_child.name
copy_files_rec(old_path_child, new_path_child)
check_and_fix_permissions_for_ssl_file(new_path_child, RESTRICT_MASK_KEY_FILE, DEFAULT_PERMISSIONS_KEY_FILE)
def init(create_certs: Optional[Path], root_path: Path, fix_ssl_permissions: bool = False, testnet: bool = False):
if create_certs is not None:
if root_path.exists():
if os.path.isdir(create_certs):
ca_dir: Path = root_path / "config/ssl/ca"
if ca_dir.exists():
print(f"Deleting your OLD CA in {ca_dir}")
shutil.rmtree(ca_dir)
print(f"Copying your CA from {create_certs} to {ca_dir}")
copy_cert_files(create_certs, ca_dir)
create_all_ssl(root_path)
else:
print(f"** Directory {create_certs} does not exist **")
else:
print(f"** {root_path} does not exist. Executing core init **")
# sanity check here to prevent infinite recursion
if (
cunt_init(root_path, fix_ssl_permissions=fix_ssl_permissions, testnet=testnet) == 0
and root_path.exists()
):
return init(create_certs, root_path, fix_ssl_permissions)
print(f"** {root_path} was not created. Exiting **")
return -1
else:
return cunt_init(root_path, fix_ssl_permissions=fix_ssl_permissions, testnet=testnet)
def cunt_version_number() -> Tuple[str, str, str, str]:
scm_full_version = __version__
left_full_version = scm_full_version.split("+")
version = left_full_version[0].split(".")
scm_major_version = version[0]
scm_minor_version = version[1]
if len(version) > 2:
smc_patch_version = version[2]
patch_release_number = smc_patch_version
else:
smc_patch_version = ""
major_release_number = scm_major_version
minor_release_number = scm_minor_version
dev_release_number = ""
# If this is a beta dev release - get which beta it is
if "0b" in scm_minor_version:
original_minor_ver_list = scm_minor_version.split("0b")
major_release_number = str(1 - int(scm_major_version)) # decrement the major release for beta
minor_release_number = scm_major_version
patch_release_number = original_minor_ver_list[1]
if smc_patch_version and "dev" in smc_patch_version:
dev_release_number = "." + smc_patch_version
elif "0rc" in version[1]:
original_minor_ver_list = scm_minor_version.split("0rc")
major_release_number = str(1 - int(scm_major_version)) # decrement the major release for release candidate
minor_release_number = str(int(scm_major_version) + 1) # RC is 0.2.1 for RC 1
patch_release_number = original_minor_ver_list[1]
if smc_patch_version and "dev" in smc_patch_version:
dev_release_number = "." + smc_patch_version
else:
major_release_number = scm_major_version
minor_release_number = scm_minor_version
patch_release_number = smc_patch_version
dev_release_number = ""
install_release_number = major_release_number + "." + minor_release_number
if len(patch_release_number) > 0:
install_release_number += "." + patch_release_number
if len(dev_release_number) > 0:
install_release_number += dev_release_number
return major_release_number, minor_release_number, patch_release_number, dev_release_number
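    # Worked examples of the mapping above (version strings are illustrative):
    #   "1.2.11"       -> ("1", "2", "11", "")        i.e. install release "1.2.11"
    #   "1.0b5.dev123" -> ("0", "1", "5", ".dev123")  i.e. install release "0.1.5.dev123"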
def cunt_minor_release_number():
res = int(cunt_version_number()[2])
print(f"Install release number: {res}")
return res
def cunt_full_version_str() -> str:
major, minor, patch, dev = cunt_version_number()
return f"{major}.{minor}.{patch}{dev}"
def cunt_init(
root_path: Path, *, should_check_keys: bool = True, fix_ssl_permissions: bool = False, testnet: bool = False
):
"""
Standard first run initialization or migration steps. Handles config creation,
generation of SSL certs, and setting target addresses (via check_keys).
should_check_keys can be set to False to avoid blocking when accessing a passphrase
protected Keychain. When launching the daemon from the GUI, we want the GUI to
handle unlocking the keychain.
"""
if os.environ.get("CUNT_ROOT", None) is not None:
print(
f"warning, your CUNT_ROOT is set to {os.environ['CUNT_ROOT']}. "
f"Please unset the environment variable and run cunt init again\n"
f"or manually migrate config.yaml"
)
print(f"Cunt directory {root_path}")
if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists():
# This is reached if CUNT_ROOT is set, or if user has run cunt init twice
# before a new update.
if testnet:
configure(root_path, "", "", "", "", "", "", "", "", testnet="true", peer_connect_timeout="")
if fix_ssl_permissions:
fix_ssl(root_path)
if should_check_keys:
check_keys(root_path)
print(f"{root_path} already exists, no migration action taken")
return -1
create_default_cunt_config(root_path)
if testnet:
configure(root_path, "", "", "", "", "", "", "", "", testnet="true", peer_connect_timeout="")
create_all_ssl(root_path)
if fix_ssl_permissions:
fix_ssl(root_path)
if should_check_keys:
check_keys(root_path)
print("")
print("To see your keys, run 'cunt keys show --show-mnemonic-seed'")
return 0
| 39.85567 | 118 | 0.673176 |
cac6a5d2b1fa734811173ff229e9b4138720bd5d | 311 | py | Python | blog/migrations/0002_remove_post_excerpt.py | AmineAsli/my-personal-blog | 3ced4ec04c0f75d97db2a31d1291a98f6c852699 | [
"MIT"
] | null | null | null | blog/migrations/0002_remove_post_excerpt.py | AmineAsli/my-personal-blog | 3ced4ec04c0f75d97db2a31d1291a98f6c852699 | [
"MIT"
] | null | null | null | blog/migrations/0002_remove_post_excerpt.py | AmineAsli/my-personal-blog | 3ced4ec04c0f75d97db2a31d1291a98f6c852699 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-04-21 04:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='excerpt',
),
]
| 17.277778 | 45 | 0.572347 |
e27e83d9ddf52fa58c3b7c2f7df6db5d98c010fb | 8,310 | py | Python | tests/parsers/winreg_plugins/windows_version.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | 1 | 2020-12-04T10:26:34.000Z | 2020-12-04T10:26:34.000Z | tests/parsers/winreg_plugins/windows_version.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | tests/parsers/winreg_plugins/windows_version.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the WinVer Windows Registry plugin."""
from __future__ import unicode_literals
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.lib import definitions
from plaso.parsers.winreg_plugins import windows_version
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
class WindowsRegistryInstallationEventDataTest(shared_test_lib.BaseTestCase):
"""Tests for the Windows installation event data attribute container."""
def testGetAttributeNames(self):
"""Tests the GetAttributeNames function."""
attribute_container = windows_version.WindowsRegistryInstallationEventData()
expected_attribute_names = [
'_event_data_stream_row_identifier', 'build_number', 'data_type',
'key_path', 'offset', 'owner', 'parser', 'product_name', 'query',
'service_pack', 'version']
attribute_names = sorted(attribute_container.GetAttributeNames())
self.assertEqual(attribute_names, expected_attribute_names)
class WindowsVersionPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Windows version Windows Registry plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path (str): Windows Registry key path.
time_string (str): key last written date and time.
Returns:
dfwinreg.WinRegistryKey: a Windows Registry key.
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromDateTimeString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'CurrentVersion', key_path=key_path,
last_written_time=filetime.timestamp, offset=153)
value_data = 'Service Pack 1'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'CSDVersion', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=1892)
registry_key.AddValue(registry_value)
value_data = '5.1'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'CurrentVersion', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=1121)
registry_key.AddValue(registry_value)
value_data = b'\x13\x1aAP'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'InstallDate', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_LITTLE_ENDIAN, offset=1001)
registry_key.AddValue(registry_value)
value_data = 'MyTestOS'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'ProductName', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=123)
registry_key.AddValue(registry_value)
value_data = 'A Concerned Citizen'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'RegisteredOwner', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=612)
registry_key.AddValue(registry_value)
return registry_key
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = windows_version.WindowsVersionPlugin()
key_path = (
'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
key_path = (
'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion')
time_string = '2012-08-31 20:09:55.123521'
registry_key = self._CreateTestKey(key_path, time_string)
plugin = windows_version.WindowsVersionPlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-08-31 20:09:55.123521')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_WRITTEN)
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:key_value')
expected_message = (
'[{0:s}] '
'CSDVersion: [REG_SZ] Service Pack 1 '
'CurrentVersion: [REG_SZ] 5.1 '
'ProductName: [REG_SZ] MyTestOS '
'RegisteredOwner: [REG_SZ] A Concerned Citizen').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
event = events[1]
self.CheckTimestamp(event.timestamp, '2012-08-31 20:09:55.000000')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_INSTALLATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.data_type, 'windows:registry:installation')
self.assertEqual(event_data.key_path, key_path)
self.assertEqual(event_data.owner, 'A Concerned Citizen')
self.assertEqual(event_data.product_name, 'MyTestOS')
self.assertEqual(event_data.service_pack, 'Service Pack 1')
self.assertEqual(event_data.version, '5.1')
expected_message = (
'MyTestOS 5.1 Service Pack 1 '
'Owner: A Concerned Citizen '
'Origin: {0:s}').format(key_path)
expected_short_message = (
'MyTestOS 5.1 Service Pack 1 '
'Origin: HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Win...')
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
def testProcessFile(self):
"""Tests the Process function on a Windows Registry file."""
test_file_entry = self._GetTestFileEntry(['SOFTWARE-RunTests'])
key_path = (
'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = windows_version.WindowsVersionPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-03-15 07:09:20.671875')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:key_value')
expected_message = (
'[{0:s}] '
'BuildGUID: [REG_SZ] f4bf21b9-55fe-4ee8-a84b-0e91cbd5fe5d '
'BuildLab: [REG_SZ] 7601.win7sp1_gdr.111118-2330 '
'BuildLabEx: [REG_SZ] 7601.17727.amd64fre.win7sp1_gdr.111118-2330 '
'CSDBuildNumber: [REG_SZ] 1130 '
'CSDVersion: [REG_SZ] Service Pack 1 '
'CurrentBuild: [REG_SZ] 7601 '
'CurrentBuildNumber: [REG_SZ] 7601 '
'CurrentType: [REG_SZ] Multiprocessor Free '
'CurrentVersion: [REG_SZ] 6.1 '
'DigitalProductId: [REG_BINARY] (164 bytes) '
'DigitalProductId4: [REG_BINARY] (1272 bytes) '
'EditionID: [REG_SZ] Ultimate '
'InstallationType: [REG_SZ] Client '
'PathName: [REG_SZ] C:\\Windows '
'ProductId: [REG_SZ] 00426-065-0381817-86216 '
'ProductName: [REG_SZ] Windows 7 Ultimate '
'RegisteredOrganization: [REG_SZ] '
'RegisteredOwner: [REG_SZ] Windows User '
'SoftwareType: [REG_SZ] System '
'SystemRoot: [REG_SZ] C:\\Windows').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| 37.264574 | 80 | 0.724669 |
7c154bd7941e6664ea91468d29e01f725ad32c14 | 2,914 | py | Python | app/auth/views.py | ifaraag/app | d952f0dc58fd703074c19ed3235c1520119baf5f | ["MIT"] | null | null | null | app/auth/views.py | ifaraag/app | d952f0dc58fd703074c19ed3235c1520119baf5f | ["MIT"] | null | null | null | app/auth/views.py | ifaraag/app | d952f0dc58fd703074c19ed3235c1520119baf5f | ["MIT"] | null | null | null |
from flask import Blueprint, render_template, redirect, url_for, request, flash
from flask.ext.login import login_required, login_user, logout_user
from werkzeug import check_password_hash, generate_password_hash
from app import db, login_manager, pubnub, app, _callback
from .models import User
from .forms import LoginForm, SignupForm
mod_auth = Blueprint('auth', __name__)
@mod_auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
error = None
print(request.method)
if request.method == 'POST':
user = db.users.find_one({'username': request.form['username']})
if not user:
error = 'User does not exist'
elif not check_password_hash(user['password'], request.form['password']):
error = 'Invalid credentials. Please try again.'
else:
user_obj = User(user['username'])
login_user(user_obj)
return redirect(url_for('devices.list_devices'))
return render_template('auth/login.html',
title='Log In to Hydrosmart',
form=form,
error=error)
@mod_auth.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignupForm(request.form)
error = None
if request.method == 'POST':
existing_user = db.users.find_one({'username' :
request.form['username']})
if existing_user:
error = 'Username already exists'
else:
new_user = {'username' : request.form['username'],
'email' : request.form['email'],
'zip' : request.form['zip'],
'password' : generate_password_hash(request.form['password'])}
db.users.insert_one(new_user)
user = db.users.find_one({'username': request.form['username']})
pubnub.channel_group_add_channel(channel_group=app.config['PUBNUB_CHANNEL_GRP'], channel=user['username'])
pubnub.grant(channel=user['username'], auth_key=app.config['PUBNUB_AUTH_KEY'], read=True, write=True, manage=True, ttl=0)
return redirect(url_for('dashboard.dashboard'))
return render_template('auth/signup.html', form=form,
title='Sign Up for Hydrosmart', error=error)
# @mod_auth.route('/googlelogin', methods=['GET', 'POST'])
@mod_auth.route("/logout")
@login_required
def logout():
logout_user()
flash("Logged out.")
return redirect('/login')
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login')
@login_manager.user_loader
def load_user(username):
u = db.users.find_one({'username': username})
if not u:
return None
return User(u['username'])
def callback(message, channel):
db.data.insert_one(message)
def error(message):
db.data.insert_one(message)
| 37.358974 | 133 | 0.630062 |
ed4a13b59ded1610fd43bb3fef06cbbb21bf228f | 618 | py | Python | project1/fabfile.py | g-grilli/django | e13bba288513ba57ba6f3fe4453058de8a05e609 | ["MIT"] | null | null | null | project1/fabfile.py | g-grilli/django | e13bba288513ba57ba6f3fe4453058de8a05e609 | ["MIT"] | null | null | null | project1/fabfile.py | g-grilli/django | e13bba288513ba57ba6f3fe4453058de8a05e609 | ["MIT"] | null | null | null |
from fabric.api import run, env, sudo, cd, prefix
env.hosts = ['108.61.241.147']
env.user = 'ggril'
DIR = '/home/ggril/.ssh/django/project1'
VENV = 'source /home/ggril/.virtualenvs/django/bin/activate && source SECRETS.ENV'
def start ():
with cd(DIR):
with prefix(VENV):
run('pm2 start uwsgi -- --ini uwsgi.ini > start.log')
def stop ():
run('pm2 stop all > stop.log')
def deploy ():
with cd(DIR):
run('git pull')
with prefix(VENV):
run('pip install -r requirements.txt > install.log')
run('pm2 restart all > restart.log')
def hello ():
print("Hello") | 22.888889 | 82 | 0.61165 |
1198e10ff3d4853dd9f52d5bd4261cf385a0536a | 2,388 | py | Python | preprocessing/generate_diffs.py | iernest/dfdc_deepfake_challenge | dd36d791d4662400479ceb9aa595cc3a1fee2de7 | ["MIT"] | null | null | null | preprocessing/generate_diffs.py | iernest/dfdc_deepfake_challenge | dd36d791d4662400479ceb9aa595cc3a1fee2de7 | ["MIT"] | null | null | null | preprocessing/generate_diffs.py | iernest/dfdc_deepfake_challenge | dd36d791d4662400479ceb9aa595cc3a1fee2de7 | ["MIT"] | null | null | null |
import argparse
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
# from skimage.measure import compare_ssim
from skimage.metrics import structural_similarity
from functools import partial
from multiprocessing.pool import Pool
from tqdm import tqdm
from preprocessing.utils import get_original_with_fakes
import cv2
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
import numpy as np
cache = {}
def save_diffs(pair, root_dir):
ori_id, fake_id = pair
ori_dir = os.path.join(root_dir, "crops", ori_id)
fake_dir = os.path.join(root_dir, "crops", fake_id)
diff_dir = os.path.join(root_dir, "diffs", fake_id)
os.makedirs(diff_dir, exist_ok=True)
for frame in range(320):
if frame % 10 != 0:
continue
for actor in range(2):
image_id = "{}_{}.png".format(frame, actor)
diff_image_id = "{}_{}_diff.png".format(frame, actor)
ori_path = os.path.join(ori_dir, image_id)
fake_path = os.path.join(fake_dir, image_id)
diff_path = os.path.join(diff_dir, diff_image_id)
if os.path.exists(ori_path) and os.path.exists(fake_path):
img1 = cv2.imread(ori_path, cv2.IMREAD_COLOR)
img2 = cv2.imread(fake_path, cv2.IMREAD_COLOR)
try:
d, a = structural_similarity(img1, img2, multichannel=True, full=True)
a = 1 - a
diff = (a * 255).astype(np.uint8)
diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
cv2.imwrite(diff_path, diff)
except:
pass
def parse_args():
parser = argparse.ArgumentParser(
description="Extract image diffs")
parser.add_argument("--root-dir", help="root directory", default="/mnt/sota/datasets/deepfake")
args = parser.parse_args()
return args
def main():
args = parse_args()
pairs = get_original_with_fakes(args.root_dir)
os.makedirs(os.path.join(args.root_dir, "diffs"), exist_ok=True)
with Pool(processes=os.cpu_count() - 2) as p:
with tqdm(total=len(pairs)) as pbar:
func = partial(save_diffs, root_dir=args.root_dir)
for v in p.imap_unordered(func, pairs):
pbar.update()
if __name__ == '__main__':
main()
| 32.27027 | 99 | 0.628978 |
c2b551ab63eb757ef7a5f55573dcd5d2347ab39a | 2,767 | py | Python | resume/resume/settings.py | Joyash23/resume-website | 6917c8e00b71d9ec886f72744a6eec10ba484811 | ["MIT"] | null | null | null | resume/resume/settings.py | Joyash23/resume-website | 6917c8e00b71d9ec886f72744a6eec10ba484811 | ["MIT"] | null | null | null | resume/resume/settings.py | Joyash23/resume-website | 6917c8e00b71d9ec886f72744a6eec10ba484811 | ["MIT"] | 7 | 2017-06-07T12:57:49.000Z | 2020-10-17T03:16:41.000Z |
"""
Django settings for resume project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_vlcumk0%la^)&8y_emux%x%=qhcv-v+lzap^&mlm&tl15xym6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cv',
'accounts',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'resume.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'resume.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGIN_URL='/login/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media") | 25.385321 | 71 | 0.698591 |
c8826c11ba894d29b5f5da0d2bf2ccd20989e161 | 384 | py | Python | schemas/response/transaction_response_schema.py | borko81/parking_system_with_flask | 0ff10422cd1892bcb8c4c6958a159b08c1da919b | ["MIT"] | 1 | 2022-01-14T15:31:11.000Z | 2022-01-14T15:31:11.000Z | schemas/response/transaction_response_schema.py | borko81/parking_system_with_flask | 0ff10422cd1892bcb8c4c6958a159b08c1da919b | ["MIT"] | 5 | 2021-12-03T13:27:44.000Z | 2021-12-05T11:46:08.000Z | schemas/response/transaction_response_schema.py | borko81/parking_system_with_flask | 0ff10422cd1892bcb8c4c6958a159b08c1da919b | ["MIT"] | null | null | null |
from marshmallow import Schema, fields
from schemas.response.pay_type_response_schema import PayTypeResponseSchema
class TransactionsSchema(Schema):
id = fields.Integer()
pr_id = fields.Integer()
created_on = fields.DateTime()
transaction_id = fields.Integer()
pay_type = fields.Integer()
payment_name = fields.Nested(PayTypeResponseSchema(only=("name",)))
| 29.538462 | 75 | 0.755208 |
63edd1fcc3f51b63fe2ebf9cafefd915afc35d2d | 15,006 | py | Python | pysimgame/plotting/manager.py | ScienceGamez/pysimgame | 6c89280441358722efbc63b6d8aa914cbe21575e | ["WTFPL"] | null | null | null | pysimgame/plotting/manager.py | ScienceGamez/pysimgame | 6c89280441358722efbc63b6d8aa914cbe21575e | ["WTFPL"] | null | null | null | pysimgame/plotting/manager.py | ScienceGamez/pysimgame | 6c89280441358722efbc63b6d8aa914cbe21575e | ["WTFPL"] | null | null | null |
"""plots models for ills fate.
The Graph manager handles plot creation and the transfer of data from
the model.
Different plot windows can be created:
- Parameters evolution graph. Can plot the parameters of a model
through time.
- Regions evolution graph. Plots the same parameter across regions.
- Regions comparison heatmap.
"""
from __future__ import annotations
import logging
import pathlib
import threading
import time
from importlib.machinery import SourceFileLoader
from typing import TYPE_CHECKING, Dict, List, Tuple, Type
import numpy as np
import pandas
import pygame
import pygame_gui
import pysimgame
from matplotlib.lines import Line2D
from pygame_gui.elements.ui_button import UIButton
from pygame_gui.ui_manager import UIManager
from pygame_matplotlib import pygame_color_to_plt
from pygame_matplotlib.backend_pygame import FigureSurface
from pygame_matplotlib.gui_window import UIPlotWindow
from pysimgame.model import ModelManager
from pysimgame.plotting.base import AbstractPlotsManager
from pysimgame.utils.abstract_managers import GameComponentManager
from pysimgame.utils.strings import beautify_parameter_name
from ..utils.maths import normalize
if TYPE_CHECKING:
from ..game_manager import GameManager
from .plot import Plot
import matplotlib
import matplotlib.axes
import matplotlib.artist
from pysimgame.types import AttributeName, RegionName
import matplotlib.pyplot as plt
COLORS_LIST = ("red", "blue", "green", "orange")
REGION_COLORS = {""}
Region: Type[str] = str
_PLOT_MANAGER: PlotsManager
class PysgamePlotWindow(UIPlotWindow):
"""Attributes for a pysimgame plot.
This is not used.
TODO: See if this could be useful somehow.
"""
# Stores the info on what to plot
regions: List[str]
attributes: List[str]
_regions: List[str]
_attributes: List[str]
# Stores the attributes of the figure
figure: FigureSurface
ax: matplotlib.axes.Axes
artists: List[matplotlib.artist.Artist]
@property
def regions(self) -> List[str]:
return self._regions
@regions.setter
def regions(self, regions: List[str]):
if regions is None:
regions = []
self._regions = list(regions).copy()
@property
def attributes(self) -> List[str]:
return self._attributes
@attributes.setter
def attributes(self, attributes: List[str]):
if attributes is None:
attributes = []
self._attributes = list(attributes).copy()
def __str__(self) -> str:
return "Plot window: \n \t Regions {} \t attributes: {}".format(
self.regions, self.attributes
)
class PlotsManager(AbstractPlotsManager):
"""A manager for plots.
    Registers all the plots that were created and that are now active.
    Updates the plots at every step with the new data.
    Notes on the implementation:
    The plotting process runs on separate threads, as it takes a bit of
    time, so the main process can run without needing to wait for the plots.
"""
ui_plot_windows: Dict[str, UIPlotWindow]
GAME_MANAGER: GameManager
MODEL_MANAGER: ModelManager
axes: Dict[str, List[matplotlib.axes.Axes]]
lines: Dict[str, List[Line2D]]
_connected: bool = False
region_colors: Dict[str, Tuple[float, float, float, float]]
_menu_button: UIButton
_plot_list_buttons: List[UIButton]
_menu_button_position: Tuple[int, int]
_content_thread: threading.Thread
_surface_thread: threading.Thread
_figsurface_locks: Dict[str, threading.Lock]
_last_time: float
# Initialization Methods #
def prepare(self):
"""Prepare the graph manager."""
super().prepare()
self.ui_plot_windows = {}
self.axes = {}
self.lines = {}
self.previous_serie = None
self._plot_list_buttons = []
self.plots = {}
self._content_thread = None
self._surface_thread = None
self._figsurface_locks = {}
self._last_time = time.time()
self._read_regions_colors()
# Manager for the standard UI stuff
self.UI_MANAGER = self.GAME_MANAGER.UI_MANAGER
# Manager for showing the plots
# This should not be called by the main thread !
# So we make it private
self._UI_MANAGER = UIManager(
self.GAME_MANAGER.MAIN_DISPLAY.get_size(),
)
global _PLOT_MANAGER
_PLOT_MANAGER = self
def _read_regions_colors(self):
self.region_colors = {
name: pygame_color_to_plt(cmpnt.color)
for name, cmpnt in self.GAME_MANAGER.game.REGIONS_DICT.items()
if name is not None
}
self.logger.debug(f"Regions Color {self.region_colors}")
def connect(self):
super().connect()
self._menu_button_position = (
self.GAME_MANAGER.MENU_OVERLAY.overlay_buttons[-1]
.get_abs_rect()
.bottomleft
)
# Adding plots methods
def get_a_rect(self) -> pygame.Rect:
"""Return a rect from a nice place in the main window.
TODO: make it a nice place for real...
"""
return pygame.Rect(0, 0, 300, 300)
def register_plot(self, plot: Plot):
"""Add a :py:class:`Plot` to the manager."""
super().register_plot(plot)
# Also create the locks and surface for it
self._figsurface_locks[plot.name] = threading.Lock()
self.logger.debug(f"Lock created : {self._figsurface_locks[plot.name]}")
if self._connected:
self._create_plot_window(plot.name)
self.logger.setLevel(logging.INFO)
def _create_plot_window(self, plot_name: str | Plot):
"""Create the plot on the window.
        Assumes plot_window has the regions and attributes it needs to plot.
"""
if not isinstance(plot_name, str):
plot_name = plot_name.name
if plot_name not in self.ui_plot_windows.keys():
# Needs to recall the ui to update
figure, ax = plt.subplots(1, 1)
plot_window = UIPlotWindow(
self.get_a_rect(),
self._UI_MANAGER,
figure,
window_display_title=plot_name,
object_id=f"#plot_window",
resizable=True,
)
self.ui_plot_windows[plot_name] = plot_window
# The first ax is automatically the first line
self.axes[plot_name] = [ax]
for plot_line in self.plots[plot_name].plot_lines[1:]:
# If other plot lines have different y values
if not plot_line.share_y:
self.axes[plot_name].append(ax.twinx())
self.lines[plot_name] = []
# plot_window.get_container().set_image(figure)
# plot_window._created = False
self.logger.info("Graph added.")
self.logger.debug(f"Graph: {plot_window}.")
else:
plot_window = self.ui_plot_windows[plot_name]
if len(self.model_outputs) < 2:
# Cannot plot lines if only one point
return
# Now it is created
plot_window._created = True
plot_window.update_window_image()
self.logger.debug(f"FigureSurf {plot_window.figuresurf}")
def show_plots_list(self):
x, y = self._menu_button_position
width = 100
heigth = 30
if self._plot_list_buttons:
for button in self._plot_list_buttons:
button.show()
else:
# Create
del self._plot_list_buttons
self._plot_list_buttons = [
UIButton(
relative_rect=pygame.Rect(
x, y + i * heigth, width, heigth
),
text=name,
manager=self.UI_MANAGER,
)
for i, name in enumerate(self.plots.keys())
]
# Adding plots methods
def process_events(self, event: pygame.event.Event) -> bool:
"""Process the events from the main loop."""
self._UI_MANAGER.process_events(event)
if super().process_events(event):
return True
match event:
case pygame.event.EventType(type=pygame_gui.UI_BUTTON_PRESSED):
if event.ui_element in self._plot_list_buttons:
self.logger.info(f"Create Plot {event.ui_element.text}")
self._create_plot_window(event.ui_element.text)
# Deletes all the buttons
for button in self._plot_list_buttons:
button.hide()
return True
case pygame.event.EventType(type=pygame_gui.UI_WINDOW_CLOSE):
                # ui_plot_windows maps plot names to windows, so test against the values
                if event.ui_element in self.ui_plot_windows.values():
# Remove the window
window: UIPlotWindow = event.ui_element
del self.ui_plot_windows[window.window_display_title]
return True
case pygame.event.EventType(type=pysimgame.ModelStepped):
# Update the plot on a separated thread
if (
self._content_thread is None
or not self._content_thread.is_alive()
):
del self._content_thread
self._content_thread = threading.Thread(
target=self.update, name="Plot Update"
)
self._content_thread.start()
self.logger.debug(
f"Thread Started : {self._content_thread}"
)
def update(self):
"""Update the plots based on the new outputs.
All the windows are updated with their parameters one by one.
"""
model_outputs = self.MODEL_MANAGER.outputs
x = self.MODEL_MANAGER.time_axis.copy()
if len(model_outputs) < 2:
# Cannot plot lines if only one point
return
for plot_name, plot_window in self.ui_plot_windows.items():
self.logger.info(f"Plotting {plot_window}.")
if not plot_window.visible:
# If the window is not visible
continue
if not plot_window._created:
self._create_plot_window(plot_name)
# First get the ax and cleans it
axes = self.axes[plot_name]
for ax in axes:
ax.clear()
ax.set_xlim(x[0], x[-1])
# Will follow the ax on which to plot
ax_index = int(0)
# Plot all the lines required
for plot_line in self.plots[plot_name].plot_lines:
ax = axes[ax_index]
ax_index += 0 if plot_line.share_y else 1
if plot_line.y_lims is not None:
ax.set_ylim(plot_line.y_lims)
# Gets the attributes
y = (
(
self.model_outputs[
plot_line.region, plot_line.attribute
]
.to_numpy()
.reshape(-1)
)
if isinstance(plot_line.attribute, str)
else np.c_[ # Concatenate the values
[
self.model_outputs[plot_line.region, attr]
.to_numpy()
.reshape(-1)
for attr in plot_line.attribute
]
].T
)
if "label" not in plot_line.kwargs:
plot_line.kwargs[
"label"
] = plot_line.attribute or " ".join(
(plot_line.region, plot_line.attribute)
)
artists = ax.plot(
x,
y,
# color=self.region_colors[plot_line.region],
**plot_line.kwargs,
)
self.logger.debug(
f"Plotting {plot_line.region} {plot_line.attribute}."
)
self.logger.debug(f"Setting: \n x: {x} \n y: {y}.")
ax.legend()
# lock the figsurface, so it is not used during the drawing
self._figsurface_locks[plot_name].acquire()
self.logger.debug(
f"Lock acquired : {self._figsurface_locks[plot_name]}"
)
plot_window.figuresurf.canvas.draw()
plot_window.update_window_image()
self._figsurface_locks[plot_name].release()
# plot_window.figuresurf.canvas.flush_events()
# plot_window.get_container().set_image(plot_window.figuresurf)
def draw(self):
# Call the thread drawing the plot
# if self._surface_thread is None or not self._surface_thread.is_alive():
# self._surface_thread = threading.Thread(
# target=self._draw, name="Drawing Plots on MAIN_DISPLAY"
# )
# self._surface_thread.start()
# self.logger.debug(f"Thread Started : {self._surface_thread}")
self._draw()
# Draw the UI
self._UI_MANAGER.draw_ui(self.GAME_MANAGER.MAIN_DISPLAY)
def _draw(self):
# Aquire the lock on all the active plots
locks = [
self._figsurface_locks[name]
for name in self.ui_plot_windows.keys()
]
for lock in locks:
lock.acquire()
self.logger.debug(f"Lock acquired : {lock}")
# Gets the time required for the UI MANAGER update
_time_elapsed = time.time() - self._last_time
self._UI_MANAGER.update(_time_elapsed)
self._last_time = time.time()
for lock in locks:
lock.release()
self.logger.debug(f"Lock released : {lock}")
def quit(self):
self._content_thread.join()
def coordinates_from_serie(self, serie):
"""Convert a serie to pixel coordinates.
Need to rescale the values of the serie to
the ones of the display.
Also needs to convert geometry startig from
top to down.
"""
pixels_x, pixels_y = self.get_size()
# Split the x y values of the serie
x_axis = serie.keys().to_numpy()
y_axis = serie.values
# Rescale to screen size between 0 and 1
x_norm = normalize(x_axis)
y_norm = normalize(y_axis)
# y starts from the bottom instead of top
y_norm = 1.0 - y_norm
# Compute the positions on the screen
x_screen = pixels_x * x_norm
y_screen = pixels_y * y_norm
# Return as list of pygame coordinates
return [(x, y) for x, y in zip(x_screen, y_screen)]
| 33.873589 | 81 | 0.584833 |
a8917b64c57890f40ab57686b3c31d62b411ca9b | 4,283 | py | Python | RecoHI/HiJetAlgos/python/HiRecoPFJets_cff.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | ["Apache-2.0"] | null | null | null | RecoHI/HiJetAlgos/python/HiRecoPFJets_cff.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | ["Apache-2.0"] | null | null | null | RecoHI/HiJetAlgos/python/HiRecoPFJets_cff.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | ["Apache-2.0"] | null | null | null |
import FWCore.ParameterSet.Config as cms
## Default Parameter Sets
from RecoJets.JetProducers.AnomalousCellParameters_cfi import *
from RecoHI.HiJetAlgos.HiPFJetParameters_cff import *
#pseudo towers for noise suppression background subtraction
import RecoHI.HiJetAlgos.particleTowerProducer_cfi as _mod
PFTowers = _mod.particleTowerProducer.clone(useHF = True)
#dummy sequence to speed-up reconstruction in pp_on_AA era
pfEmptyCollection = cms.EDFilter('GenericPFCandidateSelector',
src = cms.InputTag('particleFlow'),
cut = cms.string("pt<0")
)
ak5PFJets = cms.EDProducer(
"FastjetJetProducer",
HiPFJetParameters,
AnomalousCellParameters,
MultipleAlgoIteratorBlock,
jetAlgorithm = cms.string("AntiKt"),
rParam = cms.double(0.5)
)
ak5PFJets.src = 'particleFlow'
akPu5PFJets = ak5PFJets.clone(
jetType = 'BasicJet',
doPVCorrection = False,
doPUOffsetCorr = True,
subtractorName = "MultipleAlgoIterator",
src = 'PFTowers',
doAreaFastjet = False,
puPtMin = cms.double(25)
)
akPu1PFJets = akPu5PFJets.clone(rParam = 0.1, puPtMin = 10)
akPu2PFJets = akPu5PFJets.clone(rParam = 0.2, puPtMin = 10)
akPu3PFJets = akPu5PFJets.clone(rParam = 0.3, puPtMin = 15)
akPu4PFJets = akPu5PFJets.clone(rParam = 0.4, puPtMin = 20)
akPu6PFJets = akPu5PFJets.clone(rParam = 0.6, puPtMin = 30)
akPu7PFJets = akPu5PFJets.clone(rParam = 0.7, puPtMin = 35)
hiPFCandCleanerforJets = cms.EDFilter('GenericPFCandidateSelector',
src = cms.InputTag('particleFlow'),
cut = cms.string("pt>5 && abs(eta)< 2")
)
ak4PFJetsForFlow = akPu5PFJets.clone(
Ghost_EtaMax = 5.0,
Rho_EtaMax = 4.4,
doRhoFastjet = False,
jetPtMin = 15.0,
nSigmaPU = 1.0,
rParam = 0.4,
radiusPU = 0.5,
src = "hiPFCandCleanerforJets",
)
kt4PFJetsForRho = cms.EDProducer(
"FastjetJetProducer",
HiPFJetParameters,
AnomalousCellParameters,
jetAlgorithm = cms.string("Kt"),
rParam = cms.double(0.4)
)
kt4PFJetsForRho.src = 'particleFlow'
kt4PFJetsForRho.doAreaFastjet = True
kt4PFJetsForRho.jetPtMin = 0.0
kt4PFJetsForRho.GhostArea = 0.005
from RecoHI.HiJetAlgos.hiFJRhoProducer import hiFJRhoProducer
import RecoHI.HiJetAlgos.hiFJRhoFlowModulationProducer_cfi as _mod
hiFJRhoFlowModulation = _mod.hiFJRhoFlowModulationProducer.clone()
import RecoHI.HiJetAlgos.hiPuRhoProducer_cfi as _mod
hiPuRho = _mod.hiPuRhoProducer.clone()
akCs4PFJets = cms.EDProducer(
"CSJetProducer",
HiPFJetParameters,
AnomalousCellParameters,
jetAlgorithm = cms.string("AntiKt"),
rParam = cms.double(0.4),
etaMap = cms.InputTag('hiPuRho', 'mapEtaEdges'),
rho = cms.InputTag('hiPuRho', 'mapToRho'),
rhom = cms.InputTag('hiPuRho', 'mapToRhoM'),
csRParam = cms.double(-1.),
csAlpha = cms.double(2.),
writeJetsWithConst = cms.bool(True),
useModulatedRho = cms.bool(False),
rhoFlowFitParams = cms.InputTag('hiFJRhoFlowModulation', 'rhoFlowFitParams'),
jetCollInstanceName = cms.string("pfParticlesCs"),
)
akCs4PFJets.src = 'particleFlow'
akCs4PFJets.doAreaFastjet = True
akCs4PFJets.jetPtMin = 0.0
akCs4PFJets.useExplicitGhosts = cms.bool(True)
akCs4PFJets.GhostArea = 0.005
akCs3PFJets = akCs4PFJets.clone(rParam = 0.3)
hiRecoPFJetsTask = cms.Task(
PFTowers,
akPu3PFJets,
akPu4PFJets,
akPu5PFJets,
hiPFCandCleanerforJets,
kt4PFJetsForRho,
ak4PFJetsForFlow,
hiFJRhoProducer,
hiPuRho,
hiFJRhoFlowModulation,
akCs3PFJets,
akCs4PFJets
)
hiRecoPFJets = cms.Sequence(hiRecoPFJetsTask)
from Configuration.ProcessModifiers.run2_miniAOD_pp_on_AA_103X_cff import run2_miniAOD_pp_on_AA_103X
run2_miniAOD_pp_on_AA_103X.toModify(akCs4PFJets,src = 'cleanedParticleFlow')
| 35.106557 | 100 | 0.644175 |
8dc4f3c19e212c1c4ed9233d78cf6316cbac9765 | 1,706 | py | Python | sdk/test/test_core_product.py | DarrahK/yapily-sdk-python | 2dcf9a403feafab7dcaf140dabe61794bd4debd3 | ["MIT"] | null | null | null | sdk/test/test_core_product.py | DarrahK/yapily-sdk-python | 2dcf9a403feafab7dcaf140dabe61794bd4debd3 | ["MIT"] | null | null | null | sdk/test/test_core_product.py | DarrahK/yapily-sdk-python | 2dcf9a403feafab7dcaf140dabe61794bd4debd3 | ["MIT"] | null | null | null |
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 1.157.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import yapily
from yapily.models.core_product import CoreProduct # noqa: E501
from yapily.rest import ApiException
class TestCoreProduct(unittest.TestCase):
"""CoreProduct unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test CoreProduct
        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included """
# model = yapily.models.core_product.CoreProduct() # noqa: E501
if include_optional :
return CoreProduct(
monthly_maximum_charge = '0',
product_description = '0',
product_url = '0',
sales_access_channels = [
'Branch'
],
servicing_access_channels = [
'ATM'
],
tcs_and_cs_url = '0'
)
else :
return CoreProduct(
)
def testCoreProduct(self):
"""Test CoreProduct"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 27.516129 | 158 | 0.606682 |
e93c4a75547dfda8a250d0359729c8d057515334 | 1,251 | py | Python | examples/development/__init__.py | YaoYao1995/mbpo | b9571e469459ce3a632b19dc3fee68c9ac3857b2 | ["MIT"] | null | null | null | examples/development/__init__.py | YaoYao1995/mbpo | b9571e469459ce3a632b19dc3fee68c9ac3857b2 | ["MIT"] | null | null | null | examples/development/__init__.py | YaoYao1995/mbpo | b9571e469459ce3a632b19dc3fee68c9ac3857b2 | ["MIT"] | null | null | null |
"""Provides functions that are utilized by the command line interface.
In particular, the examples are exposed to the command line interface
(defined in `softlearning.scripts.console_scripts`) through the
`get_trainable_class`, `get_variant_spec`, and `get_parser` functions.
"""
def get_trainable_class(*args, **kwargs):
from .main import ExperimentRunner
return ExperimentRunner
# def get_variant_spec(command_line_args, *args, **kwargs):
# from .variants import get_variant_spec
# variant_spec = get_variant_spec(command_line_args, *args, **kwargs)
# return variant_spec
def get_params_from_file(filepath, params_name='params'):
import importlib
from dotmap import DotMap
module = importlib.import_module(filepath)
params = getattr(module, params_name)
params = DotMap(params)
return params
def get_variant_spec(command_line_args, *args, **kwargs):
from .base import get_variant_spec
import importlib
params = get_params_from_file(command_line_args.config)
variant_spec = get_variant_spec(command_line_args, *args, params, **kwargs)
return variant_spec
def get_parser():
from examples.utils import get_parser
parser = get_parser()
return parser
| 32.921053 | 80 | 0.748201 |
2aedcc8a0cb213a20c24bc71bf516f9cccb9a9b7 | 3,504 | py | Python | src/Preprocessor.py | cetinsamet/movie-genre-classification | d52088210cbd371846063ebdccc5a77c4582deaf | ["MIT"] | 2 | 2019-09-01T13:03:05.000Z | 2020-09-12T06:04:05.000Z | src/Preprocessor.py | cetinsamet/movie-genre-classification | d52088210cbd371846063ebdccc5a77c4582deaf | ["MIT"] | null | null | null | src/Preprocessor.py | cetinsamet/movie-genre-classification | d52088210cbd371846063ebdccc5a77c4582deaf | ["MIT"] | 2 | 2019-05-20T08:10:27.000Z | 2021-04-26T06:51:12.000Z |
#
# Preprocessor.py
#
# Created by Samet Cetin.
# Contact: cetin.samet@outlook.com
#
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import nltk
import os
from nltk.corpus import stopwords
import codecs
import errno
import string
class Preprocessor:
def __init__(self, dataset_directory="Dataset", processed_dataset_directory= "ProcessedDataset"):
self.dataset_directory = dataset_directory
self.processed_dataset_directory = processed_dataset_directory
nltk.download("stopwords")
nltk.download("punkt")
self.stop_words = set(stopwords.words('english'))
def _remove_puncs_numbers_stop_words(self, tokens):
"""Remove punctuations in the words, words including numbers and words in the stop_words list.
:param tokens: list of string
:return: list of string with cleaned version
"""
tokens = [token.replace("'", '') for token in tokens]
tokens_cleaned = [token for token in tokens if token.isalpha() and token not in self.stop_words]
return tokens_cleaned
def _tokenize(self, sentence):
"""Tokenizes given string.
:param sentence: string to tokenize
:return: list of string with tokens
"""
sentence_tokenized = nltk.tokenize.word_tokenize(sentence.replace("\n", " "))
return [token.lower() for token in sentence_tokenized]
def _stem(self, tokens):
"""Stems the tokens with nltk SnowballStemmer
:param tokens: list of string
:return: list of string with words stems
"""
stemmer = nltk.SnowballStemmer(language='english')
tokens_stemmed = [stemmer.stem(token) for token in tokens]
return tokens_stemmed
def preprocess_document(self, document):
"""Calls methods _tokenize, _remove_puncs_numbers_stop_words and _stem respectively.
:param document: string to preprocess
:return: string with processed version
"""
doc_tokenized = self._tokenize(document)
doc_cleaned = self._remove_puncs_numbers_stop_words(doc_tokenized)
doc_stemmed = self._stem(doc_cleaned)
doc_stemmed_str = ' '.join(doc_stemmed)
return doc_stemmed_str
def preprocess(self):
"""Walks through the given directory and calls preprocess_document method. The output is
persisted into processed_dataset_directory by keeping directory structure.
:return: None
"""
for root, dirs, files in os.walk(self.dataset_directory):
if os.path.basename(root) != self.dataset_directory:
print("Processing", root, "directory.")
                # str.lstrip strips a character set, not a prefix; use the relative
                # sub-path so the directory structure is reproduced correctly.
                dest_dir = self.processed_dataset_directory + "/" + os.path.relpath(root, self.dataset_directory)
if not os.path.exists(dest_dir):
try:
os.makedirs(dest_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
for file in files:
file_path = root + "/" + file
with codecs.open(file_path, "r", "ISO-8859-1") as f:
data = f.read().replace("\n", " ")
processed_data = self.preprocess_document(data)
output_file_path = dest_dir + "/" + file
with codecs.open(output_file_path, "w", "ISO-8859-1") as o:
o.write(processed_data)
| 40.744186 | 109 | 0.619292 |
e462208fdb4b19a47ea33a1ed25429fe663f2254 | 1,747 | py | Python | elzzur/snake.py | aogier/elzzur | 4d3c5afa24226b4c4bad193afeaba1ebbef2f6b6 | ["MIT"] | 3 | 2016-06-27T14:15:14.000Z | 2019-08-15T04:35:48.000Z | elzzur/snake.py | aogier/elzzur | 4d3c5afa24226b4c4bad193afeaba1ebbef2f6b6 | ["MIT"] | null | null | null | elzzur/snake.py | aogier/elzzur | 4d3c5afa24226b4c4bad193afeaba1ebbef2f6b6 | ["MIT"] | 1 | 2019-02-15T08:10:15.000Z | 2019-02-15T08:10:15.000Z |
#!/usr/bin/env python
# coding=utf-8
"""
A Snake represents a (possibly partial) list of adjacent board cells,
represented by ``(x, y)`` pairs, 0-indexing.
"""
from __future__ import absolute_import
from __future__ import print_function
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2016, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "0.0.1"
__email__ = "alberto@albertopettarin.it"
__status__ = "Production"
class Snake(object):
"""
A Snake represents a (possibly partial) list of adjacent board cells,
represented by ``(x, y)`` pairs, 0-indexing.
:param list cells: list of (x, y) pairs representing the cells of the snake
"""
def __init__(self, cells):
self.cells = cells
def __str__(self):
return u" ".join(["(%d,%d)" % cell for cell in self.cells])
def __len__(self):
return len(self.cells)
@property
def start(self):
"""
The cell where the snake starts.
:rtype: (int, int)
"""
return self.cells[0]
@property
def end(self):
"""
The cell where the snake ends.
:rtype: (int, int)
"""
return self.cells[-1]
def has_cell(self, cell):
"""
Return ``True`` if the given ``(x, y)`` cell is already in the snake.
:param tuple cell: the ``(x, y)`` cell to be checked for
:rtype: bool
"""
return cell in self.cells
def extend(self, cell):
"""
Return a new Snake which is the current Snake extended with the given cell.
:param tuple cell: the ``(x, y)`` cell to be added
:rtype: Snake
"""
return Snake(self.cells + [cell])
| 23.931507 | 83 | 0.593017 |
00cd6222230d848e84cd4370fc1053335fb668e0 | 37,922 | py | Python | psychrnn/backend/rnn.py | tianw6/PsychRNN | 8d874002d83916f9cc687d1313cfb2f9c15e57f9 | ["MIT"] | 90 | 2020-05-25T05:29:13.000Z | 2022-03-29T15:49:12.000Z | psychrnn/backend/rnn.py | tianw6/PsychRNN | 8d874002d83916f9cc687d1313cfb2f9c15e57f9 | ["MIT"] | 26 | 2020-06-23T20:03:25.000Z | 2021-08-05T08:44:37.000Z | psychrnn/backend/rnn.py | tianw6/PsychRNN | 8d874002d83916f9cc687d1313cfb2f9c15e57f9 | ["MIT"] | 28 | 2020-05-25T05:29:04.000Z | 2022-03-29T11:39:05.000Z |
from __future__ import division
from __future__ import print_function
from abc import ABCMeta, abstractmethod
# abstract class python 2 & 3 compatible
ABC = ABCMeta('ABC', (object,), {})
import tensorflow as tf
import numpy as np
import sys
from time import time
from os import makedirs, path
from inspect import isgenerator
from psychrnn.backend.regularizations import Regularizer
from psychrnn.backend.loss_functions import LossFunction
from psychrnn.backend.initializations import WeightInitializer, GaussianSpectralRadius
tf.compat.v1.disable_eager_execution()
class RNN(ABC):
""" The base recurrent neural network class.
Note:
The base RNN class is not itself a functioning RNN.
forward_pass must be implemented to define a functioning RNN.
Args:
        params (dict): The RNN parameters. Use your task's :func:`~psychrnn.tasks.task.Task.get_task_params` function to start building this dictionary. Optionally use a different network's :func:`get_weights` function to initialize the network with preexisting weights.
:Dictionary Keys:
* **name** (*str*) -- Unique name used to determine variable scope. Having different variable scopes allows multiple distinct models to be instantiated in the same TensorFlow environment. See `TensorFlow's variable_scope <https://www.tensorflow.org/api_docs/python/tf/compat/v1/variable_scope>`_ for more details.
* **N_in** (*int*) -- The number of network inputs.
* **N_rec** (*int*) -- The number of recurrent units in the network.
* **N_out** (*int*) -- The number of network outputs.
* **N_steps** (*int*): The number of simulation timesteps in a trial.
* **dt** (*float*) -- The simulation timestep.
* **tau** (*float*) -- The intrinsic time constant of neural state decay.
* **N_batch** (*int*) -- The number of trials per training update.
* **rec_noise** (*float, optional*) -- How much recurrent noise to add each time the new state of the network is calculated. Default: 0.0.
* **transfer_function** (*function, optional*) -- Transfer function to use for the network. Default: `tf.nn.relu <https://www.tensorflow.org/api_docs/python/tf/nn/relu>`_.
* **load_weights_path** (*str, optional*) -- When given a path, loads weights from file in that path. Default: None
* **initializer** (:class:`~psychrnn.backend.initializations.WeightInitializer` *or child object, optional*) -- Initializer to use for the network. Default: :class:`~psychrnn.backend.initializations.WeightInitializer` (:data:`params`) if :data:`params` includes :data:`W_rec` or :data:`load_weights_path` as a key, :class:`~psychrnn.backend.initializations.GaussianSpectralRadius` (:data:`params`) otherwise.
* **W_in_train** (*bool, optional*) -- True if input weights, W_in, are trainable. Default: True
* **W_rec_train** (*bool, optional*) -- True if recurrent weights, W_rec, are trainable. Default: True
* **W_out_train** (*bool, optional*) -- True if output weights, W_out, are trainable. Default: True
* **b_rec_train** (*bool, optional*) -- True if recurrent bias, b_rec, is trainable. Default: True
* **b_out_train** (*bool, optional*) -- True if output bias, b_out, is trainable. Default: True
            * **init_state_train** (*bool, optional*) -- True if the initial state for the network, init_state, is trainable. Default: True
* **loss_function** (*str, optional*) -- Which loss function to use. See :class:`psychrnn.backend.loss_functions.LossFunction` for details. Defaults to ``"mean_squared_error"``.
:Other Dictionary Keys:
* Any dictionary keys used by the regularizer will be passed onwards to :class:`psychrnn.backend.regularizations.Regularizer`. See :class:`~psychrnn.backend.regularizations.Regularizer` for key names and details.
* Any dictionary keys used for the loss function will be passed onwards to :class:`psychrnn.backend.loss_functions.LossFunction`. See :class:`~psychrnn.backend.loss_functions.LossFunction` for key names and details.
* If :data:`initializer` is not set, any dictionary keys used by the initializer will be pased onwards to :class:`WeightInitializer <psychrnn.backend.initializations.WeightInitializer>` if :data:`load_weights_path` is set or :data:`W_rec` is passed in. Otherwise all keys will be passed to :class:`GaussianSpectralRadius <psychrnn.backend.initializations.GaussianSpectralRadius>`
* If :data:`initializer` is not set and :data:`load_weights_path` is not set, the dictionary entries returned previously by :func:`get_weights` can be passed in to initialize the network. See :class:`WeightInitializer <psychrnn.backend.initializations.WeightInitializer>` for a list and explanation of possible parameters. At a minimum, :data:`W_rec` must be included as a key to make use of this option.
* If :data:`initializer` is not set and :data:`load_weights_path` is not set, the following keys can be used to set biological connectivity constraints:
* **input_connectivity** (*ndarray(dtype=float, shape=(* :attr:`N_rec`, :attr:`N_in` *)), optional*) -- Connectivity mask for the input layer. 1 where connected, 0 where unconnected. Default: np.ones((:attr:`N_rec`, :attr:`N_in`)).
* **rec_connectivity** (*ndarray(dtype=float, shape=(* :attr:`N_rec`, :attr:`N_rec` *)), optional*) -- Connectivity mask for the recurrent layer. 1 where connected, 0 where unconnected. Default: np.ones((:attr:`N_rec`, :attr:`N_rec`)).
* **output_connectivity** (*ndarray(dtype=float, shape=(* :attr:`N_out`, :attr:`N_rec` *)), optional*) -- Connectivity mask for the output layer. 1 where connected, 0 where unconnected. Default: np.ones((:attr:`N_out`, :attr:`N_rec`)).
* **autapses** (*bool, optional*) -- If False, self connections are not allowed in N_rec, and diagonal of :data:`rec_connectivity` will be set to 0. Default: True.
* **dale_ratio** (float, optional) -- Dale's ratio, used to construct Dale_rec and Dale_out. 0 <= dale_ratio <=1 if dale_ratio should be used. ``dale_ratio * N_rec`` recurrent units will be excitatory, the rest will be inhibitory. Default: None
Inferred Parameters:
* **alpha** (*float*) -- The number of unit time constants per simulation timestep.
"""
def __init__(self, params):
self.params = params
# --------------------------------------------
# Unique name used to determine variable scope
# --------------------------------------------
try:
self.name = params['name']
except KeyError:
print("You must pass a 'name' to RNN")
raise
# ----------------------------------
# Network sizes (tensor dimensions)
# ----------------------------------
try:
N_in = self.N_in = params['N_in']
except KeyError:
print("You must pass 'N_in' to RNN")
raise
try:
N_rec = self.N_rec = params['N_rec']
except KeyError:
print("You must pass 'N_rec' to RNN")
raise
try:
N_out = self.N_out = params['N_out']
except KeyError:
print("You must pass 'N_out' to RNN")
raise
try:
N_steps = self.N_steps = params['N_steps']
except KeyError:
print("You must pass 'N_steps' to RNN")
raise
# ----------------------------------
# Physical parameters
# ----------------------------------
try:
self.dt = params['dt']
except KeyError:
print("You must pass 'dt' to RNN")
raise
try:
self.tau = params['tau']
except KeyError:
print("You must pass 'tau' to RNN")
raise
try:
self.tau = self.tau.astype('float32')
except AttributeError:
pass
try:
self.N_batch = params['N_batch']
except KeyError:
print("You must pass 'N_batch' to RNN")
raise
self.alpha = (1.0 * self.dt) / self.tau
self.rec_noise = params.get('rec_noise', 0.0)
self.transfer_function = params.get('transfer_function', tf.nn.relu)
# ----------------------------------
# Load weights path
# ----------------------------------
self.load_weights_path = params.get('load_weights_path', None)
# ------------------------------------------------
# Define initializer for TensorFlow variables
# ------------------------------------------------
if self.load_weights_path is not None:
self.initializer = WeightInitializer(load_weights_path=self.load_weights_path)
elif params.get('W_rec', None) is not None:
self.initializer = params.get('initializer',
WeightInitializer(**params))
else:
self.initializer = params.get('initializer',
GaussianSpectralRadius(**params))
self.dale_ratio = self.initializer.get_dale_ratio()
# ----------------------------------
# Trainable features
# ----------------------------------
self.W_in_train = params.get('W_in_train', True)
self.W_rec_train = params.get('W_rec_train', True)
self.W_out_train = params.get('W_out_train', True)
self.b_rec_train = params.get('b_rec_train', True)
self.b_out_train = params.get('b_out_train', True)
self.init_state_train = params.get('init_state_train', True)
# --------------------------------------------------
# TensorFlow input/output placeholder initializations
# ---------------------------------------------------
self.x = tf.compat.v1.placeholder("float", [None, N_steps, N_in])
self.y = tf.compat.v1.placeholder("float", [None, N_steps, N_out])
self.output_mask = tf.compat.v1.placeholder("float", [None, N_steps, N_out])
# --------------------------------------------------
# Initialize variables in proper scope
# ---------------------------------------------------
with tf.compat.v1.variable_scope(self.name) as scope:
# ------------------------------------------------
# Trainable variables:
# Initial State, weight matrices and biases
# ------------------------------------------------
try:
self.init_state = tf.compat.v1.get_variable('init_state', [1, N_rec],
initializer=self.initializer.get('init_state'),
trainable=self.init_state_train)
except ValueError as error:
raise UserWarning("Try calling model.destruct() or changing params['name'].")
self.init_state = tf.tile(self.init_state, [self.N_batch, 1])
# Input weight matrix:
self.W_in = \
tf.compat.v1.get_variable('W_in', [N_rec, N_in],
initializer=self.initializer.get('W_in'),
trainable=self.W_in_train)
# Recurrent weight matrix:
self.W_rec = \
tf.compat.v1.get_variable(
'W_rec',
[N_rec, N_rec],
initializer=self.initializer.get('W_rec'),
trainable=self.W_rec_train)
# Output weight matrix:
self.W_out = tf.compat.v1.get_variable('W_out', [N_out, N_rec],
initializer=self.initializer.get('W_out'),
trainable=self.W_out_train)
# Recurrent bias:
self.b_rec = tf.compat.v1.get_variable('b_rec', [N_rec], initializer=self.initializer.get('b_rec'),
trainable=self.b_rec_train)
# Output bias:
self.b_out = tf.compat.v1.get_variable('b_out', [N_out], initializer=self.initializer.get('b_out'),
trainable=self.b_out_train)
# ------------------------------------------------
# Non-trainable variables:
# Overall connectivity and Dale's law matrices
# ------------------------------------------------
# Recurrent Dale's law weight matrix:
self.Dale_rec = tf.compat.v1.get_variable('Dale_rec', [N_rec, N_rec],
initializer=self.initializer.get('Dale_rec'),
trainable=False)
# Output Dale's law weight matrix:
self.Dale_out = tf.compat.v1.get_variable('Dale_out', [N_rec, N_rec],
initializer=self.initializer.get('Dale_out'),
trainable=False)
# Connectivity weight matrices:
self.input_connectivity = tf.compat.v1.get_variable('input_connectivity', [N_rec, N_in],
initializer=self.initializer.get('input_connectivity'),
trainable=False)
self.rec_connectivity = tf.compat.v1.get_variable('rec_connectivity', [N_rec, N_rec],
initializer=self.initializer.get('rec_connectivity'),
trainable=False)
self.output_connectivity = tf.compat.v1.get_variable('output_connectivity', [N_out, N_rec],
initializer=self.initializer.get('output_connectivity'),
trainable=False)
# --------------------------------------------------
# Flag to check if variables initialized, model built
# ---------------------------------------------------
self.is_initialized = False
self.is_built = False
def build(self):
""" Build the TensorFlow network and start a TensorFlow session.
"""
# --------------------------------------------------
# Define the predictions
# --------------------------------------------------
self.predictions, self.states = self.forward_pass()
# --------------------------------------------------
# Define the loss (based on the predictions)
# --------------------------------------------------
self.loss = LossFunction(self.params).set_model_loss(self)
# --------------------------------------------------
# Define the regularization
# --------------------------------------------------
self.reg = Regularizer(self.params).set_model_regularization(self)
# --------------------------------------------------
# Define the total regularized loss
# --------------------------------------------------
self.reg_loss = self.loss + self.reg
# --------------------------------------------------
# Open a session
# --------------------------------------------------
self.sess = tf.compat.v1.Session()
# --------------------------------------------------
# Record successful build
# --------------------------------------------------
self.is_built = True
return
def destruct(self):
""" Close the TensorFlow session and reset the global default graph.
"""
# --------------------------------------------------
# Close the session. Delete the graph.
# --------------------------------------------------
if self.is_built:
self.sess.close()
tf.compat.v1.reset_default_graph()
return
def get_effective_W_rec(self):
""" Get the recurrent weights used in the network, after masking by connectivity and dale_ratio.
Returns:
tf.Tensor(dtype=float, shape=(:attr:`N_rec`, :attr:`N_rec` ))
"""
W_rec = self.W_rec * self.rec_connectivity
if self.dale_ratio:
W_rec = tf.matmul(tf.abs(W_rec), self.Dale_rec, name="in_1")
return W_rec
def get_effective_W_in(self):
""" Get the input weights used in the network, after masking by connectivity and dale_ratio.
Returns:
tf.Tensor(dtype=float, shape=(:attr:`N_rec`, :attr:`N_in` ))
"""
W_in = self.W_in * self.input_connectivity
if self.dale_ratio:
W_in = tf.abs(W_in)
return W_in
def get_effective_W_out(self):
""" Get the output weights used in the network, after masking by connectivity, and dale_ratio.
Returns:
tf.Tensor(dtype=float, shape=(:attr:`N_out`, :attr:`N_rec` ))
"""
W_out = self.W_out * self.output_connectivity
if self.dale_ratio:
W_out = tf.matmul(tf.abs(W_out), self.Dale_out, name="in_2")
return W_out
@abstractmethod
def forward_pass(self):
""" Run the RNN on a batch of task inputs.
Note:
This is an abstract function that must be defined in a child class.
Returns:
tuple:
* **predictions** (*ndarray(dtype=float, shape=(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*) -- Network output on inputs found in self.x within the tf network.
* **states** (*ndarray(dtype=float, shape=(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_rec` *))*) -- State variable values over the course of the trials found in self.x within the tf network.
"""
raise UserWarning("forward_pass must be implemented in child class. See Basic for example.")
def get_weights(self):
""" Get weights used in the network.
Allows for rebuilding or tweaking different weights to do experiments / analyses.
Returns:
dict: Dictionary of rnn weights including the following keys:
:Dictionary Keys:
* **init_state** (*ndarray(dtype=float, shape=(1, :attr:`N_rec` *))*) -- Initial state of the network's recurrent units.
* **W_in** (*ndarray(dtype=float, shape=(:attr:`N_rec`. :attr:`N_in` *))*) -- Input weights.
* **W_rec** (*ndarray(dtype=float, shape=(:attr:`N_rec`, :attr:`N_rec` *))*) -- Recurrent weights.
* **W_out** (*ndarray(dtype=float, shape=(:attr:`N_out`, :attr:`N_rec` *))*) -- Output weights.
* **b_rec** (*ndarray(dtype=float, shape=(:attr:`N_rec`, *))*) -- Recurrent bias.
* **b_out** (*ndarray(dtype=float, shape=(:attr:`N_out`, *))*) -- Output bias.
* **Dale_rec** (*ndarray(dtype=float, shape=(:attr:`N_rec`, :attr:`N_rec`*))*) -- Diagonal matrix with ones and negative ones on the diagonal. If :data:`dale_ratio` is not ``None``, indicates whether a recurrent unit is excitatory(1) or inhibitory(-1).
* **Dale_out** (*ndarray(dtype=float, shape=(:attr:`N_rec`, :attr:`N_rec`*))*) -- Diagonal matrix with ones and zeroes on the diagonal. If :data:`dale_ratio` is not ``None``, indicates whether a recurrent unit is excitatory(1) or inhibitory(0). Inhibitory neurons do not contribute to the output.
* **input_connectivity** (*ndarray(dtype=float, shape=(:attr:`N_rec`, :attr:`N_in`*))*) -- Connectivity mask for the input layer. 1 where connected, 0 where unconnected.
* **rec_connectivity** (*ndarray(dtype=float, shape=(:attr:`N_rec`, :attr:`N_rec`*))*) -- Connectivity mask for the recurrent layer. 1 where connected, 0 where unconnected.
* **output_connectivity** (*ndarray(dtype=float, shape=(:attr:`N_out`, :attr:`N_rec`*))*) -- Connectivity mask for the output layer. 1 where connected, 0 where unconnected.
* **dale_ratio** (*float*) -- Dale's ratio, used to construct Dale_rec and Dale_out. Either ``None`` if dale's law was not applied, or 0 <= dale_ratio <=1 if dale_ratio was applied.
Note:
Keys returned may be different / include other keys depending on the implementation of :class:`RNN` used. A different set of keys will be included e.g. if the :class:`~psychrnn.backend.models.lstm.LSTM` implementation is used. The set of keys above is accurate and meaningful for the :class:`~psychrnn.backend.models.basic.Basic` and :class:`~psychrnn.backend.models.basic.BasicScan` implementations.
"""
if not self.is_built:
self.build()
if not self.is_initialized:
self.sess.run(tf.compat.v1.global_variables_initializer())
self.is_initialized = True
weights_dict = dict()
for var in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=self.name):
# avoid saving duplicates
if var.name.endswith(':0') and var.name.startswith(self.name):
name = var.name[len(self.name)+1:-2]
weights_dict.update({name: var.eval(session=self.sess)})
weights_dict.update({'W_rec': self.get_effective_W_rec().eval(session=self.sess)})
weights_dict.update({'W_in': self.get_effective_W_in().eval(session=self.sess)})
weights_dict.update({'W_out': self.get_effective_W_out().eval(session=self.sess)})
weights_dict['dale_ratio'] = self.dale_ratio
return weights_dict
def save(self, save_path):
""" Save the weights returned by :func:`get_weights` to :data:`save_path`
Arguments:
save_path (str): Path for where to save the network weights.
"""
weights_dict = self.get_weights()
np.savez(save_path, **weights_dict)
return
def train(self, trial_batch_generator, train_params={}):
""" Train the network.
Arguments:
trial_batch_generator (:class:`~psychrnn.tasks.task.Task` object or *Generator[tuple, None, None]*): the task to train on, or the task to train on's batch_generator. If a task is passed in, task.:func:`batch_generator` () will be called to get the generator for the task to train on.
train_params (dict, optional): Dictionary of training parameters containing the following possible keys:
:Dictionary Keys:
* **learning_rate** (*float, optional*) -- Sets learning rate if use default optimizer Default: .001
* **training_iters** (*int, optional*) -- Number of iterations to train for Default: 50000.
* **loss_epoch** (*int, optional*) -- Compute and record loss every 'loss_epoch' epochs. Default: 10.
* **verbosity** (*bool, optional*) -- If true, prints information as training progresses. Default: True.
* **save_weights_path** (*str, optional*) -- Where to save the model after training. Default: None
* **save_training_weights_epoch** (*int, optional*) -- Save training weights every 'save_training_weights_epoch' epochs. Weights only actually saved if :data:`training_weights_path` is set. Default: 100.
* **training_weights_path** (*str, optional*) -- What directory to save training weights into as training progresses. Default: None.
* **curriculum** (`~psychrnn.backend.curriculum.Curriculum` *object, optional*) -- Curriculum to train on. If a curriculum object is provided, it overrides the trial_batch_generator argument. Default: None.
* **optimizer** (`tf.compat.v1.train.Optimizer <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Optimizer>`_ *object, optional*) -- What optimizer to use to compute gradients. Default: `tf.train.AdamOptimizer <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/AdamOptimizer>`_ (learning_rate=:data:`train_params`['learning_rate']` ).
* **clip_grads** (*bool, optional*) -- If true, clip gradients by norm 1. Default: True
* **fixed_weights** (*dict, optional*) -- By default all weights are allowed to train unless :data:`fixed_weights` or :data:`W_rec_train`, :data:`W_in_train`, or :data:`W_out_train` are set. Default: None. Dictionary of weights to fix (not allow to train) with the following optional keys:
Fixed Weights Dictionary Keys (in case of :class:`~psychrnn.backend.models.basic.Basic` and :class:`~psychrnn.backend.models.basic.BasicScan` implementations)
* **W_in** (*ndarray(dtype=bool, shape=(:attr:`N_rec`, :attr:`N_in` *)), optional*) -- True for input weights that should be fixed during training.
* **W_rec** (*ndarray(dtype=bool, shape=(:attr:`N_rec`, :attr:`N_rec` *)), optional*) -- True for recurrent weights that should be fixed during training.
* **W_out** (*ndarray(dtype=bool, shape=(:attr:`N_out`, :attr:`N_rec` *)), optional*) -- True for output weights that should be fixed during training.
:Note:
In general, any key in the dictionary output by :func:`get_weights` can have a key in the fixed_weights matrix, however fixed_weights will only meaningfully apply to trainable matrices.
* **performance_cutoff** (*float*) -- If :data:`performance_measure` is not ``None``, training stops as soon as performance_measure surpasses the performance_cutoff. Default: None.
* **performance_measure** (*function*) -- Function to calculate the performance of the network using custom criteria. Default: None.
:Arguments:
* **trial_batch** (*ndarray(dtype=float, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_in` *))*): Task stimuli for :attr:`N_batch` trials.
* **trial_y** (*ndarray(dtype=float, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*): Target output for the network on :attr:`N_batch` trials given the :data:`trial_batch`.
* **output_mask** (*ndarray(dtype=bool, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*): Output mask for :attr:`N_batch` trials. True when the network should aim to match the target output, False when the target output can be ignored.
* **output** (*ndarray(dtype=float, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*): Output to compute the accuracy of. ``output`` as returned by :func:`psychrnn.backend.rnn.RNN.test`.
* **epoch** (*int*): Current training epoch (e.g. perhaps the performance_measure is calculated differently early on vs late in training)
* **losses** (*list of float*): List of losses from the beginning of training until the current epoch.
* **verbosity** (*bool*): Passed in from :data:`train_params`.
:Returns:
*float*
Performance, greater when the performance is better.
Returns:
tuple:
* **losses** (*list of float*) -- List of losses, computed every :data:`loss_epoch` epochs during training.
* **training_time** (*float*) -- Time spent training.
* **initialization_time** (*float*) -- Time spent initializing the network and preparing to train.
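Example:
    A minimal training sketch; it assumes a task instance named ``task`` and uses the
    :class:`~psychrnn.backend.models.basic.Basic` implementation, with illustrative
    parameter values::

        from psychrnn.backend.models.basic import Basic

        params = task.get_task_params()
        params['name'] = 'basic_model'
        params['N_rec'] = 50
        model = Basic(params)
        losses, train_time, init_time = model.train(task, {'training_iters': 20000,
                                                           'learning_rate': .001})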
"""
if not self.is_built:
self.build()
t0 = time()
# --------------------------------------------------
# Extract params
# --------------------------------------------------
learning_rate = train_params.get('learning_rate', .001)
training_iters = train_params.get('training_iters', 50000)
loss_epoch = train_params.get('loss_epoch', 10)
verbosity = train_params.get('verbosity', True)
save_weights_path = train_params.get('save_weights_path', None)
save_training_weights_epoch = train_params.get('save_training_weights_epoch', 100)
training_weights_path = train_params.get('training_weights_path', None)
curriculum = train_params.get('curriculum', None)
optimizer = train_params.get('optimizer',
tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate))
clip_grads = train_params.get('clip_grads', True)
fixed_weights = train_params.get('fixed_weights', None) # array of zeroes and ones. One indicates to pin and not train that weight.
performance_cutoff = train_params.get('performance_cutoff', None)
performance_measure = train_params.get('performance_measure', None)
if (performance_cutoff is not None and performance_measure is None) or (performance_cutoff is None and performance_measure is not None):
raise UserWarning("training will not be cutoff based on performance. Make sure both performance_measure and performance_cutoff are defined")
if curriculum is not None:
trial_batch_generator = curriculum.get_generator_function()
if not isgenerator(trial_batch_generator):
trial_batch_generator = trial_batch_generator.batch_generator()
# --------------------------------------------------
# Make weights folder if it doesn't already exist.
# --------------------------------------------------
if save_weights_path != None:
if path.dirname(save_weights_path) != "" and not path.exists(path.dirname(save_weights_path)):
makedirs(path.dirname(save_weights_path))
# --------------------------------------------------
# Make train weights folder if it doesn't already exist.
# --------------------------------------------------
if training_weights_path != None:
if path.dirname(training_weights_path) != "" and not path.exists(path.dirname(training_weights_path)):
makedirs(path.dirname(training_weights_path))
# --------------------------------------------------
# Compute gradients
# --------------------------------------------------
grads = optimizer.compute_gradients(self.reg_loss)
# --------------------------------------------------
# Fixed Weights
# --------------------------------------------------
if fixed_weights is not None:
for i in range(len(grads)):
(grad, var) = grads[i]
name = var.name[len(self.name)+1:-2]
if name in fixed_weights.keys():
grad = tf.multiply(grad, (1-fixed_weights[name]))
grads[i] = (grad, var)
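# For example (illustrative only), passing
#   train_params['fixed_weights'] = {'W_in': np.ones((N_rec, N_in))}
# zeroes every gradient on W_in above, so the input weights keep their initial
# values while the remaining matrices continue to train.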
# --------------------------------------------------
# Clip gradients
# --------------------------------------------------
if clip_grads:
grads = [(tf.clip_by_norm(grad, 1.0), var)
if grad is not None else (grad, var)
for grad, var in grads]
# --------------------------------------------------
# Call the optimizer and initialize variables
# --------------------------------------------------
optimize = optimizer.apply_gradients(grads)
self.sess.run(tf.compat.v1.global_variables_initializer())
self.is_initialized = True
# --------------------------------------------------
# Record training time for performance benchmarks
# --------------------------------------------------
t1 = time()
# --------------------------------------------------
# Training loop
# --------------------------------------------------
epoch = 1
batch_size = next(trial_batch_generator)[0].shape[0]
losses = []
if performance_cutoff is not None:
performance = performance_cutoff - 1
while epoch * batch_size < training_iters and (performance_cutoff is None or performance < performance_cutoff):
batch_x, batch_y, output_mask, _ = next(trial_batch_generator)
self.sess.run(optimize, feed_dict={self.x: batch_x, self.y: batch_y, self.output_mask: output_mask})
# --------------------------------------------------
# Output batch loss
# --------------------------------------------------
if epoch % loss_epoch == 0:
reg_loss = self.sess.run(self.reg_loss,
feed_dict={self.x: batch_x, self.y: batch_y, self.output_mask: output_mask})
losses.append(reg_loss)
if verbosity:
print("Iter " + str(epoch * batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(reg_loss))
# --------------------------------------------------
# Allow for curriculum learning
# --------------------------------------------------
if curriculum is not None and epoch % curriculum.metric_epoch == 0:
trial_batch, trial_y, output_mask, _ = next(trial_batch_generator)
output, _ = self.test(trial_batch)
if curriculum.metric_test(trial_batch, trial_y, output_mask, output, epoch, losses, verbosity):
if curriculum.stop_training:
break
trial_batch_generator = curriculum.get_generator_function()
# --------------------------------------------------
# Save intermediary weights
# --------------------------------------------------
if epoch % save_training_weights_epoch == 0:
if training_weights_path is not None:
self.save(training_weights_path + str(epoch))
if verbosity:
print("Training weights saved in file: %s" % training_weights_path + str(epoch))
# ---------------------------------------------------
# Update performance value if necessary
# ---------------------------------------------------
if performance_measure is not None:
trial_batch, trial_y, output_mask, _ = next(trial_batch_generator)
output, _ = self.test(trial_batch)
performance = performance_measure(trial_batch, trial_y, output_mask, output, epoch, losses, verbosity)
if verbosity:
print("performance: " + str(performance))
epoch += 1
t2 = time()
if verbosity:
print("Optimization finished!")
# --------------------------------------------------
# Save final weights
# --------------------------------------------------
if save_weights_path is not None:
self.save(save_weights_path)
if verbosity:
print("Model saved in file: %s" % save_weights_path)
# --------------------------------------------------
# Return losses, training time, initialization time
# --------------------------------------------------
return losses, (t2 - t1), (t1 - t0)
def train_curric(self, train_params):
"""Wrapper function for training with curriculum to streamline curriculum learning.
Arguments:
train_params (dict, optional): See :func:`train` for details.
Returns:
tuple: See :func:`train` for details.
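Example:
    A sketch of curriculum training; the Curriculum constructor arguments shown are
    illustrative and ``model`` is an already-constructed RNN::

        from psychrnn.backend.curriculum import Curriculum

        curric = Curriculum([easy_task, hard_task])
        losses, train_time, init_time = model.train_curric({'curriculum': curric,
                                                            'training_iters': 50000})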
"""
# --------------------------------------------------
# Wrapper function for training with curriculum
# to streamline curriculum learning
# --------------------------------------------------
curriculum = train_params.get('curriculum', None)
if curriculum is None:
raise UserWarning("train_curric requires a curriculum. Please pass in a curriculum or use train instead.")
losses, training_time, initialization_time = self.train(curriculum.get_generator_function(), train_params)
return losses, training_time, initialization_time
def test(self, trial_batch):
""" Test the network on a certain task input.
Arguments:
trial_batch ((*ndarray(dtype=float, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_in` *))*): Task stimulus to run the network on. Stimulus from :func:`psychrnn.tasks.task.Task.get_trial_batch`, or from next(:func:`psychrnn.tasks.task.Task.batch_generator` ).
Returns:
tuple:
* **outputs** (*ndarray(dtype=float, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*) -- Output time series of the network for each trial in the batch.
* **states** (*ndarray(dtype=float, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_rec` *))*) -- Activity of recurrent units during each trial.
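Example:
    A minimal sketch, reusing the batch generator form accepted by :func:`train`
    (``task`` and ``model`` are illustrative names)::

        x, y, mask, _ = next(task.batch_generator())
        outputs, states = model.test(x)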
"""
if not self.is_built:
self.build()
if not self.is_initialized:
self.sess.run(tf.compat.v1.global_variables_initializer())
self.is_initialized = True
# --------------------------------------------------
# Run the forward pass on trial_batch
# --------------------------------------------------
outputs, states = self.sess.run([self.predictions, self.states],
feed_dict={self.x: trial_batch})
return outputs, states
| 57.370651 | 420 | 0.548573 |
9c1f62f375fa63bc3da4282a11208fe77cc53dbe | 35,393 | py | Python | private/dump_new_fish_data.py | Lodinn/ff14-fish-tracker-app | ff96fea17f4ffa5f77013c5f31d04a53b54f8123 | ["MIT"] | null | null | null | private/dump_new_fish_data.py | Lodinn/ff14-fish-tracker-app | ff96fea17f4ffa5f77013c5f31d04a53b54f8123 | ["MIT"] | null | null | null | private/dump_new_fish_data.py | Lodinn/ff14-fish-tracker-app | ff96fea17f4ffa5f77013c5f31d04a53b54f8123 | ["MIT"] | null | null | null |
from typing import Dict, Any, List, Iterable
from dataclasses import make_dataclass, field
import logging
import sys
import os
import re
from itertools import chain, filterfalse, islice, repeat
from functools import reduce
from operator import add, itemgetter
import timeit
from tqdm import tqdm
try:
_SCRIPT_PATH = os.path.abspath(__path__)
except:
_SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
# _HELPER_LIBS_PATH = os.path.join(_SCRIPT_PATH, '..', '..')
_HELPER_LIBS_PATH = _SCRIPT_PATH
# Add the Saint Coinach python API to the path.
sys.path += [os.path.join(_HELPER_LIBS_PATH, 'saintcoinach-py')]
import pysaintcoinach
from pysaintcoinach.ex.language import Language
from pysaintcoinach.xiv import as_row_type, XivRow
# from pysaintcoinach.xiv.masterpiece_supply_duty import MasterpieceSupplyDuty
from pysaintcoinach.xiv.item import Item
from pysaintcoinach.xiv.fishing_spot import FishingSpot
from pysaintcoinach.xiv.fish_parameter import FishParameter
from pysaintcoinach.xiv.gathering_point import GatheringPoint
from pysaintcoinach.xiv.weather import Weather
from pysaintcoinach.xiv.placename import PlaceName
# logging.basicConfig(level=logging.INFO, stream=sys.stderr)
def nth(iterable, n, default=None):
"""Returns the nth item or a default value"""
return next(islice(iterable, n, None), default)
def first(iterable, pred, default=None):
"""Returns the first item for which pred(item) is true.
If no true value is found, returns *default*
"""
return next(filter(pred, iterable), default)
def flatten(listOfLists):
"""Flatten one level of nesting"""
return chain.from_iterable(listOfLists)
def unique_everseen(iterable, key=None):
"""
List unique elements, preserving order. Remember all elements ever seen.
"""
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
_start_time = timeit.default_timer()
def _init_saintcoinach():
# Add the Saint Coinach python API to the path.
sys.path += [os.path.join(_HELPER_LIBS_PATH, 'saintcoinach-py')]
from pysaintcoinach import ARealmReversed
import pysaintcoinach.text as text
from pysaintcoinach.ex.language import Language
# Add ExtractedSheet plugin to library.
import extracted_sheet_plugin
extracted_sheet_plugin.initialize()
global LANGUAGES
LANGUAGES = [Language.english,
Language.japanese,
Language.german,
Language.french,
Language.korean]
_string_decoder = text.XivStringDecoder.default()
# Load up the game data
xiv = ARealmReversed(r"C:\Program Files (x86)\SquareEnix\FINAL FANTASY XIV - A Realm Reborn",
Language.english)
# Override the tag decoder for emphasis so it doesn't produce tags in string...
def omit_tag_decoder(i, t, l):
text.XivStringDecoder.get_integer(i)
return text.nodes.StaticString('')
_string_decoder.set_decoder(text.TagType.Emphasis.value, omit_tag_decoder)
_string_decoder.set_decoder(
text.TagType.SoftHyphen.value,
lambda i,t,l: text.nodes.StaticString("\x26shy;"))
return xiv
realm = _init_saintcoinach()
Fish = make_dataclass('Fish',
[('item', as_row_type('Item')),
('params', Any, field(default=None)),
('spearfishing', bool, field(default=False)),
('spots', list, field(default_factory=list)),
('quest', list, field(default_factory=list)),
('shop', list, field(default_factory=list)),
# ('scrip', MasterpieceSupplyDuty.CollectableItem, field(default=None)),
('scrip', Any, field(default=None)),
('satisfaction', Any, field(default=None)),
('gc', Any, field(default=None)),
('leve', list, field(default_factory=list)),
('craft', list, field(default_factory=list)),
('reduce', Any, field(default=None)),
('aquarium', Any, field(default=None)),
('ecology', bool, field(default=False)),
('expansion', Any, field(default=None))])
GCSupplyDutyTurnin = make_dataclass('GCSupplyDutyTurnin',
[('count', int), ('exp', int), ('seals', int)])
SpearfishingNode = make_dataclass('SpearfishingNode',
[('gathering_point_base', as_row_type('GatheringPointBase')),
('territory_type', as_row_type('TerritoryType')),
('place_name', as_row_type('PlaceName')),
('hidden', bool, field(default=False))])
def _prop_SpearfishingNode_getkey(self):
return self.gathering_point_base.key
setattr(SpearfishingNode, 'key', property(_prop_SpearfishingNode_getkey))
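# Purely as an illustration, the helper plus the setattr call above give the
# make_dataclass-generated class a derived attribute; a roughly equivalent
# explicit spelling would be:
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class SpearfishingNode:
#         gathering_point_base: Any
#         territory_type: Any
#         place_name: Any
#         hidden: bool = False
#
#         @property
#         def key(self):
#             return self.gathering_point_base.key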
def tracked_iter(_iter, desc, **kwargs):
return tqdm(_iter,
desc,
unit='record',
bar_format='{l_bar:>50.50}{bar}{r_bar:50}',
**kwargs)
# Get a list of catchable fish first.
catchable_fish = {} # type: Dict[int, Fish]
for fishing_spot in tracked_iter(realm.game_data.get_sheet(FishingSpot),
'Scanning fishing spots'):
if fishing_spot.place_name.key == 0:
continue
if fishing_spot.territory_type is None:
continue
logging.info("Checking spot: %s" % fishing_spot.place_name.name)
for item in fishing_spot.items:
if item.key not in catchable_fish:
catchable_fish[item.key] = Fish(item,
reduce=item.is_aetherial_reducible)
catchable_fish[item.key].spots.append(fishing_spot)
if catchable_fish[item.key].expansion is not None:
# Warn if a fish is posted to more than one expansion please.
if catchable_fish[item.key].expansion != fishing_spot.territory_type['ExVersion']:
# FUSE: Shirogane's territory type is set to 0 (ARR).
# So if that's the territory, you can ignore this...
if fishing_spot.territory_type.place_name.name != 'Shirogane':
if catchable_fish[item.key].expansion.key > fishing_spot.territory_type['ExVersion'].key:
logging.warning("%s is found in areas belonging to both %s and %s" %
(item.name,
catchable_fish[item.key].expansion,
fishing_spot.territory_type['ExVersion']))
logging.warning("Entry for fishing spot %u (%s) is from an earlier expac." %
(fishing_spot.key, fishing_spot.place_name.name))
else:
catchable_fish[item.key].expansion = fishing_spot.territory_type['ExVersion']
# Now, we'll check for spearfishing nodes.
for gathering_point in tracked_iter(realm.game_data.get_sheet(GatheringPoint),
'Scanning spearfishing nodes'):
if gathering_point.base.type.key != 4:
continue
# Each GatheringPoint represents a single map spawn node. You need to normalize
# these with the GatheringPointBase...
item: Item
for item in gathering_point.base.items:
if item.key not in catchable_fish:
catchable_fish[item.key] = Fish(item,
spearfishing=True,
reduce=item.is_aetherial_reducible)
# Add the gathering point to this fish.
# The check is necessary because gathering points come in sets. We only want
# the base point.
gathering_point_base = gathering_point.base
if gathering_point_base.key not in map(lambda x: x.gathering_point_base.key,
catchable_fish[item.key].spots):
catchable_fish[item.key].spots.append(
SpearfishingNode(gathering_point_base,
gathering_point.territory_type,
gathering_point.place_name,
gathering_point[2] == 6))
#
# Attempt to run the rest in parallel
#
SCAN_TASKS = []
def scan_task(f):
SCAN_TASKS.append(f)
return f
@scan_task
def scan_fish_params(orig_stdout, n=None):
for fish_params in tracked_iter(realm.game_data.get_sheet(FishParameter),
'Scanning fish parameters',
file=orig_stdout, position=n):
if fish_params.item is None:
continue
fish_key = fish_params.item.key
if fish_key in catchable_fish:
catchable_fish[fish_key].params = fish_params
return True
# @scan_task
# def scan_scrip_turnins(orig_stdout, n=None):
# for duty in tracked_iter(realm.game_data.get_sheet(MasterpieceSupplyDuty),
# 'Scanning scrip turn-ins',
# file=orig_stdout, position=n):
# # Ignore non-FSH entries.
# if str(duty.class_job.abbreviation) != 'FSH':
# continue
# for item in duty.collectable_items:
# if item.required_item.key in catchable_fish:
# catchable_fish[item.required_item.key].scrip = item
# return True
@scan_task
def scan_gc_supply_duties(orig_stdout, n=None):
for duty in tracked_iter(realm.game_data.get_sheet('GCSupplyDuty'),
'Scanning GC supply duties',
file=orig_stdout, position=n):
# There are 3 possible items for each duty. FSH is index 10.
for i in range(3):
item = duty[('Item', i, 10)]
if item.key in catchable_fish:
reward = realm.game_data.get_sheet('GCSupplyDutyReward')[duty.key]
catchable_fish[item.key].gc = GCSupplyDutyTurnin(
duty[('ItemCount', i, 10)],
reward['Experience{Provisioning}'],
reward['Seals{Provisioning}'])
return True
@scan_task
def scan_leves(orig_stdout, n=None):
for leve in tracked_iter(realm.game_data.get_sheet('Leve'),
'Scanning leve turn-ins',
file=orig_stdout, position=n):
if 'FSH' not in [str(job.abbreviation) for job in leve['ClassJobCategory'].class_jobs]:
continue
# These are a little weird. They are actually using CraftLeve... Just go with it...
leve_fish = leve['DataId'].get_raw(XivRow.build_column_name('Item', 0))
if leve_fish in catchable_fish:
catchable_fish[leve_fish].leve.append(leve)
return True
@scan_task
def scan_recipes(orig_stdout, n=None):
for recipe in tracked_iter(realm.game_data.get_sheet('Recipe'),
'Scanning recipes',
file=orig_stdout, position=n):
for i in range(10):
ingredient = recipe.get_raw(XivRow.build_column_name('Item{Ingredient}', i))
if ingredient in catchable_fish:
catchable_fish[ingredient].craft.append(recipe)
return True
@scan_task
def scan_aquariums(orig_stdout, n=None):
for aquarium_fish in tracked_iter(realm.game_data.get_sheet('AquariumFish'),
'Scanning aquarium fish',
file=orig_stdout, position=n):
fish_key = aquarium_fish.get_raw('Item')
if fish_key in catchable_fish:
catchable_fish[fish_key].aquarium = aquarium_fish
return True
# There's actually only 2 or 3 fish that show up in Shops, and the scanning
# process itself takes forever compared to the rest of the scanners. These
# fish are also covered by at least one category in the other scans, making
# this check virtually useless.
# for shop in tracked_iter(realm.game_data.shops,
# 'Scanning shops'):
# # Running this per-item is *VERY* slow.
# # Instead, we're going to enumerate all the shop listings, and check
# # if any of our fish are listed as a "cost" item.
# from pysaintcoinach.xiv.interfaces import IShopListing
# shop_item_costs = []
#
# shop_listing: IShopListing
# for shop_listing in shop.shop_listings:
# for cost in shop_listing.costs:
# if cost.item is not None and cost.item.key in catchable_fish:
# # Add this shop list item to the fish it's associated with.
# catchable_fish[cost.item.key].shop += [cost]
@scan_task
def scan_satisfaction(orig_stdout, n=None):
for supply in tracked_iter(realm.game_data.get_sheet('SatisfactionSupply'),
'Scanning satisfaction supply requests',
file=orig_stdout, position=n):
# We only care about Slot #3.
if supply['Slot'] != 3:
continue
item = supply['Item']
if item is not None and item.key in catchable_fish:
# Overwrite the entry if multiple matches are found.
# We ideally want only the last entry anyways...
catchable_fish[item.key].satisfaction = supply
return True
@scan_task
def scan_quests(orig_stdout, n=None):
for quest in tracked_iter(realm.game_data.get_sheet('Quest'),
'Scanning quests',
file=orig_stdout, position=n):
# Quests are a ROYAL PAIN!
# We're looking for the Script{Instruction} fields named "RITEM#".
# These will have corresponding Script{Arg} fields with item ids.
if 'FSH' not in [str(job.abbreviation) for job in quest[('ClassJobCategory', 0)].class_jobs]:
continue
for i in range(50):
if not str(quest[('Script{Instruction}', i)]).startswith('RITEM'):
continue
item = quest.as_T(Item, 'Script{Arg}', i)
if item is not None and item.key in catchable_fish:
catchable_fish[item.key].quest += [(quest.key, str(quest))]
return True
@scan_task
def scan_spearfishing_ecology(orig_stdout, n=None):
# The SpearfishingEcology sheet tells us which fish is needed to pop
# the swimming shadows. This fish might not otherwise be important...
for ecology in tracked_iter(realm.game_data.get_sheet('SpearfishingEcology'),
'Scanning spearfishing ecology',
file=orig_stdout, position=n):
if ecology.key == 0:
continue
m = re.search(r'With (.*) caught,', ecology[1])
if m is not None:
name = m.group(1)
# Search all catchable fish for this name.
# NOTE: You must use the singular display name.
# If Article, then exclude "the " from the search.
for fish in catchable_fish.values():
fish_name = str(fish.item.as_string('Singular'))
if not fish.item.as_boolean('Article'):
fish_name = "the " + fish_name
if fish_name == name:
fish.ecology = True
# import concurrent.futures
#
# import contextlib
# import sys
#
# class DummyTqdmFile(object):
# file = None
# def __init__(self, file):
# self.file = file
#
# def write(self, x):
# if len(x.rstrip()) > 0:
# tqdm.write(x, file=self.file)
#
# def flush(self):
# return getattr(self.file, "flush", lambda: None)()
#
#
# @contextlib.contextmanager
# def std_out_err_redirect_tqdm():
# orig_out_err = sys.stdout, sys.stderr
# try:
# sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err)
# yield orig_out_err[1]
# except Exception as exc:
# raise exc
# finally:
# sys.stdout, sys.stderr = orig_out_err
#
#
# with concurrent.futures.ThreadPoolExecutor() as executor:
# # with std_out_err_redirect_tqdm() as orig_stdout:
# tasks = {executor.submit(task, None, n) for n, task in enumerate(SCAN_TASKS)}
# results = concurrent.futures.wait(tasks)
#
# for task in results.done:
# result = task.result()
# print()
for n, task in enumerate(SCAN_TASKS):
task(None)
def is_important_fish(fish):
# Always include BIG FISH!
if fish.item.rarity >= 2:
return True
# SUPER IMPORTANT FISH
if fish.params is not None:
if fish.params.time_restricted:
return True
if fish.params.weather_restricted:
return True
if fish.spearfishing:
if all(map(lambda x: x.hidden, fish.spots)):
return True
if fish.ecology:
return True
# Lesser important fish
if fish.reduce:
return True
if fish.scrip is not None:
return True
if len(fish.quest) > 0:
return True
if fish.satisfaction is not None:
return True
if fish.gc is not None:
return True
if len(fish.leve) > 0:
return True
if len(fish.craft) > 0:
return True
if len(fish.shop) > 0:
return True
if fish.aquarium is not None:
return True
# Otherwise... it's unimportant...
return False
important_fish = sorted(list(filter(is_important_fish, catchable_fish.values())),
key=lambda x: x.item.key)
#######################################################################
import yaml
from yaml import CLoader as Loader
from yaml import CDumper as Dumper
# Import the OLD data...
fishes = yaml.load(open("private/fishData.yaml", 'r'), Loader=Loader)
known_fishes = dict([(fish['name'], fish) for fish in fishes])
def get_spot(spot):
if isinstance(spot, SpearfishingNode):
if spot.hidden:
return spot.gathering_point_base.key
return str(spot.place_name.name)
def supports_fish_eyes(fish):
# Fish Eyes does not affect spearfishing.
if fish.spearfishing:
return False
# The fish must not be legendary: i.e. its description must not include the phrase "オオヌシ".
if "オオヌシ" in fish.item.source_row['Description', Language.japanese]:
return False
# As of 5.4, Fish Eyes only works on fish in areas prior to Stormblood.
if fish.expansion.key >= 2:
return False
# While technically any other fish does support Fish Eyes, only fish with
# time restrictions truly can use it.
# NOTE: Disabled because... well, run integrity checks and you'll see -_-
# return fish.params is not None and fish.params.time_restricted
return True
def is_big_fish(fish):
# The fish must include ヌシ in its description
if "ヌシ" in fish.item.source_row['Description', Language.japanese]:
return True
return False
new_fishes = {}
for fish in tracked_iter(important_fish,
'Generating new fish database'):
folklore = False
if fish.params is not None:
folklore = fish.params['GatheringSubCategory']
folklore = False if folklore is None else str(folklore)
new_fishes[fish.item.key] = {
'name': str(fish.item.name),
'dataMissing': True,
'start': 0,
'end': 24,
'prevWeather': None,
'weather': None,
'bait': None,
'intuition': None,
'intuitionLength': None,
'hookset': None,
'tug': None,
'snagging': None,
'gig': None,
'patch': None,
'computed': {
'locations': [get_spot(spot) for spot in fish.spots],
'timeRestricted': fish.params.time_restricted if fish.params is not None else False,
'weatherRestricted': fish.params.weather_restricted if fish.params is not None else False,
'folklore': folklore,
'spearfishing': fish.spearfishing,
'bigfish': fish.item.rarity >= 2,
'quest': len(fish.quest) > 0,
# 'shop': len(fish.shop) > 0,
'satisfaction': fish.satisfaction is not None,
'craft': len(fish.craft) > 0,
'gc': fish.gc is not None,
'leve': len(fish.leve) > 0,
'scrip': fish.scrip is not None,
'reduce': fish.reduce,
'aquarium': fish.aquarium is not None,
'fishEyes': supports_fish_eyes(fish),
'bigFish': is_big_fish(fish)
}
}
if str(fish.item.name) in known_fishes:
known_fish = known_fishes[str(fish.item.name)]
del new_fishes[fish.item.key]['dataMissing']
try:
new_fishes[fish.item.key].update({
'start': known_fish.get('startHour', 0),
'end': known_fish.get('endHour', 24),
'prevWeather': known_fish.get('previousWeatherSet', []),
'weather': known_fish.get('weatherSet', []),
'bait': (known_fish.get('bestCatchPath', []) or [])[-1:],
'intuition': known_fish.get('predators', None),
'intuitionLength': known_fish.get('intuitionLength', None),
'hookset': known_fish.get('hookset', None),
'tug': known_fish.get('tug', None),
'snagging': known_fish.get('snagging', None),
'gig': known_fish.get('gig', None),
'patch': known_fish.get('patch', None)
})
except Exception:
print("ERROR: While processing %s" % fish.item.name)
import pprint
pprint.pprint(known_fish)
raise
for fish in tracked_iter(new_fishes.values(),
'Integrity Checking'):
errors = []
# Check if time restricted.
if fish['computed']['timeRestricted'] and \
fish['start'] == 0 and fish['end'] == 24:
errors += ['should be time restricted']
elif not fish['computed']['timeRestricted'] and \
not (fish['start'] == 0 and fish['end'] == 24):
errors += ['should not be time restricted']
# Check if weather restricted.
if fish['computed']['weatherRestricted'] and \
len(fish['prevWeather'] or []) == 0 and \
len(fish['weather'] or []) == 0:
errors += ['should be weather restricted']
elif not fish['computed']['weatherRestricted'] and \
(len(fish['weather'] or []) != 0 or \
len(fish['prevWeather'] or []) != 0):
errors += ['should not be weather restricted']
if len(errors) > 0:
if 'dataMissing' in fish and fish['dataMissing']:
errors += ['data missing for limited-time fish']
fish['integrityErrors'] = errors
def _get_item_lookup_dict_entries():
# Collect all of the fish and tackle names.
fish_and_tackle_names = list(set(filter(None, reduce(
add, [[fish['name']] +
list((fish.get('intuition', {}) or {}).keys()) +
(fish['bait'] or [])
for fish in new_fishes.values()], []))))
# Match these with records in the Item sheet.
for item in tracked_iter(realm.game_data.get_sheet(Item),
'Getting fish and tackle entries'):
if item.name not in fish_and_tackle_names:
continue
yield (item.name, item.key)
WEATHER = dict([(x.name, x.key) for x in realm.game_data.get_sheet(Weather)])
ITEM = dict(_get_item_lookup_dict_entries())
with open("private/fishDataNew.yaml", 'w') as f:
# Make things prettier...
def represent_none(self, _):
return self.represent_scalar('tag:yaml.org,2002:null', '')
def transformed_fish_pair(fish):
fish_entry = dict(fish)
del fish_entry['name']
del fish_entry['computed']
return fish['name'], fish_entry
Dumper.add_representer(type(None), represent_none)
yaml.dump(dict([transformed_fish_pair(fish) for fish in new_fishes.values()]),
f, Dumper=Dumper, default_flow_style=False, sort_keys=False)
# f.write('---\n')
# f.writelines(['%s\n' % str(fish['name']) for fish in list(new_fishes.values())])
def convert_fish_to_json(fish: Fish):
"""
Converts a Fish entry into JSON. This form is intended to be useful to the
web-site front-end. All language-specific values use lookup IDs.
"""
try:
# Get the new database entry for this fish. (it better exist!)
db_entry = new_fishes[fish.item.key]
weather_keys = list(sorted([WEATHER[x] for x in (db_entry['weather'] or [])]))
prev_weather_keys = list(sorted([WEATHER[x] for x in (db_entry['prevWeather'] or [])]))
bait_keys = [ITEM[x] for x in (db_entry['bait'] or [])]
intuition_entries = {}
if db_entry.get('intuition') is not None:
intuition_entries = dict([(ITEM[x[0]], x[1]) for x in db_entry['intuition'].items()])
def get_location_key(spot):
if isinstance(spot, SpearfishingNode):
if spot.hidden:
return spot.gathering_point_base.key
return spot.key
aquarium_entry = None
if fish.aquarium is not None:
aquarium_entry = {'water': str(fish.aquarium['AquariumWater']),
'size': int(fish.aquarium['Size'])}
folklore_key = False
if fish.params is not None:
folklore = fish.params['GatheringSubCategory']
if folklore is not None:
folklore_key = folklore.key
json_entry = {
'_id': fish.item.key,
# Information sourced via players
'dataMissing': db_entry.get('dataMissing', False),
'prevWeather': prev_weather_keys,
'weather': weather_keys,
'start': db_entry['start'],
'end': db_entry['end'],
'bait': bait_keys,
'intuition': intuition_entries,
'intuitionLength': db_entry['intuitionLength'],
'hookset': db_entry['hookset'],
'tug': db_entry['tug'],
'snagging': db_entry['snagging'],
'patch': db_entry['patch'],
# Information sourced via DATs
'location': [get_location_key(x) for x in fish.spots],
'timeRestricted': fish.params.time_restricted if fish.params is not None else False,
'weatherRestricted': fish.params.weather_restricted if fish.params is not None else False,
'folklore': folklore_key,
'spearfishing': fish.spearfishing,
'bigfish': fish.item.rarity >= 2,
'quest': len(fish.quest) > 0,
# 'shop': len(fish.shop) > 0,
'satisfaction': fish.satisfaction is not None,
'craft': len(fish.craft) > 0,
'gc': fish.gc is not None,
'leve': len(fish.leve) > 0,
'scrip': fish.scrip is not None,
'reduce': fish.reduce,
'aquarium': aquarium_entry,
'fishEyes': supports_fish_eyes(fish),
'bigFish': is_big_fish(fish)
}
return fish.item.key, json_entry
except Exception:
print("ERROR: While processing %s" % fish.item.name)
import pprint
pprint.pprint(db_entry)
raise
def _make_localized_field(fld_name, row, col_name):
from pysaintcoinach.ex import IMultiRow
from pysaintcoinach.xiv import IXivRow
if isinstance(row, IXivRow):
row = row.source_row
if not isinstance(row, IMultiRow):
raise TypeError('Expected row to be a IMultiRow')
def try_get_value(row, col_name, lang):
try:
value = row[(col_name, lang)]
if value != '':
return value
# Fall through if value is blank!
except KeyError:
pass
# Use the default language name instead...
value = row[col_name]
# logging.warning("Missing %s data for %s[%u][%s], using \"%s\" instead.",
# lang.name,
# row.sheet.name,
# row.key,
# col_name,
# value)
return value
return map(lambda lang: (fld_name + lang.get_suffix(), try_get_value(row, col_name, lang)), LANGUAGES)
def _make_static_localized_field(fld_name, value):
return zip([fld_name + lang.get_suffix() for lang in LANGUAGES],
repeat(value, len(LANGUAGES)))
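# Both helpers above yield (field_name + language_suffix, value) pairs -- e.g.
# something like ('name_en', 'Merlthor Goby'), ('name_ja', ...), and so on for
# each entry in LANGUAGES. The exact suffixes come from Language.get_suffix(),
# and the fish name here is only an illustration.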
def __build_supporting_json_tables(_iter: Iterable[Fish]):
items = {}
fishing_nodes = {}
spearfishing_nodes = {}
territories = {}
zones = {}
regions = {}
weather_types = {}
folklore_books = {}
# The ITEMS table is generated from the fish and tackle data (ITEMS).
for item_id in tracked_iter(ITEM.values(), 'Generating ITEMS data table'):
item_entry = realm.game_data.get_sheet(Item)[item_id]
items[item_id] = dict([
('_id', item_id),
*_make_localized_field('name', item_entry, 'Name'),
('icon', '%06u' % item_entry.get_raw('Icon'))])
# The rest is based on which fish we actually have.
# Technically, we should still generate the territory list for everything,
# but screw that, only what we actually need is fine...
for fish in tracked_iter(_iter, 'Generating necessary lookup tables'):
territories_to_add = set()
if fish.spearfishing:
def _decode_spearfishing_node_name(x):
if x.hidden:
return _make_static_localized_field('name', 'Swimming Shadows')
else:
return _make_localized_field('name', x.place_name, 'Name')
for spot in fish.spots:
if spot.gathering_point_base.key not in spearfishing_nodes:
spearfishing_nodes[spot.gathering_point_base.key] = dict([
('_id', spot.gathering_point_base.key),
*_decode_spearfishing_node_name(spot),
('territory_id', spot.territory_type.key),
('placename_id', spot.place_name.key),
('hidden', spot.hidden)])
territories_to_add.add(spot.territory_type)
else:
for spot in fish.spots:
if spot.key not in fishing_nodes:
fishing_nodes[spot.key] = dict([
('_id', spot.key),
*_make_localized_field('name', spot.place_name, 'Name'),
('territory_id', spot.get_raw('TerritoryType')),
('placename_id', spot.place_name.key),
('map_coords', [spot.map_x, spot.map_y, spot.radius])])
territories_to_add.add(spot.territory_type)
for territory in territories_to_add:
if territory is not None and territory.key not in territories:
def _collect_weather_rates(rate):
return [(r[1].key, r[0]) for r in rate.weather_rates if r[1].key != 0]
territories[territory.key] = dict({
'_id': territory.key,
'map_id': territory.map.key,
'map_scale': territory.map.size_factor,
'zone_id': territory.place_name.key,
'region_id': territory.region_place_name.key,
'weather_rates': _collect_weather_rates(territory.weather_rate)})
# Add entries for this territory's region and zone as well.
if territory.place_name.key not in zones:
zones[territory.place_name.key] = dict(
_make_localized_field('name', territory.place_name, 'Name'))
if territory.region_place_name.key not in regions:
regions[territory.region_place_name.key] = dict(
_make_localized_field('name', territory.region_place_name, 'Name'))
# Add any new unique weather types to the table.
for weather in territory.weather_rate.possible_weathers:
if weather.key != 0 and weather.key not in weather_types:
weather_types[weather.key] = dict([
*_make_localized_field('name', weather, 'Name'),
('icon', '%06u' % weather.get_raw('Icon'))])
if fish.params is not None:
folklore = fish.params['GatheringSubCategory']
if folklore is not None and folklore.key not in folklore_books:
folklore_books[folklore.key] = dict([
*_make_localized_field('book', folklore, 'FolkloreBook'),
*_make_localized_field('name', folklore['Item'], 'Name')])
return {'items': dict(sorted(items.items(), key=itemgetter(0))),
'fishing_nodes': dict(sorted(fishing_nodes.items(), key=itemgetter(0))),
'spearfishing_nodes': dict(sorted(spearfishing_nodes.items(), key=itemgetter(0))),
'folklore_books': dict(sorted(folklore_books.items(), key=itemgetter(0))),
'territories': dict(sorted(territories.items(), key=itemgetter(0))),
'zones': dict(sorted(zones.items(), key=itemgetter(0))),
'regions': dict(sorted(regions.items(), key=itemgetter(0))),
'weather_types': dict(sorted(weather_types.items(), key=itemgetter(0)))}
def pretty_dump(obj):
return json.dumps(obj, sort_keys=False, indent=2).replace('\n', '\n ')
with open("private/new_data.js", 'w') as f:
# Output everything in JavaScript format, using IDs to support localization.
import json
import datetime
support_tables = __build_supporting_json_tables(important_fish)
f.write('const DATA = {\n')
f.write(' FISH: %s,\n' % pretty_dump(dict(map(convert_fish_to_json,
tracked_iter(important_fish,
'Converting fish')))))
f.write(' FISHING_SPOTS: %s,\n' % pretty_dump(support_tables['fishing_nodes']))
f.write(' SPEARFISHING_SPOTS: %s,\n' % pretty_dump(support_tables['spearfishing_nodes']))
f.write(' ITEMS: %s,\n' % pretty_dump(support_tables['items']))
f.write(' TERRITORIES: %s,\n' % pretty_dump(support_tables['territories']))
f.write(' WEATHER_TYPES: %s,\n' % pretty_dump(support_tables['weather_types']))
f.write(' REGIONS: %s,\n' % pretty_dump(support_tables['regions']))
f.write(' ZONES: %s,\n' % pretty_dump(support_tables['zones']))
f.write(' FOLKLORE: %s,\n' % pretty_dump(support_tables['folklore_books']))
f.write(' VERSION: "%s"\n};' % datetime.datetime.now().strftime('%Y.%m.%d.%H.%M'))
_finish_time = timeit.default_timer()
from datetime import timedelta
print("Total Time: %s" % timedelta(seconds=_finish_time - _start_time))
| 38.850714 | 109 | 0.593959 |