Dataset columns, in the order they appear in each row below:

hexsha | stringlengths 40..40
size | int64 3..1.03M
ext | stringclasses 10 values
lang | stringclasses 1 value
max_stars_repo_path | stringlengths 3..972
max_stars_repo_name | stringlengths 6..130
max_stars_repo_head_hexsha | stringlengths 40..78
max_stars_repo_licenses | sequencelengths 1..10
max_stars_count | int64 1..191k ⌀
max_stars_repo_stars_event_min_datetime | stringlengths 24..24 ⌀
max_stars_repo_stars_event_max_datetime | stringlengths 24..24 ⌀
max_issues_repo_path | stringlengths 3..972
max_issues_repo_name | stringlengths 6..130
max_issues_repo_head_hexsha | stringlengths 40..78
max_issues_repo_licenses | sequencelengths 1..10
max_issues_count | int64 1..116k ⌀
max_issues_repo_issues_event_min_datetime | stringlengths 24..24 ⌀
max_issues_repo_issues_event_max_datetime | stringlengths 24..24 ⌀
max_forks_repo_path | stringlengths 3..972
max_forks_repo_name | stringlengths 6..130
max_forks_repo_head_hexsha | stringlengths 40..78
max_forks_repo_licenses | sequencelengths 1..10
max_forks_count | int64 1..105k ⌀
max_forks_repo_forks_event_min_datetime | stringlengths 24..24 ⌀
max_forks_repo_forks_event_max_datetime | stringlengths 24..24 ⌀
content | stringlengths 3..1.03M
avg_line_length | float64 1.13..941k
max_line_length | int64 2..941k
alphanum_fraction | float64 0..1
14facc158c9ae0211dc9f79665a995fbf1d899ee | 2,366 | py | Python | amime/modules/anime/TV-SHOW/KATEGORI/SCIFI/ktgr_action7.py | Myudi422/ccgnime_req | a0f7596ba101204539b4120dffa08912b6560efe | ["MIT"] | null | null | null | amime/modules/anime/TV-SHOW/KATEGORI/SCIFI/ktgr_action7.py | Myudi422/ccgnime_req | a0f7596ba101204539b4120dffa08912b6560efe | ["MIT"] | null | null | null | amime/modules/anime/TV-SHOW/KATEGORI/SCIFI/ktgr_action7.py | Myudi422/ccgnime_req | a0f7596ba101204539b4120dffa08912b6560efe | ["MIT"] | null | null | null |

import httpx
from anilist.types import Anime
from pyrogram import filters
from pyrogram.types import CallbackQuery
from pyromod.helpers import ikb
from pyromod.nav import Pagination
from amime.amime import Amime
@Amime.on_callback_query(filters.regex(r"^tv_scifi7 anime (?P<page>\d+)"))
async def anime_suggestions(bot: Amime, callback: CallbackQuery):
page = int(callback.matches[0]["page"])
message = callback.message
lang = callback._lang
keyboard = []
async with httpx.AsyncClient(http2=True) as client:
response = await client.post(
url="https://graphql.anilist.co",
json=dict(
query="""
query($per_page: Int) {
Page(page: 8, perPage: $per_page) {
media(type: ANIME, format: TV, sort: TRENDING_DESC, status: FINISHED, genre: "sci-fi") {
id
title {
romaji
english
native
}
siteUrl
}
}
}
""",
variables=dict(
                    per_page=100,  # key must match the $per_page variable declared in the query
),
),
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
)
data = response.json()
await client.aclose()
if data["data"]:
items = data["data"]["Page"]["media"]
suggestions = [
Anime(id=item["id"], title=item["title"], url=item["siteUrl"])
for item in items
]
layout = Pagination(
suggestions,
item_data=lambda i, pg: f"menu {i.id}",
item_title=lambda i, pg: i.title.romaji,
page_data=lambda pg: f"tv_scifi7 anime {pg}",
)
lines = layout.create(page, lines=8)
if len(lines) > 0:
keyboard += lines
keyboard.append([(lang.Prev, "tv_scifi6 anime 1"), (lang.Next, "tv_scifi8 anime 1")])
keyboard.append([(lang.back_button, "ktgr-finish")])
await message.edit_text(
lang.suggestions_text,
reply_markup=ikb(keyboard),
)
| 31.972973 | 112 | 0.482671 |
7b30514631433ce7fe88a6e4ceb8ab644b46db91 | 5,348 | py | Python | src/datamgr/datamanager/collection/cmdb/processes/host.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | ["MIT"] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/datamgr/datamanager/collection/cmdb/processes/host.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | ["MIT"] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/datamgr/datamanager/collection/cmdb/processes/host.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | ["MIT"] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z |

# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from collection.common.process import BKDFlowProcessor
from collection.common.process_nodes import (
CleanTemplate,
IgniteStorageTemplate,
ProcessTemplate,
)
from collection.conf import constants
class CleanCMDBHostInfoTemplate(CleanTemplate):
template = "clean_of_cmdb_host_info.jinja"
class IgniteStorageCMDBHostInfoTemplate(IgniteStorageTemplate):
template = "storage_of_cmdb_host_info.jinja"
class DataModelCMDBHostInfoTemplate(ProcessTemplate):
template = "datamodel_of_cmdb_host_info.jinja"
class DataModelInstCMDBHostInfoTemplate(ProcessTemplate):
template = "datamodel_inst_of_cmdb_host_info.jinja"
BKDFlowProcessor.regiter_process_template(CleanCMDBHostInfoTemplate)
BKDFlowProcessor.regiter_process_template(IgniteStorageCMDBHostInfoTemplate)
BKDFlowProcessor.regiter_process_template(DataModelCMDBHostInfoTemplate)
BKDFlowProcessor.regiter_process_template(DataModelInstCMDBHostInfoTemplate)
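# Note (editor's reading, not documented here): the "$N.field" strings in
# the process_context blocks below appear to reference a field from the
# output of pipeline step N, e.g. "$0.raw_data_id" reuses the raw_data_id
# produced by the AccessNode step and "$4.model_id" the model created by
# the DataModelNode step.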
def process_cmdb_host_info():
process_cmdb_host_config = {
"pipeline": [
{
"process_node": "AccessNode",
"process_template": "AccessCustomTemplate",
"process_context": {
"bk_biz_id": constants.BKDATA_BIZ_ID,
"raw_data_name": constants.CMDB_HOST_TABLE_NAME,
"raw_data_alias": constants.CMDB_HOST_TABLE_ALIA,
},
},
{
"process_node": "CleanNode",
"process_template": "CleanCMDBHostInfoTemplate",
"process_context": {
"bk_biz_id": constants.BKDATA_BIZ_ID,
"raw_data_id": "$0.raw_data_id",
"result_table_name": constants.CMDB_HOST_TABLE_NAME,
"result_table_alias": constants.CMDB_HOST_TABLE_ALIA,
},
},
{
"process_node": "IgniteStorageNode",
"process_template": "IgniteStorageCMDBHostInfoTemplate",
"process_context": {
"bk_biz_id": constants.BKDATA_BIZ_ID,
"raw_data_id": "$0.raw_data_id",
"result_table_id": f"{constants.BKDATA_BIZ_ID}_{constants.CMDB_HOST_TABLE_NAME}",
"result_table_name": constants.CMDB_HOST_TABLE_NAME,
"result_table_alias": constants.CMDB_HOST_TABLE_ALIA,
"storage_cluster": constants.DEFAULT_IGNITE_CLUSTER,
},
},
{
"process_node": "AuthProjectDataNode",
"process_template": "SimpleTemplate",
"process_context": {
"project_id": constants.BKPUB_PROJECT_ID,
"bk_biz_id": constants.BKDATA_BIZ_ID,
"result_table_id": f"{constants.BKDATA_BIZ_ID}_{constants.CMDB_HOST_TABLE_NAME}",
},
},
{
"process_node": "DataModelNode",
"process_template": "DataModelCMDBHostInfoTemplate",
"process_context": {
"project_id": constants.BKPUB_PROJECT_ID,
"model_name": constants.CMDB_HOST_DATAMODEL_NAME,
},
},
{
"process_node": "DataModelInstNode",
"process_template": "DataModelInstCMDBHostInfoTemplate",
"process_context": {
"model_id": "$4.model_id",
"project_id": constants.BKPUB_PROJECT_ID,
"bk_biz_id": constants.BKDATA_BIZ_ID,
"input_result_table_id": f"{constants.BKDATA_BIZ_ID}_{constants.CMDB_HOST_TABLE_NAME}",
"table_name": constants.CMDB_HOST_DATAMODEL_TABLE_NAME,
"cluster_name": constants.DEFAULT_IGNITE_CLUSTER,
},
},
]
}
BKDFlowProcessor(process_cmdb_host_config["pipeline"]).build()
| 45.709402 | 111 | 0.642857 |
475f9aed1b8fb98c5f0912be8b263461de871cc4 | 931 | py | Python | tests/unit_tests/mathTools/testPricingEngine.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | ["MIT"] | 1 | 2021-10-04T03:15:50.000Z | 2021-10-04T03:15:50.000Z | tests/unit_tests/mathTools/testPricingEngine.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | ["MIT"] | null | null | null | tests/unit_tests/mathTools/testPricingEngine.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | ["MIT"] | null | null | null |

import unittest
import numpy as np
from irLib.mathTools.pricingEngine import blackEngine, bachelierEngine
F = 0.03
K = 0.032
sigma = 0.2
time2Maturity = 2
# r = 0 for easy integration into swaption pricing, which just needs the
# future expected value of the forward swap rate over the strike
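# For reference (inferred from the expected values below, not stated in
# this file): with a discount factor of 1 these match the undiscounted
# Black-76 and Bachelier call prices,
#   Black:     C = F*N(d1) - K*N(d2),  d1 = (ln(F/K) + 0.5*sigma^2*T)/(sigma*sqrt(T)),  d2 = d1 - sigma*sqrt(T)
#   Bachelier: C = (F-K)*N(d) + sigma*sqrt(T)*phi(d),  d = (F-K)/(sigma*sqrt(T))
# with the puts following from put-call parity, P = C - (F - K).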
class testPricingEngine(unittest.TestCase):
def testBlack(self):
b = blackEngine()
b.setArgument(F, K, sigma, time2Maturity, 'call')
self.assertEqual(np.round(b.calculate(), 9), 0.002576078)
b.setArgument(F, K, sigma, time2Maturity, 'put')
self.assertEqual(np.round(b.calculate(), 9), 0.004576078)
def testBachelier(self):
b = bachelierEngine()
b.setArgument(F, K, sigma, time2Maturity, 'call')
self.assertEqual(np.round(b.calculate(), 9), 0.111840738)
b.setArgument(F, K, sigma, time2Maturity, 'put')
self.assertEqual(np.round(b.calculate(), 9), 0.113840738)
| 34.481481 | 73 | 0.683136 |
b11817406e0a7b597961436fc53be75b9f309b16 | 79,537 | py | Python | ais/imo_001_11_handcoded.py | rolker/noaadata | 052a4b7d634e0a3a92a348b543e5db536ae24f02 | ["Apache-2.0"] | 35 | 2015-02-15T17:23:00.000Z | 2022-01-27T01:49:43.000Z | ais/imo_001_11_handcoded.py | rolker/noaadata | 052a4b7d634e0a3a92a348b543e5db536ae24f02 | ["Apache-2.0"] | 2 | 2017-10-04T17:24:38.000Z | 2017-10-04T18:22:00.000Z | ais/imo_001_11_handcoded.py | rolker/noaadata | 052a4b7d634e0a3a92a348b543e5db536ae24f02 | ["Apache-2.0"] | 22 | 2015-02-08T13:29:58.000Z | 2022-03-09T03:03:16.000Z |

#!/usr/bin/env python
__version__ = '$Revision: 4791 $'.split()[1]
__date__ = '$Date: 2010-03-31 $'.split()[1]
__author__ = 'xmlbinmsg'
__doc__='''
Autogenerated python functions to serialize/deserialize binary messages.
Generated by: ../scripts/aisxmlbinmsg2py.py
Need to then wrap these functions with the outer AIS packet and then
convert the whole binary blob to a NMEA string. Those functions are
not currently provided in this file.
serialize: python to ais binary
deserialize: ais binary to python
The generated code uses translators.py, binary.py, and aisstring.py
which should be packaged with the resulting files.
@requires: U{epydoc<http://epydoc.sourceforge.net/>} > 3.0alpha3
@requires: U{BitVector<http://cheeseshop.python.org/pypi/BitVector>}
@author: '''+__author__+'''
@version: ''' + __version__ +'''
@var __date__: Date of last svn commit
@undocumented: __version__ __author__ __doc__ parser
@status: under development
@license: Generated code has no license
@todo: FIX: put in a description of the message here with fields and types.
'''
import sys
from decimal import Decimal
from aisutils.BitVector import BitVector
from aisutils import aisstring
from aisutils import sqlhelp
from aisutils import uscg
from aisutils import binary
# FIX: check to see if these will be needed
TrueBV = BitVector(bitstring="1")
"Why always rebuild the True bit? This should speed things up a bunch"
FalseBV = BitVector(bitstring="0")
"Why always rebuild the False bit? This should speed things up a bunch"
fieldList = (
'MessageID',
'RepeatIndicator',
'UserID',
'Spare',
'dac',
'fid',
'latitude',
'longitude',
'day',
'hour',
'min',
'avewind',
'windgust',
'winddir',
'windgustdir',
'airtemp',
'relhumid',
'dewpoint',
'airpressure',
'airpressuretrend',
'horizvis',
'waterlevel',
'waterleveltrend',
'surfcurspeed',
'surfcurdir',
'curspeed2',
'curdir2',
'curlevel2',
'curspeed3',
'curdir3',
'curlevel3',
'sigwaveheight',
'waveperiod',
'wavedir',
'swellheight',
'swellperiod',
'swelldir',
'seastate',
'watertemp',
'preciptype',
'salinity',
'ice',
'Spare2',
)
fieldListPostgres = (
'MessageID',
'RepeatIndicator',
'UserID',
'Spare',
'dac',
'fid',
'Position', # PostGIS data type
'day',
'hour',
'min',
'avewind',
'windgust',
'winddir',
'windgustdir',
'airtemp',
'relhumid',
'dewpoint',
'airpressure',
'airpressuretrend',
'horizvis',
'waterlevel',
'waterleveltrend',
'surfcurspeed',
'surfcurdir',
'curspeed2',
'curdir2',
'curlevel2',
'curspeed3',
'curdir3',
'curlevel3',
'sigwaveheight',
'waveperiod',
'wavedir',
'swellheight',
'swellperiod',
'swelldir',
'seastate',
'watertemp',
'preciptype',
'salinity',
'ice',
'Spare2',
)
toPgFields = {
'latitude':'Position',
'longitude':'Position',
}
'''
Go to the Postgis field names from the straight field name
'''
fromPgFields = {
'Position':('latitude','longitude',),
}
'''
Go from the Postgis field names to the straight field name
'''
pgTypes = {
'Position':'POINT',
}
'''
Lookup table for each postgis field name to get its type.
'''
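# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file): how the
# toPgFields/fromPgFields/pgTypes tables above fit together.  The helper
# name _example_position_wkt is hypothetical, and 'params' is assumed to
# be a dict returned by decode() further below.
def _example_position_wkt(params):
    # Mirrors the ordering used by sqlInsert() later in this file:
    # fromPgFields['Position'] lists latitude before longitude.
    vals = ' '.join([str(params[f]) for f in fromPgFields['Position']])
    return pgTypes['Position'] + '(' + vals + ')'
# ----------------------------------------------------------------------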
def encode(params, validate=False):
    '''Create an imo_met_hydro binary message payload to pack into an AIS Msg imo_met_hydro.
Fields in params:
- MessageID(uint): AIS message number. Must be 8 (field automatically set to "8")
- RepeatIndicator(uint): Indicated how many times a message has been repeated
- UserID(uint): MMSI number of transmitter broadcasting the message
- Spare(uint): Reserved for definition by a regional authority. (field automatically set to "0")
- dac(uint): Designated Area Code - part 1 of the IAI (field automatically set to "1")
- fid(uint): Functional Identifier - part 2 of the IAI (field automatically set to "11")
- latitude(decimal): Location of the vessel. North South location
- longitude(decimal): Location of the vessel. East West location
- day(uint): Day 0..31
- hour(uint): Hour 0..23
- min(uint): Min
- avewind(uint): Average wind speed values for the last 10 minutes.
- windgust(uint): Wind gust is the max wind speed value reading during the last 10 minutes.
- winddir(uint): Wind direction
- windgustdir(uint): Wind direction for the gust.
- airtemp(decimal): Dry bulb temperature
- relhumid(uint): Relative humidity
- dewpoint(decimal): Dew Point
- airpressure(udecimal): Air pressure
- airpressuretrend(uint): Air pressure trend
- horizvis(udecimal): Horizontal visibility
- waterlevel(decimal): Water level (incl. tide)
- waterleveltrend(uint): Water level trend
- surfcurspeed(udecimal): Surface current speed
- surfcurdir(uint): Surface current direction
- curspeed2(udecimal): Level 2 current speed
- curdir2(uint): Level 2 current direction
- curlevel2(uint): Measuring level below sea surface for level 2
- curspeed3(udecimal): Level 3 current speed
- curdir3(uint): Level 3 current direction
- curlevel3(uint): Measuring level below sea surface for level 3
- sigwaveheight(udecimal): Significant wave height
- waveperiod(uint): Wave period
- wavedir(uint): Wave direction
- swellheight(udecimal): Swell height
- swellperiod(uint): Swell period
- swelldir(uint): Swell direction
- seastate(uint): Sea state according to the Beaufort scale
- watertemp(udecimal): Water temperature
- preciptype(uint): According to WMO
- salinity(decimal): Salinity
- ice(uint): Yes or no for the presence of ice
- Spare2(uint): Must be zero (field automatically set to "0")
    @param params: Dictionary of field names/values. Throws a ValueError exception if a required field is missing
@param validate: Set to true to cause checking to occur. Runs slower. FIX: not implemented.
@rtype: BitVector
    @return: encoded binary message (for binary messages, this needs to be wrapped in a msg 8)
@note: The returned bits may not be 6 bit aligned. It is up to you to pad out the bits.
'''
assert False # Need to handle encoding in the style of Ohmex using Spare2
bvList = []
bvList.append(binary.setBitVectorSize(BitVector(intVal=8),6))
if 'RepeatIndicator' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['RepeatIndicator']),2))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=0),2))
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['UserID']),30))
bvList.append(binary.setBitVectorSize(BitVector(intVal=0),2))
bvList.append(binary.setBitVectorSize(BitVector(intVal=1),10))
bvList.append(binary.setBitVectorSize(BitVector(intVal=11),6))
if 'latitude' in params:
bvList.append(binary.bvFromSignedInt(int(Decimal(params['latitude'])*Decimal('60000')),24))
else:
bvList.append(binary.bvFromSignedInt(5460000,24))
if 'longitude' in params:
bvList.append(binary.bvFromSignedInt(int(Decimal(params['longitude'])*Decimal('60000')),25))
else:
bvList.append(binary.bvFromSignedInt(10860000,25))
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['day']),5))
if 'hour' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['hour']),5))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=31),5))
if 'min' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['min']),6))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=63),6))
if 'avewind' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['avewind']),7))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=127),7))
if 'windgust' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['windgust']),7))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=127),7))
if 'winddir' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['winddir']),9))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=511),9))
if 'windgustdir' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['windgustdir']),9))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=511),9))
if 'airtemp' in params:
bvList.append(binary.bvFromSignedInt(int(Decimal(params['airtemp'])*Decimal('10')),11))
else:
bvList.append(binary.bvFromSignedInt(1023,11))
if 'relhumid' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['relhumid']),7))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=127),7))
if 'dewpoint' in params:
bvList.append(binary.bvFromSignedInt(int(Decimal(params['dewpoint'])*Decimal('10')),10))
else:
bvList.append(binary.bvFromSignedInt(511,10))
if 'airpressure' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['airpressure']-(800))*Decimal('1')))),9))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(1311)),9))
if 'airpressuretrend' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['airpressuretrend']),2))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=3),2))
if 'horizvis' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['horizvis'])*Decimal('10')))),8))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(255)),8))
bvList.append(binary.bvFromSignedInt(int(Decimal(params['waterlevel'])*Decimal('10')),9))
if 'waterleveltrend' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['waterleveltrend']),2))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=3),2))
if 'surfcurspeed' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['surfcurspeed'])*Decimal('10')))),8))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(255)),8))
if 'surfcurdir' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['surfcurdir']),9))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=511),9))
if 'curspeed2' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['curspeed2'])*Decimal('10')))),8))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(255)),8))
if 'curdir2' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['curdir2']),9))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=511),9))
if 'curlevel2' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['curlevel2']),5))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=31),5))
if 'curspeed3' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['curspeed3'])*Decimal('10')))),8))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(255)),8))
if 'curdir3' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['curdir3']),9))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=511),9))
if 'curlevel3' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['curlevel3']),5))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=31),5))
if 'sigwaveheight' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['sigwaveheight'])*Decimal('10')))),8))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(255)),8))
if 'waveperiod' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['waveperiod']),6))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=63),6))
if 'wavedir' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['wavedir']),9))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=511),9))
if 'swellheight' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['swellheight'])*Decimal('10')))),8))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(255)),8))
if 'swellperiod' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['swellperiod']),6))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=63),6))
if 'swelldir' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['swelldir']),9))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=511),9))
if 'seastate' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['seastate']),4))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=15),4))
if 'watertemp' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int((Decimal(params['watertemp']-(-10))*Decimal('10')))),10))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=int(923)),10))
if 'preciptype' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['preciptype']),3))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=7),3))
if 'salinity' in params:
bvList.append(binary.bvFromSignedInt(int(Decimal(params['salinity'])*Decimal('10')),9))
else:
bvList.append(binary.bvFromSignedInt(923,9))
if 'ice' in params:
bvList.append(binary.setBitVectorSize(BitVector(intVal=params['ice']),2))
else:
bvList.append(binary.setBitVectorSize(BitVector(intVal=3),2))
bvList.append(binary.setBitVectorSize(BitVector(intVal=0),6)) # FIX: add Ohmex style encoding
return binary.joinBV(bvList)
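# Note (editor's reading, not in the original comments): encode() above
# begins with `assert False`, so as shipped it cannot be called; the
# inline comments suggest the Ohmex-style use of Spare2 for the water
# level still needs to be folded into the encoder.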
def decode(bv, validate=False):
    '''Unpack an imo_met_hydro message
Fields in params:
- MessageID(uint): AIS message number. Must be 8 (field automatically set to "8")
- RepeatIndicator(uint): Indicated how many times a message has been repeated
- UserID(uint): MMSI number of transmitter broadcasting the message
- Spare(uint): Reserved for definition by a regional authority. (field automatically set to "0")
- dac(uint): Designated Area Code - part 1 of the IAI (field automatically set to "1")
- fid(uint): Functional Identifier - part 2 of the IAI (field automatically set to "11")
- latitude(decimal): Location of the vessel. North South location
- longitude(decimal): Location of the vessel. East West location
- day(uint): Day 0..31
- hour(uint): Hour 0..23
- min(uint): Min
- avewind(uint): Average wind speed values for the last 10 minutes.
- windgust(uint): Wind gust is the max wind speed value reading during the last 10 minutes.
- winddir(uint): Wind direction
- windgustdir(uint): Wind direction for the gust.
- airtemp(decimal): Dry bulb temperature
- relhumid(uint): Relative humidity
- dewpoint(decimal): Dew Point
- airpressure(udecimal): Air pressure
- airpressuretrend(uint): Air pressure trend
- horizvis(udecimal): Horizontal visibility
- waterlevel(decimal): Water level (incl. tide)
- waterleveltrend(uint): Water level trend
- surfcurspeed(udecimal): Surface current speed
- surfcurdir(uint): Surface current direction
- curspeed2(udecimal): Level 2 current speed
- curdir2(uint): Level 2 current direction
- curlevel2(uint): Measuring level below sea surface for level 2
- curspeed3(udecimal): Level 3 current speed
- curdir3(uint): Level 3 current direction
- curlevel3(uint): Measuring level below sea surface for level 3
- sigwaveheight(udecimal): Significant wave height
- waveperiod(uint): Wave period
- wavedir(uint): Wave direction
- swellheight(udecimal): Swell height
- swellperiod(uint): Swell period
- swelldir(uint): Swell direction
- seastate(uint): Sea state according to the Beaufort scale
- watertemp(udecimal): Water temperature
- preciptype(uint): According to WMO
- salinity(decimal): Salinity
- ice(uint): Yes or no for the presence of ice
- Spare2(uint): Must be zero (field automatically set to "0")
@type bv: BitVector
@param bv: Bits defining a message
@param validate: Set to true to cause checking to occur. Runs slower. FIX: not implemented.
@rtype: dict
@return: params
'''
#Would be nice to check the bit count here..
#if validate:
# assert (len(bv)==FIX: SOME NUMBER)
r = {}
r['MessageID']=8
r['RepeatIndicator']=int(bv[6:8])
r['UserID']=int(bv[8:38])
r['Spare']=0
r['dac']=1
r['fid']=11
r['latitude']=Decimal(binary.signedIntFromBV(bv[56:80]))/Decimal('60000')
r['longitude']=Decimal(binary.signedIntFromBV(bv[80:105]))/Decimal('60000')
r['day']=int(bv[105:110])
r['hour']=int(bv[110:115])
r['min']=int(bv[115:121])
r['avewind']=int(bv[121:128])
r['windgust']=int(bv[128:135])
r['winddir']=int(bv[135:144])
r['windgustdir']=int(bv[144:153])
r['airtemp']=Decimal(binary.signedIntFromBV(bv[153:164]))/Decimal('10')
r['relhumid']=int(bv[164:171])
r['dewpoint']=Decimal(binary.signedIntFromBV(bv[171:181]))/Decimal('10')
r['airpressure']=Decimal(int(bv[181:190]))/Decimal('1')+Decimal('800')
r['airpressuretrend']=int(bv[190:192])
r['horizvis']=Decimal(int(bv[192:200]))/Decimal('10')
r['waterlevel']=Decimal(binary.signedIntFromBV(bv[200:209]))/Decimal('10')
r['waterleveltrend']=int(bv[209:211])
r['surfcurspeed']=Decimal(int(bv[211:219]))/Decimal('10')
r['surfcurdir']=int(bv[219:228])
r['curspeed2']=Decimal(int(bv[228:236]))/Decimal('10')
r['curdir2']=int(bv[236:245])
r['curlevel2']=int(bv[245:250])
r['curspeed3']=Decimal(int(bv[250:258]))/Decimal('10')
r['curdir3']=int(bv[258:267])
r['curlevel3']=int(bv[267:272])
r['sigwaveheight']=Decimal(int(bv[272:280]))/Decimal('10')
r['waveperiod']=int(bv[280:286])
r['wavedir']=int(bv[286:295])
r['swellheight']=Decimal(int(bv[295:303]))/Decimal('10')
r['swellperiod']=int(bv[303:309])
r['swelldir']=int(bv[309:318])
r['seastate']=int(bv[318:322])
r['watertemp']=Decimal(int(bv[322:332]))/Decimal('10')+Decimal('-10')
r['preciptype']=int(bv[332:335])
r['salinity']=Decimal(binary.signedIntFromBV(bv[335:344]))/Decimal('10')
r['ice']=int(bv[344:346])
#assert(6 == len(bv[346:352]))
#print 'Spare2:',bv[346:352]
r['Spare2']=binary.signedIntFromBV(bv[346:352])
# Ohmex uses Spare2 as signed cm
r['waterlevel'] += Decimal(str( binary.signedIntFromBV(bv[346:352])/100. ))
return r
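# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file): one way decode()
# might be driven from the 6-bit armored payload of an AIS message 8
# carrying this dac=1/fid=11 report.  binary.ais6tobitvec is assumed to
# be the armored-ASCII-to-BitVector converter in aisutils.binary; check
# your local copy before relying on it.
def _example_decode_met_hydro(payload6bit):
    bv = binary.ais6tobitvec(payload6bit)  # assumed helper name
    params = decode(bv)
    # A few of the decoded fields; see the decode() docstring for all.
    return (params['UserID'], params['latitude'], params['longitude'],
            params['waterlevel'])
# ----------------------------------------------------------------------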
def decodeMessageID(bv, validate=False):
return 8
def decodeRepeatIndicator(bv, validate=False):
return int(bv[6:8])
def decodeUserID(bv, validate=False):
return int(bv[8:38])
def decodeSpare(bv, validate=False):
return 0
def decodedac(bv, validate=False):
return 1
def decodefid(bv, validate=False):
return 11
def decodelatitude(bv, validate=False):
return Decimal(binary.signedIntFromBV(bv[56:80]))/Decimal('60000')
def decodelongitude(bv, validate=False):
return Decimal(binary.signedIntFromBV(bv[80:105]))/Decimal('60000')
def decodeday(bv, validate=False):
return int(bv[105:110])
def decodehour(bv, validate=False):
return int(bv[110:115])
def decodemin(bv, validate=False):
return int(bv[115:121])
def decodeavewind(bv, validate=False):
return int(bv[121:128])
def decodewindgust(bv, validate=False):
return int(bv[128:135])
def decodewinddir(bv, validate=False):
return int(bv[135:144])
def decodewindgustdir(bv, validate=False):
return int(bv[144:153])
def decodeairtemp(bv, validate=False):
return Decimal(binary.signedIntFromBV(bv[153:164]))/Decimal('10')
def decoderelhumid(bv, validate=False):
return int(bv[164:171])
def decodedewpoint(bv, validate=False):
return Decimal(binary.signedIntFromBV(bv[171:181]))/Decimal('10')
def decodeairpressure(bv, validate=False):
return Decimal(int(bv[181:190]))/Decimal('1')+Decimal('800')
def decodeairpressuretrend(bv, validate=False):
return int(bv[190:192])
def decodehorizvis(bv, validate=False):
return Decimal(int(bv[192:200]))/Decimal('10')
def decodewaterlevel(bv, validate=False):
return Decimal(binary.signedIntFromBV(bv[200:209]))/Decimal('10')
def decodewaterleveltrend(bv, validate=False):
return int(bv[209:211])
def decodesurfcurspeed(bv, validate=False):
return Decimal(int(bv[211:219]))/Decimal('10')
def decodesurfcurdir(bv, validate=False):
return int(bv[219:228])
def decodecurspeed2(bv, validate=False):
return Decimal(int(bv[228:236]))/Decimal('10')
def decodecurdir2(bv, validate=False):
return int(bv[236:245])
def decodecurlevel2(bv, validate=False):
return int(bv[245:250])
def decodecurspeed3(bv, validate=False):
return Decimal(int(bv[250:258]))/Decimal('10')
def decodecurdir3(bv, validate=False):
return int(bv[258:267])
def decodecurlevel3(bv, validate=False):
return int(bv[267:272])
def decodesigwaveheight(bv, validate=False):
return Decimal(int(bv[272:280]))/Decimal('10')
def decodewaveperiod(bv, validate=False):
return int(bv[280:286])
def decodewavedir(bv, validate=False):
return int(bv[286:295])
def decodeswellheight(bv, validate=False):
return Decimal(int(bv[295:303]))/Decimal('10')
def decodeswellperiod(bv, validate=False):
return int(bv[303:309])
def decodeswelldir(bv, validate=False):
return int(bv[309:318])
def decodeseastate(bv, validate=False):
return int(bv[318:322])
def decodewatertemp(bv, validate=False):
return Decimal(int(bv[322:332]))/Decimal('10')+Decimal('-10')
def decodepreciptype(bv, validate=False):
return int(bv[332:335])
def decodesalinity(bv, validate=False):
return Decimal(binary.signedIntFromBV(bv[335:344]))/Decimal('10')
def decodeice(bv, validate=False):
return int(bv[344:346])
def decodeSpare2(bv, validate=False):
return int(bv[346:352])
def printHtml(params, out=sys.stdout):
out.write("<h3>imo_met_hydro</h3>\n")
out.write("<table border=\"1\">\n")
out.write("<tr bgcolor=\"orange\">\n")
out.write("<th align=\"left\">Field Name</th>\n")
out.write("<th align=\"left\">Type</th>\n")
out.write("<th align=\"left\">Value</th>\n")
out.write("<th align=\"left\">Value in Lookup Table</th>\n")
out.write("<th align=\"left\">Units</th>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>MessageID</td>\n")
out.write("<td>uint</td>\n")
if 'MessageID' in params:
out.write(" <td>"+str(params['MessageID'])+"</td>\n")
out.write(" <td>"+str(params['MessageID'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>RepeatIndicator</td>\n")
out.write("<td>uint</td>\n")
if 'RepeatIndicator' in params:
out.write(" <td>"+str(params['RepeatIndicator'])+"</td>\n")
if str(params['RepeatIndicator']) in RepeatIndicatorDecodeLut:
out.write("<td>"+RepeatIndicatorDecodeLut[str(params['RepeatIndicator'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>UserID</td>\n")
out.write("<td>uint</td>\n")
if 'UserID' in params:
out.write(" <td>"+str(params['UserID'])+"</td>\n")
out.write(" <td>"+str(params['UserID'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>Spare</td>\n")
out.write("<td>uint</td>\n")
if 'Spare' in params:
out.write(" <td>"+str(params['Spare'])+"</td>\n")
out.write(" <td>"+str(params['Spare'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>dac</td>\n")
out.write("<td>uint</td>\n")
if 'dac' in params:
out.write(" <td>"+str(params['dac'])+"</td>\n")
out.write(" <td>"+str(params['dac'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>fid</td>\n")
out.write("<td>uint</td>\n")
if 'fid' in params:
out.write(" <td>"+str(params['fid'])+"</td>\n")
out.write(" <td>"+str(params['fid'])+"</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>latitude</td>\n")
out.write("<td>decimal</td>\n")
if 'latitude' in params:
out.write(" <td>"+str(params['latitude'])+"</td>\n")
out.write(" <td>"+str(params['latitude'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>longitude</td>\n")
out.write("<td>decimal</td>\n")
if 'longitude' in params:
out.write(" <td>"+str(params['longitude'])+"</td>\n")
out.write(" <td>"+str(params['longitude'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>day</td>\n")
out.write("<td>uint</td>\n")
if 'day' in params:
out.write(" <td>"+str(params['day'])+"</td>\n")
out.write(" <td>"+str(params['day'])+"</td>\n")
out.write("<td>days</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>hour</td>\n")
out.write("<td>uint</td>\n")
if 'hour' in params:
out.write(" <td>"+str(params['hour'])+"</td>\n")
out.write(" <td>"+str(params['hour'])+"</td>\n")
out.write("<td>hours</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>min</td>\n")
out.write("<td>uint</td>\n")
if 'min' in params:
out.write(" <td>"+str(params['min'])+"</td>\n")
out.write(" <td>"+str(params['min'])+"</td>\n")
out.write("<td>minutes</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>avewind</td>\n")
out.write("<td>uint</td>\n")
if 'avewind' in params:
out.write(" <td>"+str(params['avewind'])+"</td>\n")
out.write(" <td>"+str(params['avewind'])+"</td>\n")
out.write("<td>knots</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>windgust</td>\n")
out.write("<td>uint</td>\n")
if 'windgust' in params:
out.write(" <td>"+str(params['windgust'])+"</td>\n")
out.write(" <td>"+str(params['windgust'])+"</td>\n")
out.write("<td>knots</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>winddir</td>\n")
out.write("<td>uint</td>\n")
if 'winddir' in params:
out.write(" <td>"+str(params['winddir'])+"</td>\n")
out.write(" <td>"+str(params['winddir'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>windgustdir</td>\n")
out.write("<td>uint</td>\n")
if 'windgustdir' in params:
out.write(" <td>"+str(params['windgustdir'])+"</td>\n")
out.write(" <td>"+str(params['windgustdir'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>airtemp</td>\n")
out.write("<td>decimal</td>\n")
if 'airtemp' in params:
out.write(" <td>"+str(params['airtemp'])+"</td>\n")
out.write(" <td>"+str(params['airtemp'])+"</td>\n")
out.write("<td>degrees Celsius</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>relhumid</td>\n")
out.write("<td>uint</td>\n")
if 'relhumid' in params:
out.write(" <td>"+str(params['relhumid'])+"</td>\n")
out.write(" <td>"+str(params['relhumid'])+"</td>\n")
out.write("<td>percent</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>dewpoint</td>\n")
out.write("<td>decimal</td>\n")
if 'dewpoint' in params:
out.write(" <td>"+str(params['dewpoint'])+"</td>\n")
out.write(" <td>"+str(params['dewpoint'])+"</td>\n")
out.write("<td>degrees Celsius</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>airpressure</td>\n")
out.write("<td>udecimal</td>\n")
if 'airpressure' in params:
out.write(" <td>"+str(params['airpressure'])+"</td>\n")
out.write(" <td>"+str(params['airpressure'])+"</td>\n")
out.write("<td>hPa</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>airpressuretrend</td>\n")
out.write("<td>uint</td>\n")
if 'airpressuretrend' in params:
out.write(" <td>"+str(params['airpressuretrend'])+"</td>\n")
if str(params['airpressuretrend']) in airpressuretrendDecodeLut:
out.write("<td>"+airpressuretrendDecodeLut[str(params['airpressuretrend'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>horizvis</td>\n")
out.write("<td>udecimal</td>\n")
if 'horizvis' in params:
out.write(" <td>"+str(params['horizvis'])+"</td>\n")
out.write(" <td>"+str(params['horizvis'])+"</td>\n")
out.write("<td>nm</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>waterlevel</td>\n")
out.write("<td>decimal</td>\n")
if 'waterlevel' in params:
out.write(" <td>"+str(params['waterlevel'])+"</td>\n")
out.write(" <td>"+str(params['waterlevel'])+"</td>\n")
out.write("<td>m</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>waterleveltrend</td>\n")
out.write("<td>uint</td>\n")
if 'waterleveltrend' in params:
out.write(" <td>"+str(params['waterleveltrend'])+"</td>\n")
if str(params['waterleveltrend']) in waterleveltrendDecodeLut:
out.write("<td>"+waterleveltrendDecodeLut[str(params['waterleveltrend'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>surfcurspeed</td>\n")
out.write("<td>udecimal</td>\n")
if 'surfcurspeed' in params:
out.write(" <td>"+str(params['surfcurspeed'])+"</td>\n")
out.write(" <td>"+str(params['surfcurspeed'])+"</td>\n")
out.write("<td>knots</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>surfcurdir</td>\n")
out.write("<td>uint</td>\n")
if 'surfcurdir' in params:
out.write(" <td>"+str(params['surfcurdir'])+"</td>\n")
out.write(" <td>"+str(params['surfcurdir'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>curspeed2</td>\n")
out.write("<td>udecimal</td>\n")
if 'curspeed2' in params:
out.write(" <td>"+str(params['curspeed2'])+"</td>\n")
out.write(" <td>"+str(params['curspeed2'])+"</td>\n")
out.write("<td>knots</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>curdir2</td>\n")
out.write("<td>uint</td>\n")
if 'curdir2' in params:
out.write(" <td>"+str(params['curdir2'])+"</td>\n")
out.write(" <td>"+str(params['curdir2'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>curlevel2</td>\n")
out.write("<td>uint</td>\n")
if 'curlevel2' in params:
out.write(" <td>"+str(params['curlevel2'])+"</td>\n")
out.write(" <td>"+str(params['curlevel2'])+"</td>\n")
out.write("<td>m</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>curspeed3</td>\n")
out.write("<td>udecimal</td>\n")
if 'curspeed3' in params:
out.write(" <td>"+str(params['curspeed3'])+"</td>\n")
out.write(" <td>"+str(params['curspeed3'])+"</td>\n")
out.write("<td>knots</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>curdir3</td>\n")
out.write("<td>uint</td>\n")
if 'curdir3' in params:
out.write(" <td>"+str(params['curdir3'])+"</td>\n")
out.write(" <td>"+str(params['curdir3'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>curlevel3</td>\n")
out.write("<td>uint</td>\n")
if 'curlevel3' in params:
out.write(" <td>"+str(params['curlevel3'])+"</td>\n")
out.write(" <td>"+str(params['curlevel3'])+"</td>\n")
out.write("<td>m</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>sigwaveheight</td>\n")
out.write("<td>udecimal</td>\n")
if 'sigwaveheight' in params:
out.write(" <td>"+str(params['sigwaveheight'])+"</td>\n")
out.write(" <td>"+str(params['sigwaveheight'])+"</td>\n")
out.write("<td>m</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>waveperiod</td>\n")
out.write("<td>uint</td>\n")
if 'waveperiod' in params:
out.write(" <td>"+str(params['waveperiod'])+"</td>\n")
out.write(" <td>"+str(params['waveperiod'])+"</td>\n")
out.write("<td>sec</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>wavedir</td>\n")
out.write("<td>uint</td>\n")
if 'wavedir' in params:
out.write(" <td>"+str(params['wavedir'])+"</td>\n")
out.write(" <td>"+str(params['wavedir'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>swellheight</td>\n")
out.write("<td>udecimal</td>\n")
if 'swellheight' in params:
out.write(" <td>"+str(params['swellheight'])+"</td>\n")
out.write(" <td>"+str(params['swellheight'])+"</td>\n")
out.write("<td>m</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>swellperiod</td>\n")
out.write("<td>uint</td>\n")
if 'swellperiod' in params:
out.write(" <td>"+str(params['swellperiod'])+"</td>\n")
out.write(" <td>"+str(params['swellperiod'])+"</td>\n")
out.write("<td>sec</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>swelldir</td>\n")
out.write("<td>uint</td>\n")
if 'swelldir' in params:
out.write(" <td>"+str(params['swelldir'])+"</td>\n")
out.write(" <td>"+str(params['swelldir'])+"</td>\n")
out.write("<td>degrees</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>seastate</td>\n")
out.write("<td>uint</td>\n")
if 'seastate' in params:
out.write(" <td>"+str(params['seastate'])+"</td>\n")
if str(params['seastate']) in seastateDecodeLut:
out.write("<td>"+seastateDecodeLut[str(params['seastate'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("<td>Beaufort scale</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>watertemp</td>\n")
out.write("<td>udecimal</td>\n")
if 'watertemp' in params:
out.write(" <td>"+str(params['watertemp'])+"</td>\n")
out.write(" <td>"+str(params['watertemp'])+"</td>\n")
out.write("<td>degrees Celsius</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>preciptype</td>\n")
out.write("<td>uint</td>\n")
if 'preciptype' in params:
out.write(" <td>"+str(params['preciptype'])+"</td>\n")
if str(params['preciptype']) in preciptypeDecodeLut:
out.write("<td>"+preciptypeDecodeLut[str(params['preciptype'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("<td>WMO scale index</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>salinity</td>\n")
out.write("<td>decimal</td>\n")
if 'salinity' in params:
out.write(" <td>"+str(params['salinity'])+"</td>\n")
out.write(" <td>"+str(params['salinity'])+"</td>\n")
out.write("<td>0/00</td>\n")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>ice</td>\n")
out.write("<td>uint</td>\n")
if 'ice' in params:
out.write(" <td>"+str(params['ice'])+"</td>\n")
if str(params['ice']) in iceDecodeLut:
out.write("<td>"+iceDecodeLut[str(params['ice'])]+"</td>")
else:
out.write("<td><i>Missing LUT entry</i></td>")
out.write("</tr>\n")
out.write("\n")
out.write("<tr>\n")
out.write("<td>Spare2</td>\n")
out.write("<td>uint</td>\n")
if 'Spare2' in params:
out.write(" <td>"+str(params['Spare2'])+"</td>\n")
out.write(" <td>"+str(params['Spare2'])+"</td>\n")
out.write("</tr>\n")
out.write("</table>\n")
def printKml(params, out=sys.stdout):
'''KML (Keyhole Markup Language) for Google Earth, but without the header/footer'''
out.write("\ <Placemark>\n")
out.write("\t <name>"+str(params['UserID'])+"</name>\n")
out.write("\t\t<description>\n")
import StringIO
buf = StringIO.StringIO()
printHtml(params,buf)
import cgi
out.write(cgi.escape(buf.getvalue()))
out.write("\t\t</description>\n")
out.write("\t\t<styleUrl>#m_ylw-pushpin_copy0</styleUrl>\n")
out.write("\t\t<Point>\n")
out.write("\t\t\t<coordinates>")
out.write(str(params['longitude']))
out.write(',')
out.write(str(params['latitude']))
out.write(",0</coordinates>\n")
out.write("\t\t</Point>\n")
out.write("\t</Placemark>\n")
def printFields(params, out=sys.stdout, format='std', fieldList=None, dbType='postgres'):
    '''Print an imo_met_hydro message to stdout.
Fields in params:
- MessageID(uint): AIS message number. Must be 8 (field automatically set to "8")
- RepeatIndicator(uint): Indicated how many times a message has been repeated
- UserID(uint): MMSI number of transmitter broadcasting the message
- Spare(uint): Reserved for definition by a regional authority. (field automatically set to "0")
- dac(uint): Designated Area Code - part 1 of the IAI (field automatically set to "1")
- fid(uint): Functional Identifier - part 2 of the IAI (field automatically set to "11")
- latitude(decimal): Location of the vessel. North South location
- longitude(decimal): Location of the vessel. East West location
- day(uint): Day 0..31
- hour(uint): Hour 0..23
- min(uint): Min
- avewind(uint): Average wind speed values for the last 10 minutes.
- windgust(uint): Wind gust is the max wind speed value reading during the last 10 minutes.
- winddir(uint): Wind direction
- windgustdir(uint): Wind direction for the gust.
- airtemp(decimal): Dry bulb temperature
- relhumid(uint): Relative humidity
- dewpoint(decimal): Dew Point
- airpressure(udecimal): Air pressure
- airpressuretrend(uint): Air pressure trend
- horizvis(udecimal): Horizontal visibility
- waterlevel(decimal): Water level (incl. tide)
- waterleveltrend(uint): Water level trend
- surfcurspeed(udecimal): Surface current speed
- surfcurdir(uint): Surface current direction
- curspeed2(udecimal): Level 2 current speed
- curdir2(uint): Level 2 current direction
- curlevel2(uint): Measuring level below sea surface for level 2
- curspeed3(udecimal): Level 3 current speed
- curdir3(uint): Level 3 current direction
- curlevel3(uint): Measuring level below sea surface for level 3
- sigwaveheight(udecimal): Significant wave height
- waveperiod(uint): Wave period
- wavedir(uint): Wave direction
- swellheight(udecimal): Swell height
- swellperiod(uint): Swell period
- swelldir(uint): Swell direction
- seastate(uint): Sea state according to the Beaufort scale
- watertemp(udecimal): Water temperature
- preciptype(uint): According to WMO
- salinity(decimal): Salinity
- ice(uint): Yes or no for the presence of ice
- Spare2(uint): Must be zero (field automatically set to "0")
@param params: Dictionary of field names/values.
@param out: File like object to write to
@rtype: stdout
@return: text to out
'''
if 'std'==format:
out.write("imo_met_hydro:\n")
if 'MessageID' in params: out.write(" MessageID: "+str(params['MessageID'])+"\n")
if 'RepeatIndicator' in params: out.write(" RepeatIndicator: "+str(params['RepeatIndicator'])+"\n")
if 'UserID' in params: out.write(" UserID: "+str(params['UserID'])+"\n")
if 'Spare' in params: out.write(" Spare: "+str(params['Spare'])+"\n")
if 'dac' in params: out.write(" dac: "+str(params['dac'])+"\n")
if 'fid' in params: out.write(" fid: "+str(params['fid'])+"\n")
if 'latitude' in params: out.write(" latitude: "+str(params['latitude'])+"\n")
if 'longitude' in params: out.write(" longitude: "+str(params['longitude'])+"\n")
if 'day' in params: out.write(" day: "+str(params['day'])+"\n")
if 'hour' in params: out.write(" hour: "+str(params['hour'])+"\n")
if 'min' in params: out.write(" min: "+str(params['min'])+"\n")
if 'avewind' in params: out.write(" avewind: "+str(params['avewind'])+"\n")
if 'windgust' in params: out.write(" windgust: "+str(params['windgust'])+"\n")
if 'winddir' in params: out.write(" winddir: "+str(params['winddir'])+"\n")
if 'windgustdir' in params: out.write(" windgustdir: "+str(params['windgustdir'])+"\n")
if 'airtemp' in params: out.write(" airtemp: "+str(params['airtemp'])+"\n")
if 'relhumid' in params: out.write(" relhumid: "+str(params['relhumid'])+"\n")
if 'dewpoint' in params: out.write(" dewpoint: "+str(params['dewpoint'])+"\n")
if 'airpressure' in params: out.write(" airpressure: "+str(params['airpressure'])+"\n")
if 'airpressuretrend' in params: out.write(" airpressuretrend: "+str(params['airpressuretrend'])+"\n")
if 'horizvis' in params: out.write(" horizvis: "+str(params['horizvis'])+"\n")
if 'waterlevel' in params: out.write(" waterlevel: "+str(params['waterlevel'])+"\n")
if 'waterleveltrend' in params: out.write(" waterleveltrend: "+str(params['waterleveltrend'])+"\n")
if 'surfcurspeed' in params: out.write(" surfcurspeed: "+str(params['surfcurspeed'])+"\n")
if 'surfcurdir' in params: out.write(" surfcurdir: "+str(params['surfcurdir'])+"\n")
if 'curspeed2' in params: out.write(" curspeed2: "+str(params['curspeed2'])+"\n")
if 'curdir2' in params: out.write(" curdir2: "+str(params['curdir2'])+"\n")
if 'curlevel2' in params: out.write(" curlevel2: "+str(params['curlevel2'])+"\n")
if 'curspeed3' in params: out.write(" curspeed3: "+str(params['curspeed3'])+"\n")
if 'curdir3' in params: out.write(" curdir3: "+str(params['curdir3'])+"\n")
if 'curlevel3' in params: out.write(" curlevel3: "+str(params['curlevel3'])+"\n")
if 'sigwaveheight' in params: out.write(" sigwaveheight: "+str(params['sigwaveheight'])+"\n")
if 'waveperiod' in params: out.write(" waveperiod: "+str(params['waveperiod'])+"\n")
if 'wavedir' in params: out.write(" wavedir: "+str(params['wavedir'])+"\n")
if 'swellheight' in params: out.write(" swellheight: "+str(params['swellheight'])+"\n")
if 'swellperiod' in params: out.write(" swellperiod: "+str(params['swellperiod'])+"\n")
if 'swelldir' in params: out.write(" swelldir: "+str(params['swelldir'])+"\n")
if 'seastate' in params: out.write(" seastate: "+str(params['seastate'])+"\n")
if 'watertemp' in params: out.write(" watertemp: "+str(params['watertemp'])+"\n")
if 'preciptype' in params: out.write(" preciptype: "+str(params['preciptype'])+"\n")
if 'salinity' in params: out.write(" salinity: "+str(params['salinity'])+"\n")
if 'ice' in params: out.write(" ice: "+str(params['ice'])+"\n")
if 'Spare2' in params: out.write(" Spare2: "+str(params['Spare2'])+"\n")
elif 'csv'==format:
if None == options.fieldList:
options.fieldList = fieldList
needComma = False;
for field in fieldList:
if needComma: out.write(',')
needComma = True
if field in params:
out.write(str(params[field]))
# else: leave it empty
out.write("\n")
elif 'html'==format:
printHtml(params,out)
elif 'sql'==format:
sqlInsertStr(params,out,dbType=dbType)
elif 'kml'==format:
printKml(params,out)
elif 'kml-full'==format:
out.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
out.write("<kml xmlns=\"http://earth.google.com/kml/2.1\">\n")
out.write("<Document>\n")
out.write(" <name>imo_met_hydro</name>\n")
printKml(params,out)
out.write("</Document>\n")
out.write("</kml>\n")
else:
print "ERROR: unknown format:",format
assert False
return # Nothing to return
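# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file): capture the
# 'html' rendering of a decoded message in a string instead of writing
# it to stdout.  'params' is assumed to be a dict returned by decode().
def _example_fields_as_html(params):
    import StringIO
    buf = StringIO.StringIO()
    printFields(params, out=buf, format='html')
    return buf.getvalue()
# ----------------------------------------------------------------------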
RepeatIndicatorEncodeLut = {
'default':'0',
'do not repeat any more':'3',
} #RepeatIndicatorEncodeLut
RepeatIndicatorDecodeLut = {
'0':'default',
'3':'do not repeat any more',
} # RepeatIndicatorEncodeLut
airpressuretrendEncodeLut = {
'steady':'0',
'decreasing':'1',
'increasing':'2',
'unavailable':'3',
} #airpressuretrendEncodeLut
airpressuretrendDecodeLut = {
'0':'steady',
'1':'decreasing',
'2':'increasing',
'3':'unavailable',
} # airpressuretrendEncodeLut
waterleveltrendEncodeLut = {
'steady':'0',
'decreasing':'1',
'increasing':'2',
'unavailable':'3',
} #waterleveltrendEncodeLut
waterleveltrendDecodeLut = {
'0':'steady',
'1':'decreasing',
'2':'increasing',
'3':'unavailable',
} # waterleveltrendEncodeLut
seastateEncodeLut = {
'Calm':'0',
'Light air':'1',
'Light breeze':'2',
'Gentle breeze':'3',
'Moderate breeze':'4',
'Fresh breeze':'5',
'Strong breeze':'6',
'Near gale':'7',
'Gale':'8',
'Strong gale':'9',
'Storm':'10',
'Violent storm':'11',
'Hurricane':'12',
'unavailable':'15',
} #seastateEncodeLut
seastateDecodeLut = {
'0':'Calm',
'1':'Light air',
'2':'Light breeze',
'3':'Gentle breeze',
'4':'Moderate breeze',
'5':'Fresh breeze',
'6':'Strong breeze',
'7':'Near gale',
'8':'Gale',
'9':'Strong gale',
'10':'Storm',
'11':'Violent storm',
'12':'Hurricane',
'15':'unavailable',
} # seastateEncodeLut
preciptypeEncodeLut = {
'FIX: find the WMO list of types':'0',
'unavailable':'7',
} #preciptypeEncodeLut
preciptypeDecodeLut = {
'0':'FIX: find the WMO list of types',
'7':'unavailable',
} # preciptypeEncodeLut
iceEncodeLut = {
'No ice':'0',
'Ice present':'1',
'Reserved':'2',
'Unknown if there is ice present':'3',
} #iceEncodeLut
iceDecodeLut = {
'0':'No ice',
'1':'Ice present',
'2':'Reserved',
'3':'Unknown if there is ice present',
} # iceEncodeLut
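# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file): the *DecodeLut
# tables above are keyed by the string form of the raw integer codes,
# which is how printHtml() uses them.  'params' is assumed to be a dict
# returned by decode().
def _example_lookup_seastate(params):
    return seastateDecodeLut.get(str(params['seastate']), 'Missing LUT entry')
# ----------------------------------------------------------------------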
######################################################################
# SQL SUPPORT
######################################################################
dbTableName='imo_met_hydro'
'Database table name'
def sqlCreateStr(outfile=sys.stdout, fields=None, extraFields=None
,addCoastGuardFields=True
,dbType='postgres'
):
'''
Return the SQL CREATE command for this message type
@param outfile: file like object to print to.
@param fields: which fields to put in the create. Defaults to all.
@param extraFields: A sequence of tuples containing (name,sql type) for additional fields
    @param addCoastGuardFields: Add the extra fields that come after the NMEA checksum from the USCG N-AIS format
@param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres')
@type addCoastGuardFields: bool
@return: sql create string
@rtype: str
@see: sqlCreate
'''
# FIX: should this sqlCreate be the same as in LaTeX (createFuncName) rather than hard coded?
outfile.write(str(sqlCreate(fields,extraFields,addCoastGuardFields,dbType=dbType)))
def sqlCreate(fields=None, extraFields=None, addCoastGuardFields=True, dbType='postgres'):
'''
Return the sqlhelp object to create the table.
@param fields: which fields to put in the create. Defaults to all.
@param extraFields: A sequence of tuples containing (name,sql type) for additional fields
    @param addCoastGuardFields: Add the extra fields that come after the NMEA checksum from the USCG N-AIS format
@type addCoastGuardFields: bool
@param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres')
    @return: An object that can be used to generate the table CREATE statement
@rtype: sqlhelp.create
'''
if None == fields: fields = fieldList
c = sqlhelp.create('imo_met_hydro',dbType=dbType)
c.addPrimaryKey()
if 'MessageID' in fields: c.addInt ('MessageID')
if 'RepeatIndicator' in fields: c.addInt ('RepeatIndicator')
if 'UserID' in fields: c.addInt ('UserID')
if 'Spare' in fields: c.addInt ('Spare')
if 'dac' in fields: c.addInt ('dac')
if 'fid' in fields: c.addInt ('fid')
if dbType != 'postgres':
if 'latitude' in fields: c.addDecimal('latitude',7,4)
if dbType != 'postgres':
if 'longitude' in fields: c.addDecimal('longitude',7,4)
if 'day' in fields: c.addInt ('day')
if 'hour' in fields: c.addInt ('hour')
if 'min' in fields: c.addInt ('min')
if 'avewind' in fields: c.addInt ('avewind')
if 'windgust' in fields: c.addInt ('windgust')
if 'winddir' in fields: c.addInt ('winddir')
if 'windgustdir' in fields: c.addInt ('windgustdir')
if 'airtemp' in fields: c.addDecimal('airtemp',4,1)
if 'relhumid' in fields: c.addInt ('relhumid')
if 'dewpoint' in fields: c.addDecimal('dewpoint',4,1)
if 'airpressure' in fields: c.addDecimal('airpressure',3,0)
if 'airpressuretrend' in fields: c.addInt ('airpressuretrend')
if 'horizvis' in fields: c.addDecimal('horizvis',3,1)
if 'waterlevel' in fields: c.addDecimal('waterlevel',3,1)
if 'waterleveltrend' in fields: c.addInt ('waterleveltrend')
if 'surfcurspeed' in fields: c.addDecimal('surfcurspeed',3,1)
if 'surfcurdir' in fields: c.addInt ('surfcurdir')
if 'curspeed2' in fields: c.addDecimal('curspeed2',3,1)
if 'curdir2' in fields: c.addInt ('curdir2')
if 'curlevel2' in fields: c.addInt ('curlevel2')
if 'curspeed3' in fields: c.addDecimal('curspeed3',3,1)
if 'curdir3' in fields: c.addInt ('curdir3')
if 'curlevel3' in fields: c.addInt ('curlevel3')
if 'sigwaveheight' in fields: c.addDecimal('sigwaveheight',3,1)
if 'waveperiod' in fields: c.addInt ('waveperiod')
if 'wavedir' in fields: c.addInt ('wavedir')
if 'swellheight' in fields: c.addDecimal('swellheight',3,1)
if 'swellperiod' in fields: c.addInt ('swellperiod')
if 'swelldir' in fields: c.addInt ('swelldir')
if 'seastate' in fields: c.addInt ('seastate')
if 'watertemp' in fields: c.addDecimal('watertemp',4,1)
if 'preciptype' in fields: c.addInt ('preciptype')
if 'salinity' in fields: c.addDecimal('salinity',3,1)
if 'ice' in fields: c.addInt ('ice')
if 'Spare2' in fields: c.addInt ('Spare2')
if addCoastGuardFields:
# c.addInt('cg_s_rssi') # Relative signal strength indicator
# c.addInt('cg_d_strength') # dBm receive strength
# c.addVarChar('cg_x',10) # Idonno
c.addInt('cg_t_arrival') # Receive timestamp from the AIS equipment 'T'
c.addInt('cg_s_slotnum') # Slot received in
c.addVarChar('cg_r',15) # Receiver station ID - should usually be an MMSI, but sometimes is a string
c.addInt('cg_sec') # UTC seconds since the epoch
c.addTimestamp('cg_timestamp') # UTC decoded cg_sec - not actually in the data stream
if dbType == 'postgres':
#--- EPSG 4326 : WGS 84
#INSERT INTO "spatial_ref_sys" ("srid","auth_name","auth_srid","srtext","proj4text") VALUES (4326,'EPSG',4326,'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]','+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ');
c.addPostGIS('Position','POINT',2,SRID=4326);
return c
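# Illustrative sketch, not part of the original module: one possible use of sqlCreate()
# to render a CREATE TABLE statement for a handful of fields. The helper name below is
# hypothetical; the field names come from fieldList above, and str() on the returned
# object is the same rendering step sqlCreateStr() performs.
def _example_sqlCreate_usage():
    c = sqlCreate(fields=['latitude','longitude','avewind','winddir'],
                  addCoastGuardFields=False, dbType='sqlite')
    return str(c)  # rendered CREATE TABLE SQL for the imo_met_hydro table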
def sqlInsertStr(params, outfile=sys.stdout, extraParams=None, dbType='postgres'):
'''
Return the SQL INSERT command for this message type
@param params: dictionary of values keyed by field name
@param outfile: file like object to print to.
@param extraParams: A sequence of tuples containing (name,sql type) for additional fields
    @return: sql insert string
    @rtype: str
    @see: sqlInsert
'''
outfile.write(str(sqlInsert(params,extraParams,dbType=dbType)))
def sqlInsert(params,extraParams=None,dbType='postgres'):
'''
Give the SQL INSERT statement
    @param params: dict of values keyed by field name
@param extraParams: any extra fields that you have created beyond the normal ais message fields
@rtype: sqlhelp.insert
@return: insert class instance
@todo: allow optional type checking of params?
    @warning: invalid keys are accepted without complaint and may produce invalid SQL
'''
i = sqlhelp.insert('imo_met_hydro',dbType=dbType)
if dbType=='postgres':
finished = []
for key in params:
if key in finished:
continue
if key not in toPgFields and key not in fromPgFields:
if type(params[key])==Decimal: i.add(key,float(params[key]))
else: i.add(key,params[key])
else:
if key in fromPgFields:
val = params[key]
# Had better be a WKT type like POINT(-88.1 30.321)
i.addPostGIS(key,val)
finished.append(key)
else:
# Need to construct the type.
pgName = toPgFields[key]
#valStr='GeomFromText(\''+pgTypes[pgName]+'('
valStr=pgTypes[pgName]+'('
vals = []
for nonPgKey in fromPgFields[pgName]:
vals.append(str(params[nonPgKey]))
finished.append(nonPgKey)
valStr+=' '.join(vals)+')'
i.addPostGIS(pgName,valStr)
else:
for key in params:
if type(params[key])==Decimal: i.add(key,float(params[key]))
else: i.add(key,params[key])
if None != extraParams:
for key in extraParams:
i.add(key,extraParams[key])
return i
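# Illustrative sketch, not part of the original module: building an INSERT from a small
# hand-written params dict (the values here are assumptions; a real decoded message
# supplies every field, as testParams() in the unit-test section below does).
def _example_sqlInsert_usage():
    params = {'MessageID': 8, 'UserID': 1193046, 'avewind': 23}
    i = sqlInsert(params, dbType='sqlite')  # non-postgres branch: plain key/value adds
    return str(i)                           # rendered INSERT statement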
######################################################################
# LATEX SUPPORT
######################################################################
def latexDefinitionTable(outfile=sys.stdout
):
'''
Return the LaTeX definition table for this message type
@param outfile: file like object to print to.
@type outfile: file obj
@return: LaTeX table string via the outfile
@rtype: str
'''
o = outfile
o.write('''
\\begin{table}%[htb]
\\centering
\\begin{tabular}{|l|c|l|}
\\hline
Parameter & Number of bits & Description
\\\\ \\hline\\hline
MessageID & 6 & AIS message number. Must be 8 \\\\ \hline
RepeatIndicator & 2 & Indicates how many times a message has been repeated \\\\ \hline
UserID & 30 & MMSI number of transmitter broadcasting the message \\\\ \hline
Spare & 2 & Reserved for definition by a regional authority. \\\\ \hline
dac & 10 & Designated Area Code - part 1 of the IAI \\\\ \hline
fid & 6 & Functional Identifier - part 2 of the IAI \\\\ \hline
latitude & 24 & Location of the vessel. North South location \\\\ \hline
longitude & 25 & Location of the vessel. East West location \\\\ \hline
day & 5 & Day 0..31 \\\\ \hline
hour & 5 & Hour 0..23 \\\\ \hline
min & 6 & Min \\\\ \hline
avewind & 7 & Average wind speed values for the last 10 minutes. \\\\ \hline
windgust & 7 & Wind gust is the max wind speed value reading during the last 10 minutes. \\\\ \hline
winddir & 9 & Wind direction \\\\ \hline
windgustdir & 9 & Wind direction for the gust. \\\\ \hline
airtemp & 11 & Dry bulb temperature \\\\ \hline
relhumid & 7 & Relative humidity \\\\ \hline
dewpoint & 10 & Dew Point \\\\ \hline
airpressure & 9 & Air pressure \\\\ \hline
airpressuretrend & 2 & Air pressure trend \\\\ \hline
horizvis & 8 & Horizontal visibility \\\\ \hline
waterlevel & 9 & Water level (incl. tide) \\\\ \hline
waterleveltrend & 2 & Water level trend \\\\ \hline
surfcurspeed & 8 & Surface current speed \\\\ \hline
surfcurdir & 9 & Surface current direction \\\\ \hline
curspeed2 & 8 & Level 2 current speed \\\\ \hline
curdir2 & 9 & Level 2 current direction \\\\ \hline
curlevel2 & 5 & Measuring level below sea surface for level 2 \\\\ \hline
curspeed3 & 8 & Level 3 current speed \\\\ \hline
curdir3 & 9 & Level 3 current direction \\\\ \hline
curlevel3 & 5 & Measuring level below sea surface for level 3 \\\\ \hline
sigwaveheight & 8 & Significant wave height \\\\ \hline
waveperiod & 6 & Wave period \\\\ \hline
wavedir & 9 & Wave direction \\\\ \hline
swellheight & 8 & Swell height \\\\ \hline
swellperiod & 6 & Swell period \\\\ \hline
swelldir & 9 & Swell direction \\\\ \hline
seastate & 4 & Sea state according to the Beaufort scale \\\\ \hline
watertemp & 10 & Water temperature \\\\ \hline
preciptype & 3 & According to WMO \\\\ \hline
salinity & 9 & Salinity \\\\ \hline
ice & 2 & Yes or no for the presence of ice \\\\ \hline
Spare2 & 6 & Must be zero\\\\ \\hline \\hline
Total bits & 352 & Appears to take 2 slots with 72 pad bits to fill the last slot \\\\ \\hline
\\end{tabular}
\\caption{AIS message number 8: IMO meteorological and hydrological data. Specified in SN/Circ.236 Annex 2. Also defined in IALA Guidelines on AIS, Vol 1, Part 1, Ed. 1.3. Guideline No. 1028. }
\\label{tab:imo_met_hydro}
\\end{table}
''')
######################################################################
# Text Definition
######################################################################
def textDefinitionTable(outfile=sys.stdout
,delim='\t'
):
'''
Return the text definition table for this message type
@param outfile: file like object to print to.
@type outfile: file obj
@return: text table string via the outfile
@rtype: str
'''
o = outfile
    o.write('''Parameter'''+delim+'''Number of bits'''+delim+'''Description
MessageID'''+delim+'''6'''+delim+'''AIS message number. Must be 8
RepeatIndicator'''+delim+'''2'''+delim+'''Indicates how many times a message has been repeated
UserID'''+delim+'''30'''+delim+'''MMSI number of transmitter broadcasting the message
Spare'''+delim+'''2'''+delim+'''Reserved for definition by a regional authority.
dac'''+delim+'''10'''+delim+'''Designated Area Code - part 1 of the IAI
fid'''+delim+'''6'''+delim+'''Functional Identifier - part 2 of the IAI
latitude'''+delim+'''24'''+delim+'''Location of the vessel. North South location
longitude'''+delim+'''25'''+delim+'''Location of the vessel. East West location
day'''+delim+'''5'''+delim+'''Day 0..31
hour'''+delim+'''5'''+delim+'''Hour 0..23
min'''+delim+'''6'''+delim+'''Min
avewind'''+delim+'''7'''+delim+'''Average wind speed values for the last 10 minutes.
windgust'''+delim+'''7'''+delim+'''Wind gust is the max wind speed value reading during the last 10 minutes.
winddir'''+delim+'''9'''+delim+'''Wind direction
windgustdir'''+delim+'''9'''+delim+'''Wind direction for the gust.
airtemp'''+delim+'''11'''+delim+'''Dry bulb temperature
relhumid'''+delim+'''7'''+delim+'''Relative humidity
dewpoint'''+delim+'''10'''+delim+'''Dew Point
airpressure'''+delim+'''9'''+delim+'''Air pressure
airpressuretrend'''+delim+'''2'''+delim+'''Air pressure trend
horizvis'''+delim+'''8'''+delim+'''Horizontal visibility
waterlevel'''+delim+'''9'''+delim+'''Water level (incl. tide)
waterleveltrend'''+delim+'''2'''+delim+'''Water level trend
surfcurspeed'''+delim+'''8'''+delim+'''Surface current speed
surfcurdir'''+delim+'''9'''+delim+'''Surface current direction
curspeed2'''+delim+'''8'''+delim+'''Level 2 current speed
curdir2'''+delim+'''9'''+delim+'''Level 2 current direction
curlevel2'''+delim+'''5'''+delim+'''Measuring level below sea surface for level 2
curspeed3'''+delim+'''8'''+delim+'''Level 3 current speed
curdir3'''+delim+'''9'''+delim+'''Level 3 current direction
curlevel3'''+delim+'''5'''+delim+'''Measuring level below sea surface for level 3
sigwaveheight'''+delim+'''8'''+delim+'''Significant wave height
waveperiod'''+delim+'''6'''+delim+'''Wave period
wavedir'''+delim+'''9'''+delim+'''Wave direction
swellheight'''+delim+'''8'''+delim+'''Swell height
swellperiod'''+delim+'''6'''+delim+'''Swell period
swelldir'''+delim+'''9'''+delim+'''Swell direction
seastate'''+delim+'''4'''+delim+'''Sea state according to the Beaufort scale
watertemp'''+delim+'''10'''+delim+'''Water temperature
preciptype'''+delim+'''3'''+delim+'''According to WMO
salinity'''+delim+'''9'''+delim+'''Salinity
ice'''+delim+'''2'''+delim+'''Yes or no for the presence of ice
Spare2'''+delim+'''6'''+delim+'''Must be zero
Total bits'''+delim+'''352'''+delim+'''Appears to take 2 slots with 72 pad bits to fill the last slot''')
######################################################################
# UNIT TESTING
######################################################################
import unittest
def testParams():
    '''Return a params dict based on the testvalue tags.
@rtype: dict
@return: params based on testvalue tags
'''
params = {}
params['MessageID'] = 8
params['RepeatIndicator'] = 1
params['UserID'] = 1193046
params['Spare'] = 0
params['dac'] = 1
params['fid'] = 11
params['latitude'] = Decimal('37.42446')
params['longitude'] = Decimal('-122.16328')
params['day'] = 3
params['hour'] = 21
params['min'] = 58
params['avewind'] = 23
params['windgust'] = 35
params['winddir'] = 329
params['windgustdir'] = 293
params['airtemp'] = Decimal('-40.1')
params['relhumid'] = 99
params['dewpoint'] = Decimal('-19.2')
params['airpressure'] = Decimal('1150')
params['airpressuretrend'] = 2
params['horizvis'] = Decimal('11.9')
params['waterlevel'] = Decimal('-8.9')
params['waterleveltrend'] = 0
params['surfcurspeed'] = Decimal('22.3')
params['surfcurdir'] = 321
params['curspeed2'] = Decimal('12.7')
params['curdir2'] = 122
params['curlevel2'] = 29
params['curspeed3'] = Decimal('19.2')
params['curdir3'] = 93
params['curlevel3'] = 28
params['sigwaveheight'] = Decimal('22.8')
params['waveperiod'] = 2
params['wavedir'] = 187
params['swellheight'] = Decimal('0.2')
params['swellperiod'] = 59
params['swelldir'] = 1
params['seastate'] = 12
params['watertemp'] = Decimal('48.8')
params['preciptype'] = 2
params['salinity'] = Decimal('0.9')
params['ice'] = 1
params['Spare2'] = 0
return params
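# Illustrative sketch, not part of the original module: the encode/decode round trip
# exercised by the unit test below, using this module's own encode() and decode().
def _example_encode_decode_roundtrip():
    params = testParams()
    bits = encode(params)   # BitVector holding the packed message payload
    return decode(bits)     # dict of field values recovered from the bits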
class Testimo_met_hydro(unittest.TestCase):
    '''Use testvalue tag text from each type to build a test case for the imo_met_hydro message'''
def testEncodeDecode(self):
params = testParams()
bits = encode(params)
r = decode(bits)
# Check that each parameter came through ok.
self.failUnlessEqual(r['MessageID'],params['MessageID'])
self.failUnlessEqual(r['RepeatIndicator'],params['RepeatIndicator'])
self.failUnlessEqual(r['UserID'],params['UserID'])
self.failUnlessEqual(r['Spare'],params['Spare'])
self.failUnlessEqual(r['dac'],params['dac'])
self.failUnlessEqual(r['fid'],params['fid'])
self.failUnlessAlmostEqual(r['latitude'],params['latitude'],4)
self.failUnlessAlmostEqual(r['longitude'],params['longitude'],4)
self.failUnlessEqual(r['day'],params['day'])
self.failUnlessEqual(r['hour'],params['hour'])
self.failUnlessEqual(r['min'],params['min'])
self.failUnlessEqual(r['avewind'],params['avewind'])
self.failUnlessEqual(r['windgust'],params['windgust'])
self.failUnlessEqual(r['winddir'],params['winddir'])
self.failUnlessEqual(r['windgustdir'],params['windgustdir'])
self.failUnlessAlmostEqual(r['airtemp'],params['airtemp'],1)
self.failUnlessEqual(r['relhumid'],params['relhumid'])
self.failUnlessAlmostEqual(r['dewpoint'],params['dewpoint'],1)
self.failUnlessAlmostEqual(r['airpressure'],params['airpressure'],0)
self.failUnlessEqual(r['airpressuretrend'],params['airpressuretrend'])
self.failUnlessAlmostEqual(r['horizvis'],params['horizvis'],1)
self.failUnlessAlmostEqual(r['waterlevel'],params['waterlevel'],1)
self.failUnlessEqual(r['waterleveltrend'],params['waterleveltrend'])
self.failUnlessAlmostEqual(r['surfcurspeed'],params['surfcurspeed'],1)
self.failUnlessEqual(r['surfcurdir'],params['surfcurdir'])
self.failUnlessAlmostEqual(r['curspeed2'],params['curspeed2'],1)
self.failUnlessEqual(r['curdir2'],params['curdir2'])
self.failUnlessEqual(r['curlevel2'],params['curlevel2'])
self.failUnlessAlmostEqual(r['curspeed3'],params['curspeed3'],1)
self.failUnlessEqual(r['curdir3'],params['curdir3'])
self.failUnlessEqual(r['curlevel3'],params['curlevel3'])
self.failUnlessAlmostEqual(r['sigwaveheight'],params['sigwaveheight'],1)
self.failUnlessEqual(r['waveperiod'],params['waveperiod'])
self.failUnlessEqual(r['wavedir'],params['wavedir'])
self.failUnlessAlmostEqual(r['swellheight'],params['swellheight'],1)
self.failUnlessEqual(r['swellperiod'],params['swellperiod'])
self.failUnlessEqual(r['swelldir'],params['swelldir'])
self.failUnlessEqual(r['seastate'],params['seastate'])
self.failUnlessAlmostEqual(r['watertemp'],params['watertemp'],1)
self.failUnlessEqual(r['preciptype'],params['preciptype'])
self.failUnlessAlmostEqual(r['salinity'],params['salinity'],1)
self.failUnlessEqual(r['ice'],params['ice'])
self.failUnlessEqual(r['Spare2'],params['Spare2'])
def addMsgOptions(parser):
parser.add_option('-d','--decode',dest='doDecode',default=False,action='store_true',
help='decode a "imo_met_hydro" AIS message')
parser.add_option('-e','--encode',dest='doEncode',default=False,action='store_true',
help='encode a "imo_met_hydro" AIS message')
parser.add_option('--RepeatIndicator-field', dest='RepeatIndicatorField',default=0,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--UserID-field', dest='UserIDField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--latitude-field', dest='latitudeField',default=Decimal('91'),metavar='decimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--longitude-field', dest='longitudeField',default=Decimal('181'),metavar='decimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--day-field', dest='dayField',metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--hour-field', dest='hourField',default=31,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--min-field', dest='minField',default=63,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--avewind-field', dest='avewindField',default=127,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--windgust-field', dest='windgustField',default=127,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--winddir-field', dest='winddirField',default=511,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--windgustdir-field', dest='windgustdirField',default=511,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--airtemp-field', dest='airtempField',default=Decimal('102.3'),metavar='decimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--relhumid-field', dest='relhumidField',default=127,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--dewpoint-field', dest='dewpointField',default=Decimal('51.1'),metavar='decimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--airpressure-field', dest='airpressureField',default=Decimal('1311'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--airpressuretrend-field', dest='airpressuretrendField',default=3,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--horizvis-field', dest='horizvisField',default=Decimal('25.5'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--waterlevel-field', dest='waterlevelField',metavar='decimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--waterleveltrend-field', dest='waterleveltrendField',default=3,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--surfcurspeed-field', dest='surfcurspeedField',default=Decimal('25.5'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--surfcurdir-field', dest='surfcurdirField',default=511,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--curspeed2-field', dest='curspeed2Field',default=Decimal('25.5'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--curdir2-field', dest='curdir2Field',default=511,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--curlevel2-field', dest='curlevel2Field',default=31,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--curspeed3-field', dest='curspeed3Field',default=Decimal('25.5'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--curdir3-field', dest='curdir3Field',default=511,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--curlevel3-field', dest='curlevel3Field',default=31,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--sigwaveheight-field', dest='sigwaveheightField',default=Decimal('25.5'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--waveperiod-field', dest='waveperiodField',default=63,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--wavedir-field', dest='wavedirField',default=511,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--swellheight-field', dest='swellheightField',default=Decimal('25.5'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--swellperiod-field', dest='swellperiodField',default=63,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--swelldir-field', dest='swelldirField',default=511,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--seastate-field', dest='seastateField',default=15,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--watertemp-field', dest='watertempField',default=Decimal('92.3'),metavar='udecimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--preciptype-field', dest='preciptypeField',default=7,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
parser.add_option('--salinity-field', dest='salinityField',default=Decimal('92.3'),metavar='decimal',type='string'
,help='Field parameter value [default: %default]')
parser.add_option('--ice-field', dest='iceField',default=3,metavar='uint',type='int'
,help='Field parameter value [default: %default]')
def main():
from optparse import OptionParser
parser = OptionParser(usage="%prog [options]",
version="%prog "+__version__)
parser.add_option('--unit-test',dest='unittest',default=False,action='store_true',
help='run the unit tests')
parser.add_option('-v','--verbose',dest='verbose',default=False,action='store_true',
help='Make the test output verbose')
# FIX: remove nmea from binary messages. No way to build the whole packet?
# FIX: or build the surrounding msg 8 for a broadcast?
typeChoices = ('binary','nmeapayload','nmea') # FIX: what about a USCG type message?
parser.add_option('-t','--type',choices=typeChoices,type='choice',dest='ioType'
,default='nmeapayload'
,help='What kind of string to write for encoding ('+', '.join(typeChoices)+') [default: %default]')
outputChoices = ('std','html','csv','sql' , 'kml','kml-full')
parser.add_option('-T','--output-type',choices=outputChoices,type='choice',dest='outputType'
,default='std'
,help='What kind of string to output ('+', '.join(outputChoices)+') [default: %default]')
parser.add_option('-o','--output',dest='outputFileName',default=None,
help='Name of the python file to write [default: stdout]')
parser.add_option('-f','--fields',dest='fieldList',default=None, action='append',
choices=fieldList,
help='Which fields to include in the output. Currently only for csv output [default: all]')
parser.add_option('-p','--print-csv-field-list',dest='printCsvfieldList',default=False,action='store_true',
help='Print the field name for csv')
parser.add_option('-c','--sql-create',dest='sqlCreate',default=False,action='store_true',
help='Print out an sql create command for the table.')
parser.add_option('--latex-table',dest='latexDefinitionTable',default=False,action='store_true',
help='Print a LaTeX table of the type')
parser.add_option('--text-table',dest='textDefinitionTable',default=False,action='store_true',
help='Print delimited table of the type (for Word table importing)')
    parser.add_option('--delim-text-table',dest='delimTextDefinitionTable',default='\t'
,help='Delimiter for text table [default: \'%default\'](for Word table importing)')
dbChoices = ('sqlite','postgres')
parser.add_option('-D','--db-type',dest='dbType',default='postgres'
,choices=dbChoices,type='choice'
,help='What kind of database ('+', '.join(dbChoices)+') [default: %default]')
addMsgOptions(parser)
options, args = parser.parse_args()
if options.unittest:
sys.argv = [sys.argv[0]]
if options.verbose: sys.argv.append('-v')
unittest.main()
outfile = sys.stdout
if None!=options.outputFileName:
outfile = file(options.outputFileName,'w')
if options.doEncode:
# First make sure all non required options are specified
if None==options.RepeatIndicatorField: parser.error("missing value for RepeatIndicatorField")
if None==options.UserIDField: parser.error("missing value for UserIDField")
if None==options.latitudeField: parser.error("missing value for latitudeField")
if None==options.longitudeField: parser.error("missing value for longitudeField")
if None==options.dayField: parser.error("missing value for dayField")
if None==options.hourField: parser.error("missing value for hourField")
if None==options.minField: parser.error("missing value for minField")
if None==options.avewindField: parser.error("missing value for avewindField")
if None==options.windgustField: parser.error("missing value for windgustField")
if None==options.winddirField: parser.error("missing value for winddirField")
if None==options.windgustdirField: parser.error("missing value for windgustdirField")
if None==options.airtempField: parser.error("missing value for airtempField")
if None==options.relhumidField: parser.error("missing value for relhumidField")
if None==options.dewpointField: parser.error("missing value for dewpointField")
if None==options.airpressureField: parser.error("missing value for airpressureField")
if None==options.airpressuretrendField: parser.error("missing value for airpressuretrendField")
if None==options.horizvisField: parser.error("missing value for horizvisField")
if None==options.waterlevelField: parser.error("missing value for waterlevelField")
if None==options.waterleveltrendField: parser.error("missing value for waterleveltrendField")
if None==options.surfcurspeedField: parser.error("missing value for surfcurspeedField")
if None==options.surfcurdirField: parser.error("missing value for surfcurdirField")
if None==options.curspeed2Field: parser.error("missing value for curspeed2Field")
if None==options.curdir2Field: parser.error("missing value for curdir2Field")
if None==options.curlevel2Field: parser.error("missing value for curlevel2Field")
if None==options.curspeed3Field: parser.error("missing value for curspeed3Field")
if None==options.curdir3Field: parser.error("missing value for curdir3Field")
if None==options.curlevel3Field: parser.error("missing value for curlevel3Field")
if None==options.sigwaveheightField: parser.error("missing value for sigwaveheightField")
if None==options.waveperiodField: parser.error("missing value for waveperiodField")
if None==options.wavedirField: parser.error("missing value for wavedirField")
if None==options.swellheightField: parser.error("missing value for swellheightField")
if None==options.swellperiodField: parser.error("missing value for swellperiodField")
if None==options.swelldirField: parser.error("missing value for swelldirField")
if None==options.seastateField: parser.error("missing value for seastateField")
if None==options.watertempField: parser.error("missing value for watertempField")
if None==options.preciptypeField: parser.error("missing value for preciptypeField")
if None==options.salinityField: parser.error("missing value for salinityField")
if None==options.iceField: parser.error("missing value for iceField")
msgDict={
'MessageID': '8',
'RepeatIndicator': options.RepeatIndicatorField,
'UserID': options.UserIDField,
'Spare': '0',
'dac': '1',
'fid': '11',
'latitude': options.latitudeField,
'longitude': options.longitudeField,
'day': options.dayField,
'hour': options.hourField,
'min': options.minField,
'avewind': options.avewindField,
'windgust': options.windgustField,
'winddir': options.winddirField,
'windgustdir': options.windgustdirField,
'airtemp': options.airtempField,
'relhumid': options.relhumidField,
'dewpoint': options.dewpointField,
'airpressure': options.airpressureField,
'airpressuretrend': options.airpressuretrendField,
'horizvis': options.horizvisField,
'waterlevel': options.waterlevelField,
'waterleveltrend': options.waterleveltrendField,
'surfcurspeed': options.surfcurspeedField,
'surfcurdir': options.surfcurdirField,
'curspeed2': options.curspeed2Field,
'curdir2': options.curdir2Field,
'curlevel2': options.curlevel2Field,
'curspeed3': options.curspeed3Field,
'curdir3': options.curdir3Field,
'curlevel3': options.curlevel3Field,
'sigwaveheight': options.sigwaveheightField,
'waveperiod': options.waveperiodField,
'wavedir': options.wavedirField,
'swellheight': options.swellheightField,
'swellperiod': options.swellperiodField,
'swelldir': options.swelldirField,
'seastate': options.seastateField,
'watertemp': options.watertempField,
'preciptype': options.preciptypeField,
'salinity': options.salinityField,
'ice': options.iceField,
'Spare2': '0',
}
bits = encode(msgDict)
if 'binary'==options.ioType: print str(bits)
elif 'nmeapayload'==options.ioType:
# FIX: figure out if this might be necessary at compile time
#print "bitLen",len(bits)
bitLen=len(bits)
if bitLen%6!=0:
bits = bits + BitVector(size=(6 - (bitLen%6))) # Pad out to multiple of 6
#print "result:",binary.bitvectoais6(bits)[0]
print binary.bitvectoais6(bits)[0]
# FIX: Do not emit this option for the binary message payloads. Does not make sense.
elif 'nmea'==options.ioType:
nmea = uscg.create_nmea(bits)
print nmea
else: sys.exit('ERROR: unknown ioType. Help!')
if options.sqlCreate:
sqlCreateStr(outfile,options.fieldList,dbType=options.dbType)
if options.latexDefinitionTable:
latexDefinitionTable(outfile)
# For conversion to word tables
if options.textDefinitionTable:
textDefinitionTable(outfile,options.delimTextDefinitionTable)
if options.printCsvfieldList:
# Make a csv separated list of fields that will be displayed for csv
if None == options.fieldList: options.fieldList = fieldList
import StringIO
buf = StringIO.StringIO()
for field in options.fieldList:
buf.write(field+',')
result = buf.getvalue()
if result[-1] == ',': print result[:-1]
else: print result
if options.doDecode:
if len(args)==0: args = sys.stdin
for msg in args:
bv = None
if msg[0] in ('$','!') and msg[3:6] in ('VDM','VDO'):
# Found nmea
# FIX: do checksum
bv = binary.ais6tobitvec(msg.split(',')[5])
else: # either binary or nmeapayload... expect mostly nmeapayloads
# assumes that an all 0 and 1 string can not be a nmeapayload
binaryMsg=True
for c in msg:
if c not in ('0','1'):
binaryMsg=False
break
if binaryMsg:
bv = BitVector(bitstring=msg)
else: # nmeapayload
bv = binary.ais6tobitvec(msg)
printFields(decode(bv)
,out=outfile
,format=options.outputType
,fieldList=options.fieldList
,dbType=options.dbType
)
############################################################
if __name__=='__main__':
main()
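# Illustrative note (an assumption, not part of the original file): typical invocations
# of this script would look like
#   python <this_module>.py --unit-test           # run the encode/decode round-trip test
#   python <this_module>.py -d <nmea or payload>  # decode one or more received messages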
| 40.10943 | 448 | 0.677119 |
cddb773d747456bda545807b94273a567fea14a6 | 263 | py | Python | setup.py | patel-zeel/nsgpytorch | 9b8fe76597547e0a29d7ce68ce1c06ac2aa2862a | [
"MIT"
] | null | null | null | setup.py | patel-zeel/nsgpytorch | 9b8fe76597547e0a29d7ce68ce1c06ac2aa2862a | [
"MIT"
] | 3 | 2021-09-22T08:31:49.000Z | 2021-10-13T14:23:14.000Z | setup.py | patel-zeel/regdata | 4b823368750a1afd24b1b8d63e3b2ba58d983c79 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
with open('requirements.txt') as f:
requirements = f.read().strip().split('\n')
setup(
packages=find_packages(),
python_requires=">=3.6",
install_requires=requirements,
include_package_data=True,
) | 23.909091 | 47 | 0.711027 |
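# Illustrative note (an assumption, not part of the original file): a typical install of
# this package would be `pip install -e .`, which runs this setup.py and pulls the
# dependencies listed in requirements.txt via the block above.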
1516bff91669d113f4a9159f7fbb3840595d4d6b | 2,832 | py | Python | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/applications/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | null | null | null | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/applications/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | null | null | null | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/applications/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | 1 | 2021-01-28T01:57:41.000Z | 2021-01-28T01:57:41.000Z | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Keras Applications are canned architectures with pre-trained weights.
"""
from __future__ import print_function as _print_function
import sys as _sys
from . import densenet
from . import efficientnet
from . import imagenet_utils
from . import inception_resnet_v2
from . import inception_v3
from . import mobilenet
from . import mobilenet_v2
from . import nasnet
from . import resnet
from . import resnet50
from . import resnet_v2
from . import vgg16
from . import vgg19
from . import xception
from tensorflow.python.keras.applications.densenet import DenseNet121
from tensorflow.python.keras.applications.densenet import DenseNet169
from tensorflow.python.keras.applications.densenet import DenseNet201
from tensorflow.python.keras.applications.efficientnet import EfficientNetB0
from tensorflow.python.keras.applications.efficientnet import EfficientNetB1
from tensorflow.python.keras.applications.efficientnet import EfficientNetB2
from tensorflow.python.keras.applications.efficientnet import EfficientNetB3
from tensorflow.python.keras.applications.efficientnet import EfficientNetB4
from tensorflow.python.keras.applications.efficientnet import EfficientNetB5
from tensorflow.python.keras.applications.efficientnet import EfficientNetB6
from tensorflow.python.keras.applications.efficientnet import EfficientNetB7
from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.python.keras.applications.nasnet import NASNetLarge
from tensorflow.python.keras.applications.nasnet import NASNetMobile
from tensorflow.python.keras.applications.resnet import ResNet101
from tensorflow.python.keras.applications.resnet import ResNet152
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.applications.resnet_v2 import ResNet101V2
from tensorflow.python.keras.applications.resnet_v2 import ResNet152V2
from tensorflow.python.keras.applications.resnet_v2 import ResNet50V2
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.applications.vgg19 import VGG19
from tensorflow.python.keras.applications.xception import Xception
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
_sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
_sys.modules[__name__], "keras.applications", public_apis=None, deprecation=True,
has_lite=False)
| 48 | 87 | 0.858404 |
2b31d691ec9b91bc760bcf9cfd6e173bfe5b5261 | 21,087 | py | Python | tests/test_nfw.py | msimet/OffsetNFW | c07c12596eb55ed4de10d39bf98234f2d98ad44d | [
"BSD-3-Clause"
] | 1 | 2019-03-25T19:22:35.000Z | 2019-03-25T19:22:35.000Z | tests/test_nfw.py | msimet/OffsetNFW | c07c12596eb55ed4de10d39bf98234f2d98ad44d | [
"BSD-3-Clause"
] | 1 | 2016-02-22T21:55:33.000Z | 2016-03-02T23:52:39.000Z | tests/test_nfw.py | msimet/OffsetNFW | c07c12596eb55ed4de10d39bf98234f2d98ad44d | [
"BSD-3-Clause"
] | 1 | 2019-03-27T19:17:09.000Z | 2019-03-27T19:17:09.000Z | import numpy
import numpy.testing
import astropy.cosmology
import astropy.units as u
try:
import offset_nfw
except ImportError:
import sys
sys.path.append('..')
import offset_nfw
# A "cosmology" object that passes initialization tests.
class fake_cosmo(object):
critical_density0 = 1
Om0 = 1
def critical_density(self, x):
return 1
def angular_diameter_distance(self, x):
return 1
def angular_diameter_distance_z1z2(self, x, y):
return 1
def Om(self, x):
return 1
m_c_z_test_list = [(1E14, 4, 0.2), (1E13, 4, 0.2), (1E15, 4, 0.2),
(1E14, 2, 0.2), (1E14, 6, 0.2),
(1E14, 4, 0.05), (1E14, 4, 0.5), (1E14, 4, 4)]
m_c_z_multi_test_list = [([1E14, 1E15], 4, 0.2),
(1E14, [2,4,6], 0.2),
(1E14, 4, [0.2,0.5])]
cosmo = astropy.cosmology.FlatLambdaCDM(H0=100, Om0=0.3)
def test_object_creation():
# Need a cosmology object
numpy.testing.assert_raises(TypeError, offset_nfw.NFWModel)
# Need a REAL cosmology object
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, None)
cosmology_obj = fake_cosmo()
offset_nfw.NFWModel(cosmology_obj) # This should pass
# Existing directory
offset_nfw.NFWModel(cosmology_obj, dir='.')
# Non-existing directory
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, dir='_random_dir')
# Wrong rho type
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, rho='rho_dm')
# Nonsensical delta
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, delta=-200)
# Non-working ranges
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, x_range=3)
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, x_range=(3,4,5))
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, x_range=('a', 'b'))
# Should work
offset_nfw.NFWModel(cosmology_obj, x_range=[3,4])
# Non-working ranges
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, miscentering_range=3)
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, miscentering_range=(3,4,5))
numpy.testing.assert_raises(RuntimeError, offset_nfw.NFWModel, cosmology_obj, miscentering_range=('a', 'b'))
# Should work
offset_nfw.NFWModel(cosmology_obj, miscentering_range=[3,4])
obj = offset_nfw.NFWModel(cosmology_obj, '.', 'rho_m', delta=150, precision=0.02, x_range=(0.1,2),
miscentering_range=(0.1,2), comoving=False)
numpy.testing.assert_equal(obj.cosmology, cosmology_obj)
numpy.testing.assert_equal(obj.dir, '.')
numpy.testing.assert_equal(obj.rho, 'rho_m')
numpy.testing.assert_equal(obj.delta, 150)
numpy.testing.assert_equal(obj.precision, 0.02)
numpy.testing.assert_equal(obj.x_range, (0.1,2))
numpy.testing.assert_equal(obj.miscentering_range, (0.1,2))
numpy.testing.assert_equal(obj.comoving, False)
# Should work
offset_nfw.NFWModel(astropy.cosmology.FlatLambdaCDM(H0=100, Om0=0.3))
def test_scale_radii():
""" Test scale radius measurement. """
# Test against some precomputed values
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c')
numpy.testing.assert_allclose(nfw_1.scale_radius(1E14, 4, 0.2).to(u.Mpc).value, 0.2120377818122246)
numpy.testing.assert_allclose(nfw_1.scale_radius(1E15, 3, 0.2).to(u.Mpc).value, 0.609095398969911)
numpy.testing.assert_allclose(nfw_1.scale_radius(1E13, 5, 0.2).to(u.Mpc).value, 0.07873537663340793)
numpy.testing.assert_allclose(nfw_1.scale_radius(1E14, 4, 0.1).to(u.Mpc).value, 0.20114937491773577)
numpy.testing.assert_allclose(nfw_1.scale_radius(1E14, 4.5, 0.3).to(u.Mpc).value, 0.1968790019866928)
nfw_2 = offset_nfw.NFWModel(cosmo, delta=150, rho='rho_c')
numpy.testing.assert_allclose(nfw_2.scale_radius(1E14, 4, 0.2).to(u.Mpc).value, 0.23337777629652395)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E15, 3, 0.2).to(u.Mpc).value, 0.6703962310354946)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E13, 5, 0.2).to(u.Mpc).value, 0.08665949510284233)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E14, 4, 0.1).to(u.Mpc).value, 0.22139353383402788)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E14, 4.5, 0.3).to(u.Mpc).value, 0.21669338025721746)
nfw_3 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_m')
# These were computed using separate code, hence almost_equal instead of equal
numpy.testing.assert_almost_equal(nfw_3.scale_radius(1E14, 4, 0.2).to(u.Mpc).value, 0.281924022285, decimal=4)
numpy.testing.assert_almost_equal(nfw_3.scale_radius(1E15, 3, 0.2).to(u.Mpc).value, 0.809849191419, decimal=4)
numpy.testing.assert_almost_equal(nfw_3.scale_radius(1E13, 5, 0.2).to(u.Mpc).value, 0.104686031501, decimal=4)
numpy.testing.assert_almost_equal(nfw_3.scale_radius(1E14, 4, 0.1).to(u.Mpc).value, 0.281924022285, decimal=4)
numpy.testing.assert_almost_equal(nfw_3.scale_radius(1E14, 4.5, 0.3).to(u.Mpc).value, 0.25059913092, decimal=4)
nfw_4 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_m', comoving=False)
numpy.testing.assert_allclose(nfw_3.scale_radius(1E14, 4, 0.2).to(u.Mpc).value,
1.2*nfw_4.scale_radius(1E14, 4, 0.2).to(u.Mpc).value)
numpy.testing.assert_allclose(nfw_3.scale_radius(1E15, 3, 0.2).to(u.Mpc).value,
1.2*nfw_4.scale_radius(1E15, 3, 0.2).to(u.Mpc).value)
numpy.testing.assert_allclose(nfw_3.scale_radius(1E13, 5, 0.2).to(u.Mpc).value,
1.2*nfw_4.scale_radius(1E13, 5, 0.2).to(u.Mpc).value)
numpy.testing.assert_allclose(nfw_3.scale_radius(1E14, 4, 0.1).to(u.Mpc).value,
1.1*nfw_4.scale_radius(1E14, 4, 0.1).to(u.Mpc).value)
numpy.testing.assert_allclose(nfw_3.scale_radius(1E14, 4.5, 0.3).to(u.Mpc).value,
1.3*nfw_4.scale_radius(1E14, 4.5, 0.3).to(u.Mpc).value)
nfw_5 = offset_nfw.NFWModel(cosmo, delta=150, rho='rho_c', comoving=False)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E14, 4, 0.2).to(u.Mpc).value,
1.2*nfw_5.scale_radius(1E14, 4, 0.2).to(u.Mpc).value)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E15, 3, 0.2).to(u.Mpc).value,
1.2*nfw_5.scale_radius(1E15, 3, 0.2).to(u.Mpc).value)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E13, 5, 0.2).to(u.Mpc).value,
1.2*nfw_5.scale_radius(1E13, 5, 0.2).to(u.Mpc).value)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E14, 4, 0.1).to(u.Mpc).value,
1.1*nfw_5.scale_radius(1E14, 4, 0.1).to(u.Mpc).value)
numpy.testing.assert_allclose(nfw_2.scale_radius(1E14, 4.5, 0.3).to(u.Mpc).value,
1.3*nfw_5.scale_radius(1E14, 4.5, 0.3).to(u.Mpc).value)
nfw_6 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c', comoving=False)
try:
import galsim.nfw_halo
# There is a hard-coded constant in galsim.nfw_halo that is 3 decimals, so we cannot go
# more precise than that
for m, c, z in m_c_z_test_list:
nfw_comp = galsim.nfw_halo.NFWHalo(m, c, z, omega_m=cosmo.Om0)
numpy.testing.assert_almost_equal(nfw_6.scale_radius(m, c, z).to(u.Mpc).value, nfw_comp.rs, decimal=3)
except ImportError:
pass
def test_against_colossus():
try:
import colossus.Cosmology, colossus.HaloDensityProfile
params = {'flat': True, 'H0': 100, 'Om0': 0.3, 'Ob0': 0.043, 'sigma8': 0.8, 'ns': 0.97}
colossus.Cosmology.setCosmology('myCosmo', params)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E14, c=4, z=0.2, mdef='200m')
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_m', comoving=False)
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.2, mdef='200m')/4,
nfw_1.scale_radius(1.E14, 4, 0.2).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E15, c=4, z=0.2, mdef='200m')
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.2, mdef='200m')/4,
nfw_1.scale_radius(1.E15, 4, 0.2).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E13, c=3.5, z=0.2, mdef='200m')
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.2, mdef='200m')/3.5,
nfw_1.scale_radius(1.E13, 3.5, 0.2).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E14, c=4, z=0.4, mdef='200m')
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.4, mdef='200m')/4,
nfw_1.scale_radius(1.E14, 4, 0.4).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E14, c=4, z=0.4, mdef='180m')
nfw_2 = offset_nfw.NFWModel(cosmo, delta=180, rho='rho_m', comoving=False)
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.4, mdef='180m')/4,
nfw_2.scale_radius(1.E14, 4, 0.4).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E14, c=4, z=0.2, mdef='200c')
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c', comoving=False)
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.2, mdef='200c')/4,
nfw_1.scale_radius(1.E14, 4, 0.2).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E15, c=4, z=0.2, mdef='200c')
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.2, mdef='200c')/4,
nfw_1.scale_radius(1.E15, 4, 0.2).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E13, c=3.5, z=0.2, mdef='200c')
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.2, mdef='200c')/3.5,
nfw_1.scale_radius(1.E13, 3.5, 0.2).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E14, c=4, z=0.4, mdef='200c')
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.4, mdef='200c')/4,
nfw_1.scale_radius(1.E14, 4, 0.4).to(u.Mpc).value, decimal=4)
colossus_nfw_1 = colossus.HaloDensityProfile.NFWProfile(M=1E14, c=4, z=0.4, mdef='180c')
nfw_2 = offset_nfw.NFWModel(cosmo, delta=180, rho='rho_c', comoving=False)
numpy.testing.assert_almost_equal(
0.001*colossus_nfw_1.RDelta(z=0.4, mdef='180c')/4,
nfw_2.scale_radius(1.E14, 4, 0.4).to(u.Mpc).value, decimal=4)
except ImportError:
pass
def test_against_galsim_theory():
""" Test against the GalSim implementation of NFWs. """
try:
import galsim
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c', comoving=False)
z_source = 4.95
radbins = numpy.exp(numpy.linspace(numpy.log(0.01), numpy.log(20), num=100))
for m, c, z in [(1E14, 4, 0.2), (1E13, 4, 0.2), (1E15, 4, 0.2),
(1E14, 2, 0.2), (1E14, 6, 0.2),
(1E14, 4, 0.05), (1E14, 4, 0.5), (1E14, 4, 4)]:
galsim_nfw = galsim.NFWHalo(m, c, z, omega_m = cosmo.Om0)
angular_pos_x = radbins/cosmo.angular_diameter_distance(z)*206265
angular_pos_y = numpy.zeros_like(angular_pos_x)
# want tangential shear; galsim gives us 2-component, but all along x-axis, so just use
# first component with negative sign
nfw_1.gamma_theory(radbins, m, c, z, z_source)
numpy.testing.assert_almost_equal(-galsim_nfw.getShear((angular_pos_x, angular_pos_y), z_source, reduced=False)[0],
nfw_1.gamma_theory(radbins, m, c, z, z_source), decimal=3)
numpy.testing.assert_almost_equal(galsim_nfw.getConvergence((angular_pos_x, angular_pos_y), z_source),
nfw_1.kappa_theory(radbins, m, c, z, z_source), decimal=3)
# Really, we should test reduced shear too. However, the combo of the fact that
# there's a large scale dependence & we disagree most at large radii means both
# assert_almost_equal and assert_allclose fail for some range of radii; therefore,
# we can't pass the test, although the individual pieces of reduced shear do pass.
except ImportError:
import warnings
warnings.warn("Could not test against GalSim -- import failure")
return True
def test_against_clusterlensing_theory():
""" Test against the cluster-lensing implementation of NFWs. """
try:
import clusterlensing
except ImportError:
import warnings
warnings.warn("Could not test against cluster-lensing -- import failure")
def test_sigma_to_deltasigma_theory():
""" Test that the numerical sigma -> deltasigma produces the theoretical DS. """
radbins = numpy.exp(numpy.linspace(numpy.log(0.001), numpy.log(100), num=500))
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c')
for m, c, z in m_c_z_test_list:
ds = nfw_1.deltasigma_theory(radbins, m, c, z)
sig = nfw_1.sigma_theory(radbins, m, c, z)
ds_from_sigma = nfw_1.sigma_to_deltasigma(radbins, sig)
import matplotlib.pyplot as plt
n_to_keep=int(len(radbins)*0.6)
numpy.testing.assert_almost_equal(ds.value[-n_to_keep:], ds_from_sigma.value[-n_to_keep:], decimal=3)
numpy.testing.assert_equal(ds.unit, ds_from_sigma.unit)
plt.plot(radbins, ds_from_sigma/ds, label="ds")
# plt.plot(radbins, ds_from_sigma, label="ds from sigma")
plt.xscale('log')
plt.ylim((0., 2))
plt.savefig('test.png')
for m, c, z in m_c_z_multi_test_list:
ds = nfw_1.deltasigma_theory(radbins, m, c, z)
sig = nfw_1.sigma_theory(radbins, m, c, z)
ds_from_sigma = nfw_1.sigma_to_deltasigma(radbins, sig)
import matplotlib.pyplot as plt
n_to_keep=int(len(radbins)*0.6)
numpy.testing.assert_almost_equal(ds.value[:,-n_to_keep:], ds_from_sigma.value[:,-n_to_keep:], decimal=3)
numpy.testing.assert_equal(ds.unit, ds_from_sigma.unit)
plt.plot(radbins, ds_from_sigma[0]/ds[0], label="ds")
plt.plot(radbins, ds_from_sigma[1]/ds[1], label="ds")
# plt.plot(radbins, ds_from_sigma, label="ds from sigma")
plt.xscale('log')
plt.ylim((0., 2))
plt.savefig('test.png')
#TODO: do again, miscentered
def test_z_ratios_theory():
""" Test that the theoretical shear changes properly with redshift"""
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c')
base = nfw_1.gamma_theory(1., 1.E14, 4, 0.1, 0.15)
new_z = numpy.linspace(0.15, 1.1, num=20)
new_gamma = nfw_1.gamma_theory(1, 1.E14, 4, 0.1, new_z)
new_gamma /= base
numpy.testing.assert_allclose(new_gamma, cosmo.angular_diameter_distance_z1z2(0.1, new_z)/cosmo.angular_diameter_distance_z1z2(0.1, 0.15)*cosmo.angular_diameter_distance(0.15)/cosmo.angular_diameter_distance(new_z))
base = nfw_1.kappa_theory(1., 1.E14, 4, 0.1, 0.15)
new_sigma = nfw_1.kappa_theory(1, 1.E14, 4, 0.1, new_z)
new_sigma /= base
numpy.testing.assert_allclose(new_sigma, cosmo.angular_diameter_distance_z1z2(0.1, new_z)/cosmo.angular_diameter_distance_z1z2(0.1, 0.15)*cosmo.angular_diameter_distance(0.15)/cosmo.angular_diameter_distance(new_z))
#TODO: do again, miscentered
def test_g():
""" Test that the theoretical returned g is the appropriate reduced shear """
radbins = numpy.exp(numpy.linspace(numpy.log(0.001), numpy.log(100), num=500))
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c')
z_source = 4.95
for m, c, z in m_c_z_test_list+m_c_z_multi_test_list:
numpy.testing.assert_allclose(nfw_1.g_theory(radbins, m, c, z, z_source),
nfw_1.gamma_theory(radbins, m, c, z, z_source)
/(1-nfw_1.kappa_theory(radbins, m, c, z, z_source)))
def test_Upsilon():
""" Test that the theoretical Upsilon is the appropriate value. """
radbins = numpy.exp(numpy.linspace(numpy.log(0.001), numpy.log(100), num=500))
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c')
for m, c, z in m_c_z_test_list:
for r in radbins[100:400:4]:
numpy.testing.assert_allclose(nfw_1.Upsilon_theory(radbins, m, c, z, r).value,
nfw_1.deltasigma_theory(radbins, m, c, z).value
- (r/radbins)**2*nfw_1.deltasigma_theory(r, m, c, z).value)
for m, c, z in m_c_z_multi_test_list:
for r in radbins[100:400:4]:
numpy.testing.assert_allclose(nfw_1.Upsilon_theory(radbins, m, c, z, r).value,
nfw_1.deltasigma_theory(radbins, m, c, z).value
- (r/radbins)**2*nfw_1.deltasigma_theory(r, m, c, z).value[:, numpy.newaxis])
def test_ordering():
""" Test that the axes are set up properly for multidimensional inputs."""
radbins = numpy.exp(numpy.linspace(numpy.log(0.001), numpy.log(100), num=10))
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c')
m, c, z = m_c_z_test_list[0]
zs = [z+0.1, z+0.1, z+1]
base_result = nfw_1.deltasigma_theory(radbins, m, c, z)
comp_m = nfw_1.deltasigma_theory(radbins, [m,m], c, z)
numpy.testing.assert_equal(comp_m[0], comp_m[1])
numpy.testing.assert_equal(base_result, comp_m[0])
comp_c = nfw_1.deltasigma_theory(radbins, m, [c,c], z)
numpy.testing.assert_equal(comp_c[0], comp_c[1])
numpy.testing.assert_equal(base_result, comp_c[0])
comp_z = nfw_1.deltasigma_theory(radbins, m, c, [z,z])
numpy.testing.assert_equal(comp_z[0], comp_z[1])
numpy.testing.assert_equal(base_result, comp_z[0])
sub_base_result = nfw_1.g_theory(radbins, m, c, z, zs[0])
base_result = nfw_1.g_theory(radbins, m, c, z, zs)
numpy.testing.assert_equal(base_result[0], sub_base_result)
numpy.testing.assert_equal(base_result[0], base_result[1])
# There's no "assert not equal", so let's try this
numpy.testing.assert_raises(AssertionError,
numpy.testing.assert_equal, base_result[0], base_result[2])
comp_m = nfw_1.g_theory(radbins, [m,m], c, z, zs)
numpy.testing.assert_equal(comp_m[:,0], comp_m[:,1])
numpy.testing.assert_equal(base_result, comp_m[:,0])
numpy.testing.assert_equal(comp_m[0,0], comp_m[0,1])
numpy.testing.assert_equal(comp_m[1,0], comp_m[1,1])
numpy.testing.assert_equal(comp_m[2,0], comp_m[2,1])
numpy.testing.assert_equal(sub_base_result, comp_m[0,0])
numpy.testing.assert_raises(AssertionError,
numpy.testing.assert_equal, comp_m[1,0], comp_m[2,0])
comp_c = nfw_1.g_theory(radbins, m, [c,c], z, zs)
numpy.testing.assert_equal(comp_c[:,0], comp_c[:,1])
numpy.testing.assert_equal(base_result, comp_c[:,0])
numpy.testing.assert_equal(comp_c[0,0], comp_c[0,1])
numpy.testing.assert_equal(comp_c[1,0], comp_c[1,1])
numpy.testing.assert_equal(comp_c[2,0], comp_c[2,1])
numpy.testing.assert_equal(sub_base_result, comp_c[0,0])
numpy.testing.assert_raises(AssertionError,
numpy.testing.assert_equal, comp_c[1,0], comp_c[2,0])
comp_z = nfw_1.g_theory(radbins, m, c, [z,z], zs)
numpy.testing.assert_equal(comp_z[:,0], comp_z[:,1])
numpy.testing.assert_equal(base_result, comp_z[:,0])
numpy.testing.assert_equal(comp_z[0,0], comp_z[0,1])
numpy.testing.assert_equal(comp_z[1,0], comp_z[1,1])
numpy.testing.assert_equal(comp_z[2,0], comp_z[2,1])
numpy.testing.assert_equal(sub_base_result, comp_z[0,0])
numpy.testing.assert_raises(AssertionError,
numpy.testing.assert_equal, comp_z[1,0], comp_z[2,0])
def setup_table():
""" Generate a small interpolation table so we can test its outputs. """
nfw_halo = offset_nfw.NFWModel(cosmo, generate=True, x_range=(0.1, 2), miscentering_range=(0, 0.2))
if __name__=='__main__':
#setup_table()
test_object_creation()
test_scale_radii()
test_against_colossus()
test_z_ratios_theory()
test_against_galsim_theory()
test_against_clusterlensing_theory()
test_sigma_to_deltasigma_theory()
test_g()
test_Upsilon()
test_ordering()
| 56.382353 | 219 | 0.657846 |
365ecd1e4a8b856360577f4f1d63535b70ffe3e4 | 31,386 | py | Python | dask/base.py | dubielt1/dask | fe2a42477caab379b5668097379be4d3cee7ed44 | [
"BSD-3-Clause"
] | null | null | null | dask/base.py | dubielt1/dask | fe2a42477caab379b5668097379be4d3cee7ed44 | [
"BSD-3-Clause"
] | null | null | null | dask/base.py | dubielt1/dask | fe2a42477caab379b5668097379be4d3cee7ed44 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from functools import partial
from hashlib import md5
from operator import getitem
import inspect
import pickle
import os
import threading
import uuid
import warnings
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from .compatibility import long, unicode, Iterator
from .context import thread_state
from .core import flatten, quote, get as simple_get
from .hashing import hash_buffer_hex
from .utils import Dispatch, ensure_dict
from . import config, local, threaded, sharedict
__all__ = ("DaskMethodsMixin",
"is_dask_collection",
"compute", "persist", "optimize", "visualize",
"tokenize", "normalize_token")
def is_dask_collection(x):
"""Returns ``True`` if ``x`` is a dask collection"""
try:
return x.__dask_graph__() is not None
except (AttributeError, TypeError):
return False
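# Illustrative sketch (an assumption, not part of the original source): any object whose
# __dask_graph__() returns something other than None counts as a dask collection, so a
# dask.delayed value passes while a plain Python int does not.
#
#     >>> from dask import delayed          # doctest-style sketch, not executed here
#     >>> is_dask_collection(1)
#     False
#     >>> is_dask_collection(delayed(1))
#     True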
class DaskMethodsMixin(object):
"""A mixin adding standard dask collection methods"""
__slots__ = ()
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color: {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
result : IPython.diplay.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
https://docs.dask.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def persist(self, **kwargs):
"""Persist this dask collection into memory
This turns a lazy Dask collection into a Dask collection with the same
metadata, but now with the results fully computed or actively computing
in the background.
        The action of this function differs significantly depending on the active
task scheduler. If the task scheduler supports asynchronous computing,
such as is the case of the dask.distributed scheduler, then persist
will return *immediately* and the return value's task graph will
contain Dask Future objects. However if the task scheduler only
supports blocking computation then the call to persist will *block*
and the return value's task graph will contain concrete Python results.
This function is particularly useful when using distributed systems,
because the results will be kept in distributed memory, rather than
returned to the local process as with compute.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
See Also
--------
dask.base.persist
"""
(result,) = persist(self, traverse=False, **kwargs)
return result
def compute(self, **kwargs):
"""Compute this dask collection
This turns a lazy Dask collection into its in-memory equivalent.
For example a Dask.array turns into a :func:`numpy.array` and a Dask.dataframe
turns into a Pandas dataframe. The entire dataset must fit into memory
before calling this operation.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
See Also
--------
dask.base.compute
"""
(result,) = compute(self, traverse=False, **kwargs)
return result
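# Illustrative sketch (not part of the library): the minimum a custom class needs
# in order to pick up ``compute``/``persist``/``visualize`` from DaskMethodsMixin.
# The ``Scalar`` class below is hypothetical and kept deliberately small.
#
#     class Scalar(DaskMethodsMixin):
#         def __init__(self, dsk, key):
#             self._dsk = dsk
#             self._key = key
#
#         def __dask_graph__(self):
#             return self._dsk
#
#         def __dask_keys__(self):
#             return [self._key]
#
#         def __dask_tokenize__(self):
#             return self._key
#
#         # run tasks with the shared threaded scheduler by default
#         __dask_scheduler__ = staticmethod(threaded.get)
#
#         def __dask_postcompute__(self):
#             # called as finalize(results, *extra_args); results mirrors __dask_keys__()
#             return (lambda results: results[0]), ()
#
#         def __dask_postpersist__(self):
#             # called as rebuild(dsk, *extra_args)
#             return Scalar, (self._key,)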
def compute_as_if_collection(cls, dsk, keys, get=None, scheduler=None, **kwargs):
"""Compute a graph as if it were of type cls.
Allows for applying the same optimizations and default scheduler."""
schedule = get_scheduler(get=get, scheduler=scheduler, cls=cls)
dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs)
return schedule(dsk2, keys, **kwargs)
def dont_optimize(dsk, keys, **kwargs):
return dsk
def optimization_function(x):
return getattr(x, '__dask_optimize__', dont_optimize)
def collections_to_dsk(collections, optimize_graph=True, **kwargs):
"""
Convert many collections into a single dask graph, after optimization
"""
optimizations = (kwargs.pop('optimizations', None) or
config.get('optimizations', []))
if optimize_graph:
groups = groupby(optimization_function, collections)
groups = {opt: _extract_graph_and_keys(val)
for opt, val in groups.items()}
for opt in optimizations:
groups = {k: (opt(dsk, keys), keys)
for k, (dsk, keys) in groups.items()}
dsk = merge(*map(ensure_dict, [opt(dsk, keys, **kwargs)
for opt, (dsk, keys) in groups.items()]))
else:
dsk, _ = _extract_graph_and_keys(collections)
return dsk
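# Illustrative note: extra optimization passes can be supplied through the
# ``optimizations=`` keyword (or the global config); each pass is called as
# ``opt(dsk, keys)``.  ``my_pass`` and ``x`` below are placeholders.
#
#     def my_pass(dsk, keys):
#         return dsk   # no-op pass
#
#     dsk = collections_to_dsk([x], optimizations=[my_pass])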
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
    that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
graphs = [v.__dask_graph__() for v in vals]
keys = [v.__dask_keys__() for v in vals]
if any(isinstance(graph, sharedict.ShareDict) for graph in graphs):
graph = sharedict.merge(*graphs)
else:
graph = merge(*graphs)
return graph, keys
def unpack_collections(*args, **kwargs):
"""Extract collections in preparation for compute/persist/etc...
Intended use is to find all collections in a set of (possibly nested)
python objects, do something to them (compute, etc...), then repackage them
in equivalent python objects.
Parameters
----------
*args
Any number of objects. If it is a dask collection, it's extracted and
added to the list of collections returned. By default, python builtin
collections are also traversed to look for dask collections (for more
information see the ``traverse`` keyword).
traverse : bool, optional
If True (default), builtin python collections are traversed looking for
any dask collections they might contain.
Returns
-------
collections : list
A list of all dask collections contained in ``args``
repack : callable
A function to call on the transformed collections to repackage them as
they were in the original ``args``.
"""
traverse = kwargs.pop('traverse', True)
collections = []
repack_dsk = {}
collections_token = uuid.uuid4().hex
def _unpack(expr):
if is_dask_collection(expr):
tok = tokenize(expr)
if tok not in repack_dsk:
repack_dsk[tok] = (getitem, collections_token, len(collections))
collections.append(expr)
return tok
tok = uuid.uuid4().hex
if not traverse:
tsk = quote(expr)
else:
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
tsk = (typ, [_unpack(i) for i in expr])
elif typ is dict:
tsk = (dict, [[_unpack(k), _unpack(v)]
for k, v in expr.items()])
else:
return expr
repack_dsk[tok] = tsk
return tok
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[collections_token] = quote(results)
return simple_get(dsk, out)
return collections, repack
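# Illustrative round trip (assumes dask.array is available):
#
#     >>> import dask.array as da
#     >>> x = da.ones(3, chunks=(3,))
#     >>> collections, repack = unpack_collections({'a': x, 'b': 1})
#     >>> len(collections)                              # doctest: +SKIP
#     1
#     >>> repack([x.compute()])                         # doctest: +SKIP
#     ({'a': array([1., 1., 1.]), 'b': 1},)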
def optimize(*args, **kwargs):
"""Optimize several dask collections at once.
Returns equivalent dask collections that all share the same merged and
optimized underlying graph. This can be useful if converting multiple
collections to delayed objects, or to manually apply the optimizations at
strategic points.
Note that in most cases you shouldn't need to call this method directly.
Parameters
----------
*args : objects
Any number of objects. If a dask object, its graph is optimized and
merged with all those of all other dask objects before returning an
equivalent dask collection. Non-dask arguments are passed through
unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``optimize``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimizations : list of callables, optional
Additional optimization passes to perform.
**kwargs
Extra keyword arguments to forward to the optimization passes.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> a2, b2 = optimize(a, b)
>>> a2.compute() == a.compute()
True
>>> b2.compute() == b.compute()
True
"""
collections, repack = unpack_collections(*args, **kwargs)
if not collections:
return args
dsk = collections_to_dsk(collections, **kwargs)
    postpersists = []
    for a in collections:
        postpersists.append(a.__dask_postpersist__())
return repack([r(dsk, *s) for r, s in postpersists])
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If it is a dask object, it's computed and the
result is returned. By default, python builtin collections are also
traversed to look for dask objects (for more information see the
``traverse`` keyword). Non-dask arguments are passed through unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``compute``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
By default, dask objects inside python collections will also be computed:
>>> compute({'a': a, 'b': b, 'c': 1}) # doctest: +SKIP
({'a': 45, 'b': 4.5, 'c': 1},)
"""
traverse = kwargs.pop('traverse', True)
optimize_graph = kwargs.pop('optimize_graph', True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(get=kwargs.pop('get', None),
scheduler=kwargs.pop('scheduler', None),
collections=collections)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys = [x.__dask_keys__() for x in collections]
postcomputes = [x.__dask_postcompute__() for x in collections]
results = schedule(dsk, keys, **kwargs)
return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color: {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
https://docs.dask.org/en/latest/optimize.html
"""
from dask.dot import dot_graph
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if is_dask_collection(arg)]
dsk = collections_to_dsk(args, optimize_graph=optimize_graph)
for d in dsks:
dsk.update(d)
color = kwargs.get('color')
if color == 'order':
from .order import order
import matplotlib.pyplot as plt
o = order(dsk)
try:
cmap = kwargs.pop('cmap')
except KeyError:
cmap = plt.cm.RdBu
if isinstance(cmap, str):
import matplotlib.pyplot as plt
cmap = getattr(plt.cm, cmap)
mx = max(o.values()) + 1
colors = {k: _colorize(cmap(v / mx, bytes=True)) for k, v in o.items()}
kwargs['function_attributes'] = {k: {'color': v, 'label': str(o[k])}
for k, v in colors.items()}
kwargs['data_attributes'] = {k: {'color': v} for k, v in colors.items()}
elif color:
raise NotImplementedError("Unknown value color=%s" % color)
return dot_graph(dsk, filename=filename, **kwargs)
def persist(*args, **kwargs):
""" Persist multiple Dask collections into memory
This turns lazy Dask collections into Dask collections with the same
metadata, but now with their results fully computed or actively computing
in the background.
For example a lazy dask.array built up from many lazy calls will now be a
dask.array of the same shape, dtype, chunks, etc., but now with all of
those previously lazy tasks either computed in memory as many small :class:`numpy.array`
(in the single-machine case) or asynchronously running in the
background on a cluster (in the distributed case).
This function operates differently if a ``dask.distributed.Client`` exists
and is connected to a distributed scheduler. In this case this function
will return as soon as the task graph has been submitted to the cluster,
but before the computations have completed. Computations will continue
asynchronously in the background. When using this function with the single
machine scheduler it blocks until the computations have finished.
When using Dask on a single machine you should ensure that the dataset fits
entirely within memory.
Examples
--------
>>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP
>>> df = df[df.name == 'Alice'] # doctest: +SKIP
>>> df['in-debt'] = df.balance < 0 # doctest: +SKIP
>>> df = df.persist() # triggers computation # doctest: +SKIP
>>> df.value().min() # future computations are now fast # doctest: +SKIP
-10
>>> df.value().max() # doctest: +SKIP
100
>>> from dask import persist # use persist function on multiple collections
>>> a, b = persist(a, b) # doctest: +SKIP
Parameters
----------
*args: Dask collections
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``persist``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
"""
traverse = kwargs.pop('traverse', True)
optimize_graph = kwargs.pop('optimize_graph', True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(get=kwargs.pop('get', None),
scheduler=kwargs.pop('scheduler', None),
collections=collections)
if inspect.ismethod(schedule):
try:
from distributed.client import default_client
except ImportError:
pass
else:
try:
client = default_client()
except ValueError:
pass
else:
if client.get == schedule:
results = client.persist(collections,
optimize_graph=optimize_graph,
**kwargs)
return repack(results)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys, postpersists = [], []
for a in collections:
a_keys = list(flatten(a.__dask_keys__()))
rebuild, state = a.__dask_postpersist__()
keys.extend(a_keys)
postpersists.append((rebuild, a_keys, state))
results = schedule(dsk, keys, **kwargs)
d = dict(zip(keys, results))
results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists]
return repack(results2)
############
# Tokenize #
############
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
normalize_token = Dispatch()
normalize_token.register((int, long, float, str, unicode, bytes, type(None),
type, slice, complex, type(Ellipsis)),
identity)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register(set)
def normalize_set(s):
return normalize_token(sorted(s, key=str))
@normalize_token.register((tuple, list))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@normalize_token.register(object)
def normalize_object(o):
method = getattr(o, '__dask_tokenize__', None)
if method is not None:
return method()
return normalize_function(o) if callable(o) else uuid.uuid4().hex
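# Illustrative sketch: an object can opt in to deterministic tokens by defining
# ``__dask_tokenize__`` (the ``Point`` class below is hypothetical):
#
#     class Point(object):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#         def __dask_tokenize__(self):
#             return (Point, self.x, self.y)
#
#     assert tokenize(Point(1, 2)) == tokenize(Point(1, 2))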
function_cache = {}
function_cache_lock = threading.Lock()
def normalize_function(func):
try:
return function_cache[func]
except KeyError:
result = _normalize_function(func)
if len(function_cache) >= 500: # clear half of cache if full
with function_cache_lock:
if len(function_cache) >= 500:
for k in list(function_cache)[::2]:
del function_cache[k]
function_cache[func] = result
return result
except TypeError: # not hashable
return _normalize_function(func)
def _normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
args = tuple(normalize_token(i) for i in func.args)
if func.keywords:
kws = tuple((k, normalize_token(v))
for k, v in sorted(func.keywords.items()))
else:
kws = None
return (normalize_function(func.func), args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b'__main__' not in result: # abort on dynamic functions
return result
except Exception:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except Exception:
return str(func)
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
@normalize_token.register(pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and getattr(x, 'filename', None):
if hasattr(x.base, 'ctypes'):
offset = (x.ctypes.get_as_parameter().value -
x.base.ctypes.get_as_parameter().value)
else:
offset = 0 # root memmap's have mmap object as base
return (x.filename, os.path.getmtime(x.filename), x.dtype,
x.shape, x.strides, offset)
if x.dtype.hasobject:
try:
data = hash_buffer_hex('-'.join(x.flat).encode('utf-8'))
except TypeError:
data = hash_buffer_hex(b'-'.join([unicode(item).encode('utf-8') for item in
x.flat]))
else:
try:
data = hash_buffer_hex(x.ravel(order='K').view('i1'))
except (BufferError, AttributeError, ValueError):
data = hash_buffer_hex(x.copy().ravel(order='K').view('i1'))
return (data, x.dtype, x.shape, x.strides)
@normalize_token.register(np.matrix)
def normalize_matrix(x):
return type(x).__name__, normalize_array(x.view(type=np.ndarray))
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
@normalize_token.register(np.ufunc)
def normalize_ufunc(x):
try:
name = x.__name__
if getattr(np, name) is x:
return 'np.' + name
except AttributeError:
return normalize_function(x)
@normalize_token.register_lazy("scipy")
def register_scipy():
import scipy.sparse as sp
def normalize_sparse_matrix(x, attrs):
return type(x).__name__, normalize_seq((normalize_token(getattr(x, key))
for key in attrs))
for cls, attrs in [(sp.dia_matrix, ('data', 'offsets', 'shape')),
(sp.bsr_matrix, ('data', 'indices', 'indptr',
'blocksize', 'shape')),
(sp.coo_matrix, ('data', 'row', 'col', 'shape')),
(sp.csr_matrix, ('data', 'indices', 'indptr', 'shape')),
(sp.csc_matrix, ('data', 'indices', 'indptr', 'shape')),
(sp.lil_matrix, ('data', 'rows', 'shape'))]:
normalize_token.register(cls,
partial(normalize_sparse_matrix, attrs=attrs))
@normalize_token.register(sp.dok_matrix)
def normalize_dok_matrix(x):
return type(x).__name__, normalize_token(sorted(x.items()))
def _colorize(t):
""" Convert (r, g, b) triple to "#RRGGBB" string
For use with ``visualize(color=...)``
Examples
--------
>>> _colorize((255, 255, 255))
'#FFFFFF'
>>> _colorize((0, 32, 128))
'#002080'
"""
t = t[:3]
i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t))
h = hex(int(i))[2:].upper()
h = '0' * (6 - len(h)) + h
return "#" + h
named_schedulers = {
'sync': local.get_sync,
'synchronous': local.get_sync,
'single-threaded': local.get_sync,
'threads': threaded.get,
'threading': threaded.get,
}
try:
from dask import multiprocessing as dask_multiprocessing
except ImportError:
pass
else:
named_schedulers.update({
'processes': dask_multiprocessing.get,
'multiprocessing': dask_multiprocessing.get,
})
_warned_on_get = [False]
def warn_on_get(get):
    if _warned_on_get[0]:
        return
    else:
        if get in named_schedulers.values():
            _warned_on_get[0] = True
warnings.warn(
"The get= keyword has been deprecated. "
"Please use the scheduler= keyword instead with the name of "
"the desired scheduler like 'threads' or 'processes'\n"
" x.compute(scheduler='threads') \n"
"or with a function that takes the graph and keys\n"
" x.compute(scheduler=my_scheduler_function)")
def get_scheduler(get=None, scheduler=None, collections=None, cls=None):
""" Get scheduler function
There are various ways to specify the scheduler to use:
1. Passing in get= parameters (deprecated)
2. Passing in scheduler= parameters
    3. Passing these into global configuration
4. Using defaults of a dask collection
This function centralizes the logic to determine the right scheduler to use
from those many options
"""
if get is not None:
if scheduler is not None:
raise ValueError("Both get= and scheduler= provided. Choose one")
warn_on_get(get)
return get
if scheduler is not None:
if callable(scheduler):
return scheduler
elif "Client" in type(scheduler).__name__ and hasattr(scheduler, 'get'):
return scheduler.get
elif scheduler.lower() in named_schedulers:
return named_schedulers[scheduler.lower()]
elif scheduler.lower() in ('dask.distributed', 'distributed'):
from distributed.worker import get_client
return get_client().get
else:
raise ValueError("Expected one of [distributed, %s]" % ', '.join(sorted(named_schedulers)))
# else: # try to connect to remote scheduler with this name
# return get_client(scheduler).get
if config.get('scheduler', None):
return get_scheduler(scheduler=config.get('scheduler', None))
if config.get('get', None):
warn_on_get(config.get('get', None))
return config.get('get', None)
if getattr(thread_state, 'key', False):
from distributed.worker import get_worker
return get_worker().client.get
if cls is not None:
return cls.__dask_scheduler__
if collections:
collections = [c for c in collections if c is not None]
if collections:
get = collections[0].__dask_scheduler__
if not all(c.__dask_scheduler__ == get for c in collections):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler=` parameter explicitly in compute or "
"globally with `set_options`.")
return get
return None
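# Illustrative resolution examples (``custom_fn`` and ``a_dask_array`` are
# placeholders; ``threaded`` and ``local`` are the modules imported above):
#
#     get_scheduler(scheduler='threads') is threaded.get      # named scheduler
#     get_scheduler(scheduler='sync') is local.get_sync       # named scheduler
#     get_scheduler(scheduler=custom_fn) is custom_fn         # any callable passes through
#     get_scheduler(collections=[a_dask_array])               # collection's default scheduler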
| 35.068156 | 103 | 0.621201 |
4ecf6d1624e65b5fe4bf313f786863bc9c3dfe56 | 141 | py | Python | oasysapi/common/urls.py | Deplim/Oasys | 0644d9e8a9d26baaa3b9419a0d460da61ab7472d | [
"MIT"
] | null | null | null | oasysapi/common/urls.py | Deplim/Oasys | 0644d9e8a9d26baaa3b9419a0d460da61ab7472d | [
"MIT"
] | 1 | 2022-01-25T10:42:53.000Z | 2022-01-25T10:42:53.000Z | oasysapi/common/urls.py | Deplim/Oasys | 0644d9e8a9d26baaa3b9419a0d460da61ab7472d | [
"MIT"
] | 2 | 2022-01-20T15:07:24.000Z | 2022-01-20T15:10:44.000Z | from django.urls import path
from common import views
urlpatterns = [
path('user', views.login),
path('oauth', views.oauth_init),
]
| 17.625 | 36 | 0.695035 |
2ec038d2e6ab6a4d83b228d0de7fd08c5be6c80d | 27,925 | py | Python | instagram/instagram.py | akaped/InstaDMBK | ea38abdbfc7ef8fdf1afb33e87880f2f58a1f378 | [
"MIT"
] | 1 | 2019-05-04T01:02:59.000Z | 2019-05-04T01:02:59.000Z | instagram/instagram.py | akaped/InstaDMBK | ea38abdbfc7ef8fdf1afb33e87880f2f58a1f378 | [
"MIT"
] | null | null | null | instagram/instagram.py | akaped/InstaDMBK | ea38abdbfc7ef8fdf1afb33e87880f2f58a1f378 | [
"MIT"
] | null | null | null | import utils
import urllib
import hashlib
import hmac
import requests
import json
import logging
class Instagram:
API_URL = 'https://i.instagram.com/api/v1/'
USER_AGENT = 'Instagram 64.0.0.12.96 (iPhone11,6; iOS 12_0; en_US; en-US; scale=3.00; gamut=wide; 1125x2436; 124976489)' #'Instagram 27.0.0.7.97 Android (18/4.3; 320dpi; 720x1280; Xiaomi; HM 1SW; armani; qcom; en_US)'
IG_SIG_KEY = '109513c04303341a7daf27bb41b268e633b30dcc65a3fe14503f743176113869'
EXPERIMENTS = 'ig_android_pop_probe,ig_camera_android_badge_face_effects_universe,ig_android_universe_video_production,ig_search_client_h1_2017_holdout,ig_android_stories_landscape_mode,ig_android_carousel_non_square_creation,ig_android_lazy_load_swipe_navigation_panels,ig_android_insights_account_insights_v2_universe,ig_android_live_analytics,ig_android_direct_expiring_media_view_mode_stickyness_universe,ig_android_realtime_mqtt_logging,ig_branded_content_show_settings_universe,ig_android_stories_server_coverframe,ig_android_video_captions_universe,ig_business_growth_acquisition_holdout_17h2,ig_android_ontact_invite_universe,ig_android_ad_async_ads_universe,ig_android_shopping_tag_creation_carousel_universe,ig_feed_engagement_holdout_universe,ig_direct_pending_inbox_memcache,ig_promote_guided_budget_duration_options_universe,ig_android_verified_comments_universe,ig_feed_lockdown,android_instagram_prefetch_suggestions_universe,ig_android_carousel_bumping_logging_fix,ig_android_gallery_order_by_date_taken,ig_shopping_viewer_intent_actions,ig_android_startup_prefetch,ig_android_live_640_quality,ig_android_business_post_insights_v3_universe,ig_android_custom_story_import_intent,ig_stories_gif_sticker,ig_lockdown_feed_perf,ig_video_copyright_whitelist,ig_explore_holdout_universe,ig_android_device_language_reset,ig_android_live_fault_tolerance_universe,ig_android_videocall_consumption_universe,ig_android_stories_viewer_use_thumbnail_as_fallback,ig_android_live_viewer_reshare_universe,ig_android_main_feed_seen_state_dont_send_info_on_tail_load,ig_android_face_filter_glyph_nux_animation_universe,ig_android_livewith_guest_adaptive_camera_universe,ig_android_business_new_ads_payment_universe,ig_android_audience_control,ig_android_unified_bindergroup_in_staticandpagedadapter,ig_android_log_account_switch_usable,ig_android_mas_viewer_list_megaphone_universe,ig_android_photo_fbupload_universe,ig_android_carousel_drafts,ig_android_bug_report_version_warning,ig_fbns_push,ig_android_carousel_no_buffer_10_30,ig_android_sso_family_key,ig_android_live_guest_reshare_universe,ig_android_profile_tabs_redesign_universe,ig_android_user_url_deeplink_fbpage_endpoint,ig_android_hide_post_in_feed,ig_android_shopping_thumbnail_icon,ig_search_null_state_universe,ig_android_ad_watchbrowse_universe,ig_android_search_people_tag_universe,ig_android_codec_high_profile,ig_android_ppr_on_destroy_view,ig_android_inline_appeal,ig_android_direct_camera_composer_universe,ig_android_log_mediacodec_info,ig_android_direct_expiring_media_loading_errors,ig_android_camera_face_filter_api_retry,ig_video_use_sve_universe,ig_android_skip_get_fbupload_universe,ig_android_low_data_mode,ig_android_enable_zero_rating,ig_android_force_logout_user_with_mismatched_cookie,ig_android_sample_ppr,ig_android_smartisan_app_badging,ig_android_comment_inline_composer_universe,ig_android_share_story_to_facebook_page,ig_android_direct_expiring_media_fix_duplicate_thread,ig_android_reverse_audio,ig_android_memoize_experiment_check,ig_android_comments_impression_logger,ig_android_live_encore_production_universe,ig_promote_independent_ctas_universe,ig_android_live_dash_latency_viewer,ig_android_http_stack_experiment_2017,ig_android_pending_request_search_bar,ig_promote_split_objectives_universe,ig_android_live_thread_delay_for_mute_universe,ig_android_fb_topsearch_sgp_fork_request,ig_android_heap_uploads,ig_android_stories_archive_universe,ig_android_business_ix_fb_autofill_universe,ig_android_shopping_tag_products_tooltip_not_onboarded,ig_lockdown_feed_shri
nk_universe,ig_android_stories_create_flow_favorites_tooltip,ig_android_direct_ephemeral_replies_with_context,ig_android_stories_text_format,ig_android_promotion_feedback_channel,ig_android_explore_in_feed_universe,ig_android_memoize_media_on_viewable,ig_android_log_failed_image_download_retries,ig_profile_holdout_2017_universe,ig_android_stories_video_loading_spinner_improvements,ig_android_direct_share_intent,ig_android_live_capture_translucent_navigation_bar,ig_android_stories_drawing_sticker,ig_android_ppr_stories_fix,ig_android_facebook_twitter_profile_photos,ig_android_shopping_tag_creation_universe,ig_android_story_decor_image_fbupload_universe,ig_android_comments_ranking_kill_switch_universe,ig_promote_profile_visit_cta_universe,ig_android_story_reactions,ig_android_ppr_main_feed_enhancements,ig_android_used_jpeg_library,ig_carousel_draft_multiselect,ig_android_live_camera_social_proof_universe,ig_android_stories_close_to_left_head,ig_android_video_playback_retry_time_threshold,ig_android_video_delay_auto_start,ig_android_live_with_invite_sheet_search_universe,ig_android_stories_archive_calendar,ig_android_effect_download_progress_universe,ig_android_ad_watchbrowse_cta_universe,ig_android_ads_manager_pause_resume_ads_universe,ig_android_main_feed_carousel_bumping,ig_stories_in_feed_unit_design_universe,ig_android_explore_iteminfo_universe_exp,ig_android_live_video_reactions_consumption_universe,ig_android_stories_hashtag_text,ig_android_live_save_to_camera_roll_universe,ig_android_sticker_region_tracking,ig_android_activity_feed_row_delete,ig_android_unified_inbox,ig_android_realtime_iris,ig_android_search_client_matching_2,ig_lockdown_notifications_universe,ig_android_task_life_detection,ig_android_feed_seen_state_with_view_info,ig_android_ppr_contextual_enhancements,ig_android_media_rows_prepare_10_31,ig_family_bridges_holdout_universe,ig_android_background_explore_fetch,ig_android_following_follower_social_context,ig_android_live_auto_collapse_comments_view_universe,ig_android_insights_relay_modern_conversion_universe,ig_android_insta_video_consumption_infra,ig_android_ad_watchlead_universe,ig_android_direct_prefetch_direct_story_json,ig_android_live_save_to_camera_roll_compatibility_filter_universe,ig_android_cache_logger_10_34,ig_android_explore_post_chaining_with_netego_universe,ig_android_stories_weblink_creation,ig_android_histogram_reporter,ig_android_network_cancellation,ig_android_shopping_show_shop_tooltip,ig_android_add_to_highlights_universe,ig_android_comment_category_filter_setting_universe,ig_promote_daily_budget_universe,ig_android_live_single_renderer,ig_android_stories_camera_enhancements,ig_android_video_use_new_logging_arch,ig_android_feed_stale_check_interval,ig_android_crop_from_inline_gallery_universe,ig_android_hashtag_following,ig_android_unsampled_appstartup_logging,ig_android_direct_reel_options_entry_point,ig_android_stories_gallery_improvements,ig_android_prefetch_notification_data,ig_android_direct_app_deeplinking,ig_android_direct_full_size_gallery_upload_universe_v2,ig_promotions_unit_in_insights_landing_page,ig_android_mqtt_delay_stop_after_background_universe,ig_android_reactive_feed_like_count,ig_android_comments_cache_perf_study_universe,ig_android_camera_ff_story_open_tray,ig_android_stories_asset_search,ig_android_constrain_image_size_universe,ig_rn_top_posts_stories_nux_universe,ig_ranking_following,ig_android_stories_archive_fast_scroll,ig_android_camera_retain_face_filter,ig_android_direct_inbox_presence,ig_android_live_comment_composer_ani
mation_universe,ig_android_live_skin_smooth,ig_android_stories_posting_offline_ui,ig_android_sidecar_video_upload_universe,ig_android_canvas_swipe_to_open_universe,ig_android_qp_features,android_ig_stories_without_storage_permission_universe2,ig_android_spinner_during_main_feed_loading,ig_android_reel_raven_video_segmented_upload_universe,ig_android_swipe_navigation_x_angle_universe,ig_android_invite_xout_universe,ig_android_offline_mode_holdout,ig_android_live_send_user_location,ig_android_save_all,ig_android_live_report_watch_time_when_update,ig_android_family_bridge_discover,ig_android_startup_manager,instagram_search_and_coefficient_holdout,ig_android_high_res_upload_2,ig_android_dynamic_background_prefetch,ig_android_http_service_same_thread,ig_android_scroll_to_dismiss_keyboard,ig_carousel_animation,ig_android_remove_followers_universe,ig_android_skip_video_render,ig_android_crash_native_core_dumping,ig_android_one_tap_nux_upsell,ig_android_comments_composer_avatar_universe,ig_android_direct_open_thread_with_expiring_media,ig_android_post_capture_filter,ig_android_rendering_controls,ig_android_os_version_blocking,ig_android_ad_threaded_comments_universe,ig_android_no_prefetch_video_bandwidth_threshold,ig_android_encoder_width_safe_multiple_16,ig_android_warm_like_text,ig_android_request_feed_on_back,ig_comments_team_holdout_universe,ig_android_e2e_optimization_universe,ig_shopping_insights,ig_android_cache_clear_universe,ig_android_livewith_inapp_notification_universe,android_ig_camera_face_tracker_version,ig_android_disk_usage_universe,ig_android_direct_async_message_row_building_universe,ig_android_fb_connect_follow_invite_flow,ig_android_direct_24h_replays,ig_android_video_stitch_after_segmenting_universe,ig_android_instavideo_periodic_notif,ig_android_stories_camera_support_image_keyboard,ig_android_enable_swipe_to_dismiss_for_all_dialogs,ig_android_warm_start_fetch_universe,ig_android_marauder_update_frequency,ig_android_rage_shake_whitelist,ig_android_stories_emoji_asset_size,ig_android_ad_connection_manager_universe,ig_android_ad_feed_viewable_listener_lifecycle_backtest_universe,ig_android_reset_to_feed_from_background,ig_android_ad_watchbrowse_carousel_universe,ig_android_branded_content_edit_flow_universe,ig_android_video_feed_universe,ig_android_upload_reliability_universe,ig_android_arengine_bypass_pipeline_during_warmup_universe,ig_promote_guided_education_bar_universe,ig_android_direct_mutation_manager_universe,ig_android_stories_poll_result_share,ig_android_ad_show_new_bakeoff,ig_heart_with_keyboad_exposed_universe,ig_android_react_native_universe_kill_switch,ig_android_comments_composer_callout_universe,ig_android_search_hash_tag_and_username_universe,ig_android_live_disable_speed_test_ui_timeout_universe,ig_android_qp_kill_switch,ig_android_ad_switch_fragment_logging_v2_universe,ig_android_ad_leadgen_single_screen_universe,ig_android_seen_state_contains_check,ig_android_share_to_whatsapp,ig_android_direct_launch_to_stories_gallery,ig_android_live_snapshot_universe,ig_branded_content_share_to_facebook,ig_android_react_native_email_sms_settings_universe,ig_android_share_sheets_thread_count,ig_android_live_join_comment_ui_change,ig_android_business_promote_refresh_fb_access_token_universe,ig_android_camera_tap_smile_icon_to_selfie_universe,ig_android_live_dash_latency_broadcaster,ig_android_prominent_live_button_in_camera_universe,ig_android_video_upload_quality,ig_android_video_cover_frame_from_original_as_fallback,ig_android_camera_leak_detector_universe,ig_android_cam
era_ui_perf_universe,ig_android_story_viewer_linear_preloading_count,ig_android_shopping_react_native_catalog_selection,ig_android_threaded_comments_universe,ig_android_stories_search_reel_mentions_universe,ig_stories_end_of_tray_suggestions,ig_promote_reach_destinations_universe,ig_android_progressive_jpeg_partial_download,ig_fbns_shared,ig_android_capture_slowmo_mode,ig_android_live_ff_fill_gap,ig_promote_clicks_estimate_universe,ig_android_video_single_surface,ig_android_foreground_location_collection,ig_android_last_edits,ig_android_pending_actions_serialization,ig_android_post_live_viewer_count_privacy_universe,ig_stories_engagement_2017_h2_holdout_universe,ig_android_cache_grid_content_description,ig_android_image_cache_tweak_for_n,ig_android_direct_increased_notification_priority,ig_android_hero_player,ig_android_search_top_search_surface_universe,ig_android_live_dash_latency_manager,instagram_interests_holdout,ig_android_user_detail_endpoint,ig_android_videocall_production_universe,ig_android_ad_watchmore_entry_point_universe,ig_android_unified_video_logger,ig_android_video_detail,ig_android_low_latency_consumption_universe,ig_android_shopping_signup,ig_save_insights,ig_comments_typing_universe,ig_android_live_save_to_camera_roll_limit_by_screen_size_universe,ig_android_exoplayer_settings,ig_android_progressive_jpeg,ig_android_notification_aggregation_fix,ig_android_fblocation_universe,ig_android_direct_view_mode_toggle,ig_android_offline_story_stickers,ig_explore_android_universe,ig_android_video_prefetch_for_connectivity_type,ig_android_ppr_carousel_fix,ig_android_ad_holdout_watchandmore_universe,ig_promote_default_cta,ig_android_insta_video_abr_resize,ig_android_insta_video_sound_always_on,ig_android_in_app_notifications_queue,ig_android_live_request_to_join_consumption_universe,ig_android_ix_payment_universe,ig_android_live_follow_from_comments_universe,ig_android_comments_new_like_button_position_universe,ig_android_hyperzoom,ig_android_sidecar_photo_fbupload_universe,ig_android_live_broadcast_blacklist,ig_android_camera_perceived_perf_universe,ig_android_search_clear_layout_universe,ig_android_webrtc_h264_compatibility_filter_universe,ig_promote_reachbar_universe,ig_android_ad_one_pixel_logging_for_reel_universe,ig_android_submit_button_universe,ig_android_reel_viewer_fetch_missing_reels_universe,ig_android_arengine_separate_prepare,ig_android_direct_video_segmented_upload_universe,ig_android_direct_search_share_sheet_universe,ig_android_business_promote_tooltip,ig_android_direct_blue_tab,ig_android_instavideo_remove_nux_comments,ig_android_image_task_cancel_logic_fix_v25,ig_android_draw_rainbow_client_universe,ig_android_use_simple_video_player,ig_android_rtc_reshare,ig_android_enable_swipe_to_dismiss_for_favorites_dialogs,ig_android_auto_retry_post_mode,ig_fbns_preload_default,ig_android_emoji_sprite_sheet,ig_android_cover_frame_blacklist,ig_android_use_iterative_box_blur,ig_android_gallery_grid_column_count_universe,ig_android_live_encore_consumption_settings_universe,ig_perf_android_holdout,ig_android_list_redesign,ig_android_stories_separate_overlay_creation,ig_android_ad_show_new_interest_survey,ig_android_direct_share_story_to_messenger_nux,ig_android_live_encore_reel_chaining_universe,ig_android_vod_abr_universe,ig_android_audience_profile_icon_badge,ig_android_immersive_viewer,ig_android_hashtag_feed_tabbed,ig_android_search_normalization,ig_android_direct_thread_name_as_notification,ig_android_su_rows_preparer,ig_android_leak_detector_universe,ig_android_video_loopco
unt_int,ig_android_video_decoder_retry,ig_android_qp_sticky_exposure_universe,ig_android_enable_main_feed_reel_tray_preloading,ig_android_upload_retry_job_service,ig_android_camera_upsell_dialog,ig_android_live_time_adjustment_universe,ig_android_internal_research_settings,ig_android_prod_lockout_universe,ig_android_react_native_ota,ig_android_default_privacy_change,ig_android_main_camera_share_to_direct,ig_lockdown_feed_scroll_universe,ig_android_cold_start_feed_request,ig_android_fb_family_navigation_badging_user,ig_android_video_scrubber_thumbnail_universe,ig_lockdown_feed_caption_length_universe,ig_stories_music_sticker,ig_android_send_impression_via_real_time,ig_android_video_watermark_universe,ig_android_sc_ru_ig,ig_android_animation_perf_reporter_timeout,ig_android_warm_headline_text,ig_android_post_live_expanded_comments_view_universe,ig_android_new_block_flow,ig_android_shopping_profile_shoppable_feed_empty_state,ig_android_long_form_video,ig_android_stories_video_prefetch_kb,ig_android_live_stop_broadcast_on_404,android_face_filter_universe,ig_android_render_iframe_interval,ig_android_live_move_video_with_keyboard_universe,ig_android_webrtc_codec_migration_universe,ig_stories_vertical_list,ig_android_stories_server_brushes,ig_android_live_viewers_canned_comments_universe,ig_android_collections_cache,ig_android_live_face_filter,ig_android_logging_metric_universe_v2,ig_android_screen_recording_bugreport_universe,ig_android_no_cancel_launching_reel_when_scroll_universe,ig_story_camera_reverse_video_experiment,ig_android_story_gallery_folder_selection,ig_downloadable_modules_experiment,ig_direct_core_holdout_q4_2017,ig_android_search,ig_android_reduce_background_overdraw,ig_android_archive_features_holdout_universe,ig_promote_budget_duration_slider_universe,ig_android_insta_video_consumption_titles,ig_android_find_loaded_classes,ig_android_reduce_rect_allocation,ig_android_camera_universe,ig_android_original_video_report_info,ig_android_post_live_badge_universe,ig_stories_holdout_h2_2017,ig_android_video_server_coverframe,ig_promote_relay_modern,ig_android_search_users_universe,ig_android_video_controls_universe,ig_creation_growth_holdout,ig_qp_tooltip,ig_android_live_encore_consumption_universe,ig_android_experimental_filters,ig_android_shopping_tag_should_show_caret_universe,ig_android_shopping_profile_shoppable_feed,ig_timestamp_public_test,ig_android_shopping_tag_hints,ig_android_save_collection_pivots,ig_android_live_comment_fetch_frequency_universe,ig_android_business_conversion_value_prop_v2,ig_promote_guided_ad_preview_newscreen,ig_shopping_viewer_share_action,ig_android_live_wave_production_universe,ig_android_not_showing_hint_text_dimness_universe,ig_android_livewith_universe,ig_android_whatsapp_invite_option,ig_android_video_keep_screen_on,ig_android_video_no_proxy,ig_android_reel_ads_pagination_universe,ig_android_activity_feed_impression_logger,ig_android_ad_story_time_spent_logging_universe,ig_android_live_align_by_2_universe,ig_android_reorder_lowdata_check,ig_android_top_live_profile_pics_universe,ig_android_network_util_cache_info,ig_lazy_module_loading,ig_android_auto_select_face_filter_universe,ig_android_async_network_tweak_universe_15,ig_android_direct_thread_presence,ig_android_direct_init_post_launch,ig_android_camera_new_early_show_smile_icon_universe,ig_android_live_go_live_at_viewer_end_screen_universe,ig_android_live_bg_download_face_filter_assets_universe,ig_android_insta_video_audio_encoder,ig_android_video_segmented_media_needs_reupload_universe,ig_androi
d_upload_prevent_upscale,ig_android_business_ix_universe,ig_android_direct_share_sheet_height,ig_android_instagram_card,ig_android_self_story_layout,ig_android_reduce_textureview_overdraw,ig_feed_ranking,ig_android_stories_gallery_long_term_holdout,ig_android_rtl,ig_android_business_new_navigation_universe,ig_android_comment_inline_expansion_universe,ig_android_live_request_to_join_production_universe,ig_android_share_spinner,ig_android_scroll_away_navigator,ig_android_video_resize_operation,ig_android_stories_eyedropper_color_picker,ig_android_disable_explore_prefetch,ig_android_universe_reel_video_production,ig_android_react_native_push_settings_refactor_universe,ig_android_stories_whatsapp_share,ig_android_power_metrics,ig_android_sfplt,ig_android_story_resharing_universe,ig_android_ad_collection_thumbnail_cta_universe,ig_android_direct_share_story_to_facebook,ig_android_exoplayer_creation_flow,ig_android_non_square_first,ig_android_insta_video_drawing,ig_android_swipeablefilters_universe,ig_android_direct_visual_replies_fifty_fifty,ig_android_reel_viewer_data_buffer_size,ig_android_video_segmented_upload_multi_thread_universe,ig_android_fbupload_sidecar_video_universe,ig_android_react_native_restart_after_error_universe,ig_camera_android_reactions_increase_tap_target,ig_android_direct_notification_actions,ig_android_profile,ig_android_effect_tray_background,ig_android_additional_contact_in_nux,ig_stories_selfie_sticker,ig_android_live_use_rtc_upload_universe,ig_android_story_reactions_producer_holdout,ig_android_stories_reply_composer_redesign,ig_android_story_viewer_segments_bar_universe,ig_explore_netego,ig_android_audience_control_sharecut_universe,ig_android_update_resource_configuration,ig_android_live_nerd_stats_universe,ig_android_video_cache_size_universe,ig_android_direct_fix_top_of_thread_scrolling,ig_android_conversion_back_dialog_universe,ig_video_holdout_h2_2017,ig_android_insights_metrics_graph_universe,ig_android_one_tap_send_sheet_universe,ig_android_international_add_payment_flow_universe,ig_android_live_see_fewer_videos_like_this_universe,ig_android_live_view_profile_from_comments_universe,ig_fbns_blocked,ig_android_direct_inbox_suggestions,ig_android_video_segmented_upload_universe,ig_carousel_post_creation_tag_universe,ig_android_mqtt_region_hint_universe,ig_android_suggest_password_reset_on_oneclick_login,ig_android_live_special_codec_size_list,ig_android_story_viewer_item_duration_universe,ig_android_enable_share_to_messenger,promote_media_picker,ig_android_live_video_reactions_creation_universe,ig_android_sidecar_gallery_universe,ig_android_business_id,ig_android_story_import_intent,ig_android_insta_video_broadcaster_infra_perf,ig_android_live_webrtc_livewith_params,ig_android_comment_audience_control_group_selection_universe,android_ig_fbns_kill_switch,ig_android_su_card_view_preparer_qe,ig_android_unified_camera_universe,ig_android_all_videoplayback_persisting_sound,ig_android_live_pause_upload,ig_android_stories_paging_spring_config_universe,ig_android_live_broadcaster_reshare_universe,ig_android_branded_content_brand_remove_self,ig_android_direct_search_recipients_controller_universe,ig_android_ad_show_full_name_universe,ig_android_anrwatchdog,ig_android_camera_video_universe,ig_android_2fac,ig_android_audio_segment_report_info,ig_android_scroll_main_feed,ig_android_archived_posts_sharing,ig_direct_bypass_group_size_limit_universe,ig_android_background_main_feed_fetch_v27,ig_android_qpl_executor,ig_android_story_captured_media_recovery,ig_android_skywalker_liv
e_event_start_end,ig_android_interests_irrelevant_media_universe,ig_lockdown_feed_perf_image_cover,ig_android_direct_search_story_recipients_universe,ig_android_ad_browser_gesture_control,ig_android_grid_cell_count,ig_android_immersive_viewer_ufi_footer,ig_android_ad_watchinstall_universe,ig_android_comments_notifications_universe,ig_android_shortcuts,ig_android_archive_fetching,ig_android_new_optic,ig_android_audience_control_nux,favorites_home_inline_adding,ig_android_canvas_tilt_to_pan_universe,ig_android_save_locations,ig_internal_ui_for_lazy_loaded_modules_experiment,ig_android_direct_expiring_media_from_notification_behavior_universe,ig_android_single_tap_to_show_mask_tray_universe,ig_android_stories_sampled_progress,ig_android_fbupload_check_status_code_universe,ig_android_ad_account_top_followers_universe,ig_android_ccu_jobscheduler_outer,ig_android_offline_reel_feed,ig_android_direct_presence_setting,ig_android_stories_viewer_modal_activity,ig_android_shopping_creation_flow_onboarding_entry_point,ig_comments_feed_inline_composer_combined_universe,ig_android_activity_feed_row_click,ig_nearby_venues_location_setting,ig_android_user_behavior_prefetch,ig_android_chain_after_own_reel,ig_android_gl_drawing_marks_after_undo_backing,ig_android_story_gallery_behavior,ig_android_mark_seen_state_on_viewed_impression,ig_android_configurable_retry,ig_android_live_monotonic_pts,ig_story_ptr_timeout,ig_android_ad_pbia_header_click_universe,ig_android_comment_tweaks_universe,ig_android_data_usage_new_reporting,ig_android_location_media_count_exp_ig,ig_android_image_cache_log_mismatch_fetch,ig_android_personalized_feed_universe,ig_android_direct_double_tap_to_like_messages,ig_stories_restart_video_universe,ig_android_ccu_jobscheduler_inner,ig_android_insights_holdout,ig_use_fb_rtmp_streamer_universe,ig_android_sfplt_tombstone,ig_android_live_with_guest_viewer_list_universe,ig_android_explore_chaining_universe,ig_android_gqls_typing_indicator,ig_android_comment_audience_control_universe,ig_android_direct_show_inbox_loading_banner_universe,ig_android_near_bottom_fetch,ig_promote_guided_creation_flow,ig_ads_increase_connection_step2_v2,ig_android_draw_chalk_client_universe,ig_android_direct_keep_in_chat_ephemeral,ig_android_separate_network_executor'
SIG_KEY_VERSION = '4'
def __init__(self, username, password, debug_mode=False):
self.username = username
self.password = password
self.device_id = utils.generate_device_id(utils.md5_sum(username + password))
self.uuid = utils.generate_uuid(True)
self.s = requests.Session()
self.token = ""
self.rank_token = ""
self.username_id = ""
self.phone_id = utils.generate_uuid(True)
self.csrf_token = ""
self.debug_mode = debug_mode
def login(self):
resp = self.send_request('si/fetch_headers/?challenge_type=signup&guid=' + utils.generate_uuid(), None)
if resp.status_code != 200:
return False
data = {'phone_id': self.phone_id,
'_csrftoken': resp.cookies['csrftoken'],
'username': self.username,
'guid': self.uuid,
'device_id': self.device_id,
'password': self.password,
'login_attempt_count': '0'}
resp = self.send_request('accounts/login/', self.generate_signature(json.dumps(data)))
if resp.status_code != 200:
return False
resp_json = utils.resp_to_json(resp)
self.username_id = resp_json["logged_in_user"]["pk"]
self.rank_token = "%s_%s" % (self.username_id, self.uuid)
self.token = resp.cookies["csrftoken"]
return True
def send_request(self, endpoint, post=None):
if self.debug_mode:
            logging.info('Sending request to {}, post={}'.format(endpoint, post))
self.s.headers.update({'Connection': 'close',
'Accept': '*/*',
'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'User-Agent': self.USER_AGENT})
if post is not None:
response = self.s.post(self.API_URL + endpoint, data=post)
else:
response = self.s.get(self.API_URL + endpoint)
if self.debug_mode:
logging.info('Response is ' + response.content)
return response
def logout(self):
self.send_request('accounts/logout/')
def direct_list(self, next_page=''):
uri = 'direct_v2/inbox/'
if next_page:
uri += '?cursor=' + next_page
resp = self.send_request(uri)
if resp.status_code != 200:
return False
resp_json = utils.resp_to_json(resp)
return resp_json
def direct_thread(self, thread, next_page=''):
uri = 'direct_v2/threads/{}/'.format(thread)
if next_page:
uri += '?cursor=' + next_page
resp = self.send_request(uri)
if resp.status_code != 200:
return False
resp_json = utils.resp_to_json(resp)
return resp_json
def getUsernameInfo(self, usernameId):
return self.send_request('users/' + str(usernameId) + '/info/')
def delete_direct_message(self, thread_id, item_id):
data = {
"_uuid": self.uuid,
"_csrftoken": self.token,
}
resp = self.send_request('direct_v2/threads/{}/items/{}/delete/'.format(thread_id, item_id),
self.generate_signature(json.dumps(data)))
if resp.status_code != 200:
return False
resp_json = utils.resp_to_json(resp)
return resp_json
def generate_signature(self, data):
return 'ig_sig_key_version=' + self.SIG_KEY_VERSION + '&signed_body=' + hmac.new(
self.IG_SIG_KEY.encode('utf-8'), data.encode('utf-8'),
hashlib.sha256).hexdigest() + '.' + urllib.quote_plus(data)
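# Example usage sketch (credentials and the thread id below are placeholders; the
# layout of the returned JSON is an assumption about Instagram's private API):
#
#     client = Instagram('my_username', 'my_password')
#     if client.login():
#         inbox = client.direct_list()
#         thread = client.direct_thread('340282366841710300949128137443944319108')
#         client.logout()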
| 227.03252 | 23,613 | 0.913554 |
712c17f0dc82739b4bb768dcec94705ae05a2aa2 | 2,822 | py | Python | tintlink/__init__.py | agoramachina/python-tintlink | cc3cdc31701b24b2c9ca1c5d8b41a16d5a0c04ee | [
"MIT"
] | null | null | null | tintlink/__init__.py | agoramachina/python-tintlink | cc3cdc31701b24b2c9ca1c5d8b41a16d5a0c04ee | [
"MIT"
] | null | null | null | tintlink/__init__.py | agoramachina/python-tintlink | cc3cdc31701b24b2c9ca1c5d8b41a16d5a0c04ee | [
"MIT"
] | null | null | null | # Python module for control of Tintlink bluetooth LED bulbs
#
# Copyright 2016 Matthew Garrett <mjg59@srcf.ucam.org>
#
# This code is released under the terms of the MIT license. See the LICENSE file
# for more details.
import BDAddr
from BluetoothSocket import BluetoothSocket, hci_devba
import random
import socket
import sys
import time
def send_packet(sock, handle, data):
packet = bytearray([0x12, handle, 0x00])
for item in data:
packet.append(item)
sock.send(packet)
data = sock.recv(32)
response = []
for d in data:
response.append(ord(d))
return (response)
def checksum(data):
value = 0
for i in range(1, len(data)-2):
value = value + data[i]
value = value + 85
return (value & 0xff)
def read_packet(sock, handle):
packet = bytearray([0x0a, handle, 0x00])
sock.send(packet)
data = sock.recv(32)
response = []
for d in data:
response.append(ord(d))
return (response)
class tintlink:
def __init__(self, mac):
self.mac = mac
def connect(self):
my_addr = hci_devba(0) # get from HCI0
dest = BDAddr.BDAddr(self.mac)
addr_type = BDAddr.TYPE_LE_PUBLIC
self.sock = BluetoothSocket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP)
self.sock.bind_l2(0, my_addr, cid=4, addr_type=BDAddr.TYPE_LE_RANDOM)
self.sock.connect_l2(0, dest, cid=4, addr_type=addr_type)
print (read_packet(self.sock, 0x24))
def on(self):
send_packet(self.sock, 0x21, bytearray([0xaa, 0x0a, 0xfc, 0x3a, 0x86, 0x01, 0x10, 0x01, 0x01, 0x00, 0x28, 0x0d]))
def set_brightness(self, brightness):
packet=bytearray([0xaa, 0x0a, 0xfc, 0x3a, 0x86, 0x01, 0x0c, 0x01, brightness, 0x00, 0x00, 0x0d])
packet[9] = random.randint(0, 255)
packet[10] = checksum(packet)
send_packet(self.sock, 0x21, packet)
def set_colour(self, red, green, blue):
packet=bytearray([0xaa, 0x0a, 0xfc, 0x3a, 0x86, 0x01, 0x0d, 0x06, 0x01, red, green, blue, 0x20, 0x30, 0x00, 0x00, 0x0d])
packet[14] = random.randint(0, 255)
packet[15] = checksum(packet)
send_packet(self.sock, 0x21, packet)
def set_white(self, white):
packet=bytearray([0xaa, 0x0a, 0xfc, 0x3a, 0x86, 0x01, 0x0e, 0x01, white, 0x00, 0x00, 0x0d])
packet[9] = random.randint(0, 255)
packet[10] = checksum(packet)
send_packet(self.sock, 0x21, packet)
    def white_reset(self):
        packet=bytearray([0xaa, 0x0a, 0xfc, 0x3a, 0x86, 0x01, 0x0d, 0x06, 0x02, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x0d])
        packet[14] = random.randint(0, 255)
        packet[15] = checksum(packet)
        send_packet(self.sock, 0x21, packet)
    def rgb_reset(self):
        packet=bytearray([0xaa, 0x0a, 0xfc, 0x3a, 0x86, 0x01, 0x0d, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d])
        packet[14] = random.randint(0, 255)
        packet[15] = checksum(packet)
        send_packet(self.sock, 0x21, packet)
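# Example usage sketch (the MAC address below is a placeholder):
#
#     bulb = tintlink("00:11:22:33:44:55")
#     bulb.connect()
#     bulb.on()
#     bulb.set_brightness(0x80)
#     bulb.set_colour(255, 0, 0)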
| 33.2 | 126 | 0.673636 |
6d1dcdb01e858726ce6f219e2c0d17886c5d2375 | 4,645 | py | Python | src/eda.py | lipcai/online_news_popularity | b170ce480a4e18a4dadf463575148f2e999b2523 | [
"MIT"
] | null | null | null | src/eda.py | lipcai/online_news_popularity | b170ce480a4e18a4dadf463575148f2e999b2523 | [
"MIT"
] | 18 | 2021-11-19T05:05:46.000Z | 2021-12-10T23:27:54.000Z | src/eda.py | lipcai/online_news_popularity | b170ce480a4e18a4dadf463575148f2e999b2523 | [
"MIT"
] | 3 | 2021-11-19T01:30:25.000Z | 2021-11-21T00:10:18.000Z | # Author: Jennifer Hoang, Nagraj Rao, Linhan Cai
# Date: Nov 26, 2021
"""Creates figures from EDA for Online News Popularity dataset:
https://archive.ics.uci.edu/ml/machine-learning-databases/00332/
Usage:
eda.py --data_path=<data_path> --figures_path=<figures_path>
Options:
--data_path=<data_path> File path to data for EDA
--figures_path=<figures_path> File path for location to save figures
"""
from docopt import docopt
import os
import altair as alt
import numpy as np
import pandas as pd
from altair_saver import save
opt = docopt(__doc__)
def main(data_path, figures_path):
# Read data
ONP_csv = pd.read_csv(data_path)
# Merge 'data channel' feature for visualization
ONP_csv = merge_data_column(ONP_csv)
# Create plot of shares per data channel topic
plot_1 = (
alt.Chart(ONP_csv)
.mark_bar()
.encode(
y=alt.Y("data_channel", title="Data Channel"),
x=alt.X(" shares", title="Shares"),
)
)
file_path_1 = os.path.join(figures_path, "01_EDA-Bar-Plot-Data-Channel.png")
plot_1.save(file_path_1)
# Create histogram of distribution of shares plot
plot_2 = (
alt.Chart(ONP_csv)
.mark_bar()
.encode(
x=alt.X(
" shares",
bin=alt.Bin(maxbins=3000),
scale=alt.Scale(domain=(0, 25000), clamp=True),
title="Shares",
),
y=alt.Y("count()", title="Count"),
)
)
file_path_2 = os.path.join(figures_path, "02_EDA-Shares-Histogram.png")
plot_2.save(file_path_2)
# Create correlation plot of features
corr_df = (
ONP_csv.select_dtypes("number")
.corr("spearman")
.stack()
.reset_index(name="corr")
)
corr_df.loc[corr_df["corr"] == 1, "corr"] = 0 # Remove diagonal
corr_df["abs"] = corr_df["corr"].abs()
plot_3 = (
alt.Chart(corr_df)
.mark_circle()
.encode(
x=alt.X("level_0", title="Feature"),
y=alt.Y("level_1", title = "Feature"),
size="abs",
color=alt.Color(
"corr", scale=alt.Scale(scheme="blueorange", domain=(-1, 1))
),
)
)
file_path_3 = os.path.join(figures_path, "03_EDA-Correlation-Plot.png")
plot_3.save(file_path_3)
def merge_data_column(ONP_csv):
"""
Function to merge 'data channel' column into one column.
"""
DataChannelMerge = ONP_csv[
[
" data_channel_is_lifestyle",
" data_channel_is_entertainment",
" data_channel_is_bus",
" data_channel_is_socmed",
" data_channel_is_tech",
" data_channel_is_world",
]
]
DataChannel_arr = []
for r in list(range(DataChannelMerge.shape[0])):
if (
((DataChannelMerge.iloc[r, 0]) == 0)
and ((DataChannelMerge.iloc[r, 1]) == 0)
and ((DataChannelMerge.iloc[r, 2]) == 0)
and ((DataChannelMerge.iloc[r, 3]) == 0)
and ((DataChannelMerge.iloc[r, 4]) == 0)
and ((DataChannelMerge.iloc[r, 5]) == 0)
):
DataChannel_arr.append("Others")
for c in list(range(DataChannelMerge.shape[1])):
if (c == 0) and (DataChannelMerge.iloc[r, c]) == 1:
DataChannel_arr.append("Lifestyle")
elif (c == 1) and (DataChannelMerge.iloc[r, c]) == 1:
DataChannel_arr.append("Entertainment")
elif (c == 2) and (DataChannelMerge.iloc[r, c]) == 1:
DataChannel_arr.append("Business")
elif (c == 3) and (DataChannelMerge.iloc[r, c]) == 1:
DataChannel_arr.append("Social Media")
elif (c == 4) and (DataChannelMerge.iloc[r, c]) == 1:
DataChannel_arr.append("Tech")
elif (c == 5) and (DataChannelMerge.iloc[r, c]) == 1:
DataChannel_arr.append("World")
ONP_csv.insert(loc=12, column="data_channel", value=DataChannel_arr)
ONP_csv.drop(
labels=[
" data_channel_is_lifestyle",
" data_channel_is_entertainment",
" data_channel_is_bus",
" data_channel_is_socmed",
" data_channel_is_tech",
" data_channel_is_world",
],
axis=1,
inplace=True,
)
return ONP_csv
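# Illustrative behaviour of merge_data_column (row values invented for the
# example): a row whose " data_channel_is_tech" indicator is 1 and whose other
# five indicators are 0 ends up with data_channel == "Tech"; a row with all six
# indicators equal to 0 ends up with data_channel == "Others".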
if __name__ == "__main__":
main(opt["--data_path"], opt["--figures_path"])
| 31.598639 | 81 | 0.551346 |
e09676b82b22d7cd976c5ed094c9cd2566efc374 | 268 | py | Python | build_compiled_js.py | ludios/Coreweb | 735f2636553ff8bb08b86cc0687f5e8b7c484035 | [
"BSD-3-Clause"
] | null | null | null | build_compiled_js.py | ludios/Coreweb | 735f2636553ff8bb08b86cc0687f5e8b7c484035 | [
"BSD-3-Clause"
] | null | null | null | build_compiled_js.py | ludios/Coreweb | 735f2636553ff8bb08b86cc0687f5e8b7c484035 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from coreweb._closurebuild.compiler import compile
import build_depsjs
compile(
roots=["js_coreweb"]
,namespaces=["cw.tabnexus_worker"]
,output="coreweb/compiled/tabnexus_worker.js"
,output_log="coreweb/compiled/tabnexus_worker.js.log"
)
| 20.615385 | 54 | 0.783582 |
d19fd454d6000bd2951cf84f155f329aa1f3376d | 7,403 | py | Python | salt/modules/serverdensity_device.py | jmdcal/salt | ffa14cd95796c0b364cbb8afb18c2a207f39400f | [
"Apache-2.0"
] | 1 | 2016-04-26T03:42:32.000Z | 2016-04-26T03:42:32.000Z | salt/modules/serverdensity_device.py | apergos/salt | 106c715d495a9c2bd747c8ca75745236b0d7fb41 | [
"Apache-2.0"
] | null | null | null | salt/modules/serverdensity_device.py | apergos/salt | 106c715d495a9c2bd747c8ca75745236b0d7fb41 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Wrapper around Server Density API
=================================
.. versionadded:: 2014.7.0
'''
from __future__ import absolute_import
import requests
import json
import logging
from salt.ext.six.moves import map
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def get_sd_auth(val, sd_auth_pillar_name='serverdensity'):
'''
Returns requested Server Density authentication value from pillar.
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.get_sd_auth <val>
'''
sd_pillar = __pillar__.get(sd_auth_pillar_name)
log.debug('Server Density Pillar: {0}'.format(sd_pillar))
if not sd_pillar:
        log.error('Could not load {0} pillar'.format(sd_auth_pillar_name))
raise CommandExecutionError(
'{0} pillar is required for authentication'.format(sd_auth_pillar_name)
)
try:
return sd_pillar[val]
except KeyError:
        log.error('Could not find value {0} in pillar'.format(val))
raise CommandExecutionError('{0} value was not found in pillar'.format(val))
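# Illustrative pillar layout this module expects (key names inferred from the
# get_sd_auth() calls in this file; the values are placeholders, not real data):
#
# serverdensity:
#   api_token: 0123456789abcdef0123456789abcdef
#   account_url: example.serverdensity.io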
def _clean_salt_variables(params, variable_prefix="__"):
'''
Pops out variables from params which starts with `variable_prefix`.
'''
list(list(map(params.pop, [k for k in params if k.startswith(variable_prefix)])))
return params
def create(name, **params):
'''
Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
'''
log.debug('Server Density params: {0}'.format(params))
params = _clean_salt_variables(params)
params['name'] = name
api_response = requests.post(
'https://api.serverdensity.io/inventory/devices/',
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error('Could not parse API Response content: {0}'.format(api_response.content))
raise CommandExecutionError(
'Failed to create, API Response: {0}'.format(api_response)
)
else:
return None
def delete(device_id):
'''
Delete a device from Server Density. For more information, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Deleting
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.delete 51f7eafcdba4bb235e000ae4
'''
api_response = requests.delete(
'https://api.serverdensity.io/inventory/devices/' + device_id,
params={'token': get_sd_auth('api_token')}
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error('Could not parse API Response content: {0}'.format(api_response.content))
raise CommandExecutionError(
                'Failed to delete, API Response: {0}'.format(api_response)
)
else:
return None
def ls(**params):
'''
List devices in Server Density
Results will be filtered by any params passed to this function. For more
information, see the API docs on listing_ and searching_.
.. _listing: https://apidocs.serverdensity.com/Inventory/Devices/Listing
.. _searching: https://apidocs.serverdensity.com/Inventory/Devices/Searching
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.ls
salt '*' serverdensity_device.ls name=lama
salt '*' serverdensity_device.ls name=lama group=lama_band installedRAM=32768
'''
params = _clean_salt_variables(params)
endpoint = 'devices'
# Change endpoint if there are params to filter by:
if params:
endpoint = 'resources'
# Convert all ints to strings:
for k, v in params.items():
params[k] = str(v)
api_response = requests.get(
'https://api.serverdensity.io/inventory/{0}'.format(endpoint),
params={'token': get_sd_auth('api_token'), 'filter': json.dumps(params)}
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error(
'Could not parse Server Density API Response content: {0}'
.format(api_response.content)
)
raise CommandExecutionError(
                'Failed to list devices, Server Density API Response: {0}'
.format(api_response)
)
else:
return None
def update(device_id, **params):
'''
Updates device information in Server Density. For more information see the
`API docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Updating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=lama group=lama_band
salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=better_lama group=rock_lamas swapSpace=512
'''
params = _clean_salt_variables(params)
api_response = requests.put(
'https://api.serverdensity.io/inventory/devices/' + device_id,
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API Response: {0}'.format(api_response))
log.debug('Server Density API Response content: {0}'.format(api_response.content))
if api_response.status_code == 200:
try:
return json.loads(api_response.content)
except ValueError:
log.error(
'Could not parse Server Density API Response content: {0}'
.format(api_response.content)
)
raise CommandExecutionError(
                'Failed to update, API Response: {0}'.format(api_response)
)
else:
return None
def install_agent(agent_key):
'''
Function downloads Server Density installation agent, and installs sd-agent
with agent_key.
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.install_agent c2bbdd6689ff46282bdaa07555641498
'''
work_dir = '/tmp/'
account_url = get_sd_auth('account_url')
__salt__['cmd.run'](
cmd='curl https://www.serverdensity.com/downloads/agent-install.sh > install.sh',
cwd=work_dir
)
__salt__['cmd.run'](cmd='chmod +x install.sh', cwd=work_dir)
return __salt__['cmd.run'](
cmd='./install.sh -a {account_url} -k {agent_key}'.format(
account_url=account_url, agent_key=agent_key),
cwd=work_dir
)
| 31.236287 | 117 | 0.649872 |
39917dd1873a66861b7bbebc13158f99d838d1a8 | 1,490 | py | Python | Prog/Python/Architecture_Patterns_with_Python/cap02/source_Base/tests/unit/test_batches.py | unimauro/Courses | 81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2 | [
"Apache-2.0"
] | 1 | 2020-07-25T04:56:55.000Z | 2020-07-25T04:56:55.000Z | Prog/Python/Architecture_Patterns_with_Python/cap03/tests/unit/test_batches.py | unimauro/Courses | 81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2 | [
"Apache-2.0"
] | 2 | 2020-06-15T04:42:00.000Z | 2021-08-29T03:48:28.000Z | Prog/Python/Architecture_Patterns_with_Python/cap03/tests/unit/test_batches.py | unimauro/Courses | 81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2 | [
"Apache-2.0"
] | null | null | null | from datetime import date
from allocation.domain.model import Batch, OrderLine
def test_allocating_to_a_batch_reduces_the_available_quantity():
batch = Batch("batch-001", "SMALL-TABLE", qty=20, eta=date.today())
line = OrderLine('order-ref', "SMALL-TABLE", 2)
batch.allocate(line)
assert batch.available_quantity == 18
def make_batch_and_line(sku, batch_qty, line_qty):
return (
Batch("batch-001", sku, batch_qty, eta=date.today()),
OrderLine("order-123", sku, line_qty)
)
def test_can_allocate_if_available_greater_than_required():
large_batch, small_line = make_batch_and_line("ELEGANT-LAMP", 20, 2)
assert large_batch.can_allocate(small_line)
def test_cannot_allocate_if_available_smaller_than_required():
small_batch, large_line = make_batch_and_line("ELEGANT-LAMP", 2, 20)
assert small_batch.can_allocate(large_line) is False
def test_can_allocate_if_available_equal_to_required():
batch, line = make_batch_and_line("ELEGANT-LAMP", 2, 2)
assert batch.can_allocate(line)
def test_cannot_allocate_if_skus_do_not_match():
batch = Batch("batch-001", "UNCOMFORTABLE-CHAIR", 100, eta=None)
different_sku_line = OrderLine("order-123", "EXPENSIVE-TOASTER", 10)
assert batch.can_allocate(different_sku_line) is False
def test_allocation_is_idempotent():
batch, line = make_batch_and_line("ANGULAR-DESK", 20, 2)
batch.allocate(line)
batch.allocate(line)
assert batch.available_quantity == 18
| 35.47619 | 72 | 0.751678 |
45e23f73602d9be6aa4e76f779cf8fc9e2b30aaa | 747 | py | Python | src/somos/urls.py | johncurcio/somos-api | 0f17938c0b5c642d12d20444524eeb101b6ad2bb | [
"MIT"
] | null | null | null | src/somos/urls.py | johncurcio/somos-api | 0f17938c0b5c642d12d20444524eeb101b6ad2bb | [
"MIT"
] | 1 | 2022-02-26T02:13:14.000Z | 2022-02-26T02:13:14.000Z | src/somos/urls.py | johncurcio/somos-api | 0f17938c0b5c642d12d20444524eeb101b6ad2bb | [
"MIT"
] | null | null | null | """somos URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 33.954545 | 77 | 0.708166 |
0c2ca0de565a105c3a24248b830b1755f2f37b3e | 1,056 | py | Python | Contributor Corner/AfraMP/SwapInPairs/SwapInPairs.py | hitu1304/interview-corner | 97503d1967c646f731275ae3665f142814c6a9d7 | [
"MIT"
] | 39 | 2020-11-01T13:58:48.000Z | 2021-02-12T08:39:37.000Z | Contributor Corner/AfraMP/SwapInPairs/SwapInPairs.py | hitu1304/interview-corner | 97503d1967c646f731275ae3665f142814c6a9d7 | [
"MIT"
] | 86 | 2020-09-25T07:20:40.000Z | 2021-02-18T20:36:29.000Z | Contributor Corner/AfraMP/SwapInPairs/SwapInPairs.py | hitu1304/interview-corner | 97503d1967c646f731275ae3665f142814c6a9d7 | [
"MIT"
] | 43 | 2020-12-18T03:32:42.000Z | 2021-02-19T18:08:19.000Z | # A linked list node
class Node:
def __init__(self, data=None, next=None):
self.data = data
self.next = next
# Helper function to print given linked list
def printList(head):
ptr = head
while ptr:
print(ptr.data, end=" ")
ptr = ptr.next
print("")
# Function to pairwise swap adjacent nodes of a linked list
def rearrange(head):
# if list is empty or contains just one node
if head is None or head.next is None:
return head
curr = head
prev = None
# consider two nodes at a time and swap their links
while curr and curr.next:
temp = curr.next
curr.next = temp.next
temp.next = curr
if prev is None:
head = temp
else:
prev.next = temp
prev = curr
curr = curr.next
return head
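# Example (worked through the loop above): for the input "1 2 3 4 5" the list
# 1->2->3->4->5 becomes 2->1->4->3->5, so the program prints "2 1 4 3 5".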
if __name__ == '__main__':
head = None
a = list(map(int, input().split()))
for i in reversed(a):
head = Node(i, head)
head = rearrange(head)
printList(head)
| 19.2 | 59 | 0.567235 |
ee7a3674577057df59feff5d07d1fd2a6c5b2f12 | 2,007 | py | Python | contrib/seeds/convert_ipv4hex.py | mattt21/zcoin | 730f0c39ba47f762627f13a1e9ea66d76b57a57d | [
"MIT"
] | 582 | 2016-09-26T00:08:04.000Z | 2020-10-25T19:07:24.000Z | contrib/seeds/convert_ipv4hex.py | mattt21/zcoin | 730f0c39ba47f762627f13a1e9ea66d76b57a57d | [
"MIT"
] | 570 | 2016-09-28T07:29:53.000Z | 2020-10-26T10:24:19.000Z | contrib/seeds/convert_ipv4hex.py | mattt21/zcoin | 730f0c39ba47f762627f13a1e9ea66d76b57a57d | [
"MIT"
] | 409 | 2016-09-21T12:37:31.000Z | 2020-10-18T14:54:17.000Z | import socket
import struct
# Worked example of the little-endian conversion (kept here as a comment; the
# original file left these expressions as no-op interpreter statements):
#   addr_long = int("0200A8C0", 16)
#   socket.inet_ntoa(struct.pack("<L", addr_long))  # -> '192.168.0.2'
pnSeed = [
0x34bb2958, 0x34b22272, 0x284c48d2, 0x010a8878,
0x010a8854, 0x68ed0421, 0x6e5001fa, 0x704a39c2,
0x714efdbe, 0x72d72ed0, 0x73ca9f1c, 0x73d7e9a2,
0x73d8b17e, 0x7596b911, 0x76be4d45, 0x7782e589,
0x77893560, 0x7789374c, 0x773150e0, 0x784d393f,
0x79db7b43, 0x7928763e, 0x7b740689, 0x7d6f8d02,
0x7d242fef, 0x0d5b2dfa, 0x8682ac58, 0x86c417ee,
0x88f3329f, 0x8b3be656, 0x8ce0749d, 0x904ced27,
0x9e457e9c, 0x9e45f85d, 0xac682d73, 0xadefd429,
0xae4c9ec2, 0xaf9157c8, 0xb0e28fc7, 0xb637d267,
0xb7595fb4, 0xb8a481ca, 0xb98d1b95, 0xb959f43f,
0xc2e40b90, 0xd5fe6742, 0xd86364ca, 0xd94f2e9f,
0xd94f2e9f, 0xdb837aee, 0xdc82eae2, 0xdca0c426,
0xdd0b1685, 0xdea11aeb, 0xde5c48ae, 0xdf49ea1c,
0x1b985be9, 0x1f0a9de8, 0x1f862769, 0x22c182dd,
0x22ca06ce, 0x22e083f8, 0x239c545a, 0x242f89ed,
0x253b180f, 0x276d7d49, 0x284c48d2, 0x2be98289,
0x2e00c010, 0x2e79c64a, 0x2ea6cc3e, 0x2f95241d,
0x2f3400c1, 0x2f5a1703, 0x31230c06, 0x34a60b6c,
0x34a90b6d, 0x34b22272, 0x34bb1634, 0x34bb2958,
0x34063da3, 0x3ba8a876, 0x3d64127d, 0x41be2593,
0x420bb276, 0x497840f9, 0x4ad058a2, 0x4e9d1850,
0x4e5e20c2, 0x5096a322, 0x54d03240, 0x54195ae4,
0x598e28dc, 0x5c3f39aa, 0x5d327298
]
default_port = '8168'
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
def hexToIP(hexNumber):
    # Pack the integer big-endian and render it as a dotted-quad IPv4 string.
    return socket.inet_ntoa(struct.pack(">L", hexNumber))
def ipToV6HexString(addr, port):
arrays = pchIPv4 + bytearray((int(x) for x in addr.split('.')))
s = '{{'
for number in arrays:
s += str(hex(number))
s += ','
s = s[:-1] + '},' + str(port) + '},'
return s
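# Illustrative output for a single address (not one of the seeds above):
# ipToV6HexString("192.168.0.2", default_port) returns
# "{{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xff,0xff,0xc0,0xa8,0x0,0x2},8168},"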
for number in pnSeed:
print(ipToV6HexString(hexToIP(number), default_port)) | 35.210526 | 67 | 0.713004 |
3d8256b97ea01060053dd1a25850b43bf843207b | 335 | py | Python | behavioral/command/receiver.py | pascalweiss/gof_design_patterns | d142ebf21bb1a1e7925b0e7915eb6d857df58299 | [
"Apache-2.0"
] | null | null | null | behavioral/command/receiver.py | pascalweiss/gof_design_patterns | d142ebf21bb1a1e7925b0e7915eb6d857df58299 | [
"Apache-2.0"
] | null | null | null | behavioral/command/receiver.py | pascalweiss/gof_design_patterns | d142ebf21bb1a1e7925b0e7915eb6d857df58299 | [
"Apache-2.0"
] | null | null | null |
class Receiver1:
# action
def step_left(self):
print("Receiver1 steps left")
def step_right(self):
print("Receiver1 steps right")
class Receiver2:
# action
def step_forward(self):
print("Receiver2 steps forward")
def step_backward(self):
print("Receiver2 steps backwards")
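# Minimal sketch (not part of the original repository): in the Command pattern a
# concrete command object wraps a receiver and forwards execute() to one of its
# actions. The class below is an invented example that uses Receiver1.
class ExampleStepLeftCommand:
    def __init__(self, receiver):
        self.receiver = receiver

    def execute(self):
        self.receiver.step_left()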
| 17.631579 | 42 | 0.635821 |
e26096c56512e8d3047b77a93472203f80396328 | 72,809 | py | Python | test/dependencies/cxxtest/python/python3/cxxtest/cxx_parser.py | merly-ai/subprocess | bddef925a19a8b1035799bfd5a2163311750f4f4 | [
"MIT"
] | 111 | 2020-04-04T23:50:23.000Z | 2022-03-26T21:01:03.000Z | test/dependencies/cxxtest/python/python3/cxxtest/cxx_parser.py | merly-ai/subprocess | bddef925a19a8b1035799bfd5a2163311750f4f4 | [
"MIT"
] | 5 | 2021-01-25T04:39:13.000Z | 2022-01-19T23:17:52.000Z | test/dependencies/cxxtest/python/python3/cxxtest/cxx_parser.py | merly-ai/subprocess | bddef925a19a8b1035799bfd5a2163311750f4f4 | [
"MIT"
] | 12 | 2020-07-26T07:48:07.000Z | 2022-02-17T17:53:26.000Z | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
# vim: fileencoding=utf-8
#
# This is a PLY parser for the entire ANSI C++ grammar. This grammar was
# adapted from the FOG grammar developed by E. D. Willink. See
#
# http://www.computing.surrey.ac.uk/research/dsrg/fog/
#
# for further details.
#
# The goal of this grammar is to extract information about class, function and
# class method declarations, along with their associated scope. Thus, this
# grammar can be used to analyze classes in an inheritance heirarchy, and then
# enumerate the methods in a derived class.
#
# This grammar parses blocks of <>, (), [] and {} in a generic manner. Thus,
# There are several capabilities that this grammar does not support:
#
# 1. Ambiguous template specification. This grammar cannot parse template
# specifications that do not have paired <>'s in their declaration. In
# particular, ambiguous declarations like
#
# foo<A, c<3 >();
#
# cannot be correctly parsed.
#
# 2. Template class specialization. Although the goal of this grammar is to
# extract class information, specialization of templated classes is
# not supported. When a template class definition is parsed, it's
# declaration is archived without information about the template
# parameters. Class specializations will be stored separately, and
# thus they can be processed after the fact. However, this grammar
#       does not attempt to correctly process properties of class inheritance
# when template class specialization is employed.
#
#
# TODO: document usage of this file
#
import os
import ply.lex as lex
import ply.yacc as yacc
import re
try:
from collections import OrderedDict
except ImportError: #pragma: no cover
from ordereddict import OrderedDict
# global data
lexer = None
scope_lineno = 0
identifier_lineno = {}
_parse_info=None
_parsedata=None
noExceptionLogic = True
def ply_init(data):
global _parsedata
_parsedata=data
class Scope(object):
def __init__(self,name,abs_name,scope_t,base_classes,lineno):
self.function=[]
self.name=name
self.scope_t=scope_t
self.sub_scopes=[]
self.base_classes=base_classes
self.abs_name=abs_name
self.lineno=lineno
def insert(self,scope):
self.sub_scopes.append(scope)
class CppInfo(object):
def __init__(self, filter=None):
self.verbose=0
if filter is None:
self.filter=re.compile("[Tt][Ee][Ss][Tt]|createSuite|destroySuite")
else:
self.filter=filter
self.scopes=[""]
self.index=OrderedDict()
self.index[""]=Scope("","::","namespace",[],1)
self.function=[]
def push_scope(self,ns,scope_t,base_classes=[]):
name = self.scopes[-1]+"::"+ns
if self.verbose>=2:
print("-- Starting "+scope_t+" "+name)
self.scopes.append(name)
self.index[name] = Scope(ns,name,scope_t,base_classes,scope_lineno-1)
def pop_scope(self):
scope = self.scopes.pop()
if self.verbose>=2:
print("-- Stopping "+scope)
return scope
def add_function(self, fn):
fn = str(fn)
if self.filter.search(fn):
self.index[self.scopes[-1]].function.append((fn, identifier_lineno.get(fn,lexer.lineno-1)))
tmp = self.scopes[-1]+"::"+fn
if self.verbose==2:
print("-- Function declaration "+fn+" "+tmp)
elif self.verbose==1:
print("-- Function declaration "+tmp)
def get_functions(self,name,quiet=False):
if name == "::":
name = ""
scope = self.index[name]
fns=scope.function
for key in scope.base_classes:
cname = self.find_class(key,scope)
if cname is None:
if not quiet:
print("Defined classes: ",list(self.index.keys()))
print("WARNING: Unknown class "+key)
else:
fns += self.get_functions(cname,quiet)
return fns
def find_class(self,name,scope):
if ':' in name:
if name in self.index:
return name
else:
return None
tmp = scope.abs_name.split(':')
name1 = ":".join(tmp[:-1] + [name])
if name1 in self.index:
return name1
name2 = "::"+name
if name2 in self.index:
return name2
return None
def __repr__(self):
return str(self)
def is_baseclass(self,cls,base):
'''Returns true if base is a base-class of cls'''
if cls in self.index:
bases = self.index[cls]
elif "::"+cls in self.index:
bases = self.index["::"+cls]
else:
return False
#raise IOError, "Unknown class "+cls
if base in bases.base_classes:
return True
for name in bases.base_classes:
if self.is_baseclass(name,base):
return True
return False
def __str__(self):
ans=""
keys = list(self.index.keys())
keys.sort()
for key in keys:
scope = self.index[key]
ans += scope.scope_t+" "+scope.abs_name+"\n"
if scope.scope_t == "class":
ans += " Base Classes: "+str(scope.base_classes)+"\n"
for fn in self.get_functions(scope.abs_name):
ans += " "+fn+"\n"
else:
for fn in scope.function:
ans += " "+fn+"\n"
return ans
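# Illustrative sketch (not part of the original module): how the scope index
# maintained by CppInfo can be populated and queried directly. The class names
# used here are invented for the example; normally the parser drives these calls.
def _cpp_info_example():
    info = CppInfo()
    info.push_scope("BaseSuite", "class")
    info.pop_scope()
    info.push_scope("MyTests", "class", ["BaseSuite"])
    info.pop_scope()
    assert info.is_baseclass("MyTests", "BaseSuite")
    return info.get_functions("::MyTests", quiet=True)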
def flatten(x):
"""Flatten nested list"""
try:
strtypes = str
except: # for python3 etc
strtypes = (str, bytes)
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, strtypes):
result.extend(flatten(el))
else:
result.append(el)
return result
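# For example (illustrative): flatten(["a", ["b", ["c", "d"]], "e"]) returns
# ["a", "b", "c", "d", "e"]; strings are treated as atoms rather than being
# split into their characters.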
#
# The lexer (and/or a preprocessor) is expected to identify the following
#
# Punctuation:
#
#
literals = "+-*/%^&|~!<>=:()?.\'\"\\@$;,"
#
reserved = {
'private' : 'PRIVATE',
'protected' : 'PROTECTED',
'public' : 'PUBLIC',
'bool' : 'BOOL',
'char' : 'CHAR',
'double' : 'DOUBLE',
'float' : 'FLOAT',
'int' : 'INT',
'long' : 'LONG',
'short' : 'SHORT',
'signed' : 'SIGNED',
'unsigned' : 'UNSIGNED',
'void' : 'VOID',
'wchar_t' : 'WCHAR_T',
'class' : 'CLASS',
'enum' : 'ENUM',
'namespace' : 'NAMESPACE',
'struct' : 'STRUCT',
'typename' : 'TYPENAME',
'union' : 'UNION',
'const' : 'CONST',
'volatile' : 'VOLATILE',
'auto' : 'AUTO',
'explicit' : 'EXPLICIT',
'export' : 'EXPORT',
'extern' : 'EXTERN',
'__extension__' : 'EXTENSION',
'friend' : 'FRIEND',
'inline' : 'INLINE',
'mutable' : 'MUTABLE',
'register' : 'REGISTER',
'static' : 'STATIC',
'template' : 'TEMPLATE',
'typedef' : 'TYPEDEF',
'using' : 'USING',
'virtual' : 'VIRTUAL',
'asm' : 'ASM',
'break' : 'BREAK',
'case' : 'CASE',
'catch' : 'CATCH',
'const_cast' : 'CONST_CAST',
'continue' : 'CONTINUE',
'default' : 'DEFAULT',
'delete' : 'DELETE',
'do' : 'DO',
'dynamic_cast' : 'DYNAMIC_CAST',
'else' : 'ELSE',
'false' : 'FALSE',
'for' : 'FOR',
'goto' : 'GOTO',
'if' : 'IF',
'new' : 'NEW',
'operator' : 'OPERATOR',
'reinterpret_cast' : 'REINTERPRET_CAST',
'return' : 'RETURN',
'sizeof' : 'SIZEOF',
'static_cast' : 'STATIC_CAST',
'switch' : 'SWITCH',
'this' : 'THIS',
'throw' : 'THROW',
'true' : 'TRUE',
'try' : 'TRY',
'typeid' : 'TYPEID',
'while' : 'WHILE',
'"C"' : 'CLiteral',
'"C++"' : 'CppLiteral',
'__attribute__' : 'ATTRIBUTE',
'__cdecl__' : 'CDECL',
'__typeof' : 'uTYPEOF',
'typeof' : 'TYPEOF',
'CXXTEST_STD' : 'CXXTEST_STD'
}
tokens = [
"CharacterLiteral",
"FloatingLiteral",
"Identifier",
"IntegerLiteral",
"StringLiteral",
"RBRACE",
"LBRACE",
"RBRACKET",
"LBRACKET",
"ARROW",
"ARROW_STAR",
"DEC",
"EQ",
"GE",
"INC",
"LE",
"LOG_AND",
"LOG_OR",
"NE",
"SHL",
"SHR",
"ASS_ADD",
"ASS_AND",
"ASS_DIV",
"ASS_MOD",
"ASS_MUL",
"ASS_OR",
"ASS_SHL",
"ASS_SHR",
"ASS_SUB",
"ASS_XOR",
"DOT_STAR",
"ELLIPSIS",
"SCOPE",
] + list(reserved.values())
t_ignore = " \t\r"
t_LBRACE = r"(\{)|(<%)"
t_RBRACE = r"(\})|(%>)"
t_LBRACKET = r"(\[)|(<:)"
t_RBRACKET = r"(\])|(:>)"
t_ARROW = r"->"
t_ARROW_STAR = r"->\*"
t_DEC = r"--"
t_EQ = r"=="
t_GE = r">="
t_INC = r"\+\+"
t_LE = r"<="
t_LOG_AND = r"&&"
t_LOG_OR = r"\|\|"
t_NE = r"!="
t_SHL = r"<<"
t_SHR = r">>"
t_ASS_ADD = r"\+="
t_ASS_AND = r"&="
t_ASS_DIV = r"/="
t_ASS_MOD = r"%="
t_ASS_MUL = r"\*="
t_ASS_OR = r"\|="
t_ASS_SHL = r"<<="
t_ASS_SHR = r">>="
t_ASS_SUB = r"-="
t_ASS_XOR = r"^="
t_DOT_STAR = r"\.\*"
t_ELLIPSIS = r"\.\.\."
t_SCOPE = r"::"
# Discard comments
def t_COMMENT(t):
r'(/\*(.|\n)*?\*/)|(//.*?\n)|(\#.*?\n)'
t.lexer.lineno += t.value.count("\n")
t_IntegerLiteral = r'(0x[0-9A-F]+)|([0-9]+(L){0,1})'
t_FloatingLiteral = r"[0-9]+[eE\.\+-]+[eE\.\+\-0-9]+"
t_CharacterLiteral = r'\'([^\'\\]|\\.)*\''
#t_StringLiteral = r'"([^"\\]|\\.)*"'
def t_StringLiteral(t):
r'"([^"\\]|\\.)*"'
t.type = reserved.get(t.value,'StringLiteral')
return t
def t_Identifier(t):
r"[a-zA-Z_][a-zA-Z_0-9\.]*"
t.type = reserved.get(t.value,'Identifier')
return t
def t_error(t):
print("Illegal character '%s'" % t.value[0])
#raise IOError, "Parse error"
#t.lexer.skip()
def t_newline(t):
r'[\n]+'
t.lexer.lineno += len(t.value)
precedence = (
( 'right', 'SHIFT_THERE', 'REDUCE_HERE_MOSTLY', 'SCOPE'),
( 'nonassoc', 'ELSE', 'INC', 'DEC', '+', '-', '*', '&', 'LBRACKET', 'LBRACE', '<', ':', ')')
)
start = 'translation_unit'
#
# The %prec resolves the 14.2-3 ambiguity:
# Identifier '<' is forced to go through the is-it-a-template-name test
# All names absorb TEMPLATE with the name, so that no template_test is
# performed for them. This requires all potential declarations within an
# expression to perpetuate this policy and thereby guarantee the ultimate
# coverage of explicit_instantiation.
#
# The %prec also resolves a conflict in identifier : which is forced to be a
# shift of a label for a labeled-statement rather than a reduction for the
# name of a bit-field or generalised constructor. This is pretty dubious
# syntactically but correct for all semantic possibilities. The shift is
# only activated when the ambiguity exists at the start of a statement.
# In this context a bit-field declaration or constructor definition are not
# allowed.
#
def p_identifier(p):
'''identifier : Identifier
| CXXTEST_STD '(' Identifier ')'
'''
if p[1][0] in ('t','T','c','d'):
identifier_lineno[p[1]] = p.lineno(1)
p[0] = p[1]
def p_id(p):
'''id : identifier %prec SHIFT_THERE
| template_decl
| TEMPLATE id
'''
p[0] = get_rest(p)
def p_global_scope(p):
'''global_scope : SCOPE
'''
p[0] = get_rest(p)
def p_id_scope(p):
'''id_scope : id SCOPE'''
p[0] = get_rest(p)
def p_id_scope_seq(p):
'''id_scope_seq : id_scope
| id_scope id_scope_seq
'''
p[0] = get_rest(p)
#
# A :: B :: C; is ambiguous How much is type and how much name ?
# The %prec maximises the (type) length which is the 7.1-2 semantic constraint.
#
def p_nested_id(p):
'''nested_id : id %prec SHIFT_THERE
| id_scope nested_id
'''
p[0] = get_rest(p)
def p_scoped_id(p):
'''scoped_id : nested_id
| global_scope nested_id
| id_scope_seq
| global_scope id_scope_seq
'''
global scope_lineno
scope_lineno = lexer.lineno
data = flatten(get_rest(p))
if data[0] != None:
p[0] = "".join(data)
#
# destructor_id has to be held back to avoid a conflict with a one's
# complement as per 5.3.1-9, It gets put back only when scoped or in a
# declarator_id, which is only used as an explicit member name.
# Declarations of an unscoped destructor are always parsed as a one's
# complement.
#
def p_destructor_id(p):
'''destructor_id : '~' id
| TEMPLATE destructor_id
'''
p[0]=get_rest(p)
#def p_template_id(p):
# '''template_id : empty
# | TEMPLATE
# '''
# pass
def p_template_decl(p):
'''template_decl : identifier '<' nonlgt_seq_opt '>'
'''
#
# WEH: should we include the lt/gt symbols to indicate that this is a
# template class? How is that going to be used later???
#
#p[0] = [p[1] ,"<",">"]
p[0] = p[1]
def p_special_function_id(p):
'''special_function_id : conversion_function_id
| operator_function_id
| TEMPLATE special_function_id
'''
p[0]=get_rest(p)
def p_nested_special_function_id(p):
'''nested_special_function_id : special_function_id
| id_scope destructor_id
| id_scope nested_special_function_id
'''
p[0]=get_rest(p)
def p_scoped_special_function_id(p):
'''scoped_special_function_id : nested_special_function_id
| global_scope nested_special_function_id
'''
p[0]=get_rest(p)
# declarator-id is all names in all scopes, except reserved words
def p_declarator_id(p):
'''declarator_id : scoped_id
| scoped_special_function_id
| destructor_id
'''
p[0]=p[1]
#
# The standard defines pseudo-destructors in terms of type-name, which is
# class/enum/typedef, of which class-name is covered by a normal destructor.
# pseudo-destructors are supposed to support ~int() in templates, so the
# grammar here covers built-in names. Other names are covered by the lack
# of identifier/type discrimination.
#
def p_built_in_type_id(p):
'''built_in_type_id : built_in_type_specifier
| built_in_type_id built_in_type_specifier
'''
pass
def p_pseudo_destructor_id(p):
'''pseudo_destructor_id : built_in_type_id SCOPE '~' built_in_type_id
| '~' built_in_type_id
| TEMPLATE pseudo_destructor_id
'''
pass
def p_nested_pseudo_destructor_id(p):
'''nested_pseudo_destructor_id : pseudo_destructor_id
| id_scope nested_pseudo_destructor_id
'''
pass
def p_scoped_pseudo_destructor_id(p):
'''scoped_pseudo_destructor_id : nested_pseudo_destructor_id
| global_scope scoped_pseudo_destructor_id
'''
pass
#-------------------------------------------------------------------------------
# A.2 Lexical conventions
#-------------------------------------------------------------------------------
#
def p_literal(p):
'''literal : IntegerLiteral
| CharacterLiteral
| FloatingLiteral
| StringLiteral
| TRUE
| FALSE
'''
pass
#-------------------------------------------------------------------------------
# A.3 Basic concepts
#-------------------------------------------------------------------------------
def p_translation_unit(p):
'''translation_unit : declaration_seq_opt
'''
pass
#-------------------------------------------------------------------------------
# A.4 Expressions
#-------------------------------------------------------------------------------
#
# primary_expression covers an arbitrary sequence of all names with the
# exception of an unscoped destructor, which is parsed as its unary expression
# which is the correct disambiguation (when ambiguous). This eliminates the
# traditional A(B) meaning A B ambiguity, since we never have to tack an A
# onto the front of something that might start with (. The name length got
# maximised ab initio. The downside is that semantic interpretation must split
# the names up again.
#
# Unification of the declaration and expression syntax means that unary and
# binary pointer declarator operators:
# int * * name
# are parsed as binary and unary arithmetic operators (int) * (*name). Since
# type information is not used
# ambiguities resulting from a cast
# (cast)*(value)
# are resolved to favour the binary rather than the cast unary to ease AST
# clean-up. The cast-call ambiguity must be resolved to the cast to ensure
# that (a)(b)c can be parsed.
#
# The problem of the functional cast ambiguity
# name(arg)
# as call or declaration is avoided by maximising the name within the parsing
# kernel. So primary_id_expression picks up
# extern long int const var = 5;
# as an assignment to the syntax parsed as "extern long int const var". The
# presence of two names is parsed so that "extern long int const" is
# distinguished from "var" considerably simplifying subsequent
# semantic resolution.
#
# The generalised name is a concatenation of potential type-names (scoped
# identifiers or built-in sequences) plus optionally one of the special names
# such as an operator-function-id, conversion-function-id or destructor as the
# final name.
#
def get_rest(p):
return [p[i] for i in range(1, len(p))]
def p_primary_expression(p):
'''primary_expression : literal
| THIS
| suffix_decl_specified_ids
| abstract_expression %prec REDUCE_HERE_MOSTLY
'''
p[0] = get_rest(p)
#
# Abstract-expression covers the () and [] of abstract-declarators.
#
def p_abstract_expression(p):
'''abstract_expression : parenthesis_clause
| LBRACKET bexpression_opt RBRACKET
| TEMPLATE abstract_expression
'''
pass
def p_postfix_expression(p):
'''postfix_expression : primary_expression
| postfix_expression parenthesis_clause
| postfix_expression LBRACKET bexpression_opt RBRACKET
| postfix_expression LBRACKET bexpression_opt RBRACKET attributes
| postfix_expression '.' declarator_id
| postfix_expression '.' scoped_pseudo_destructor_id
| postfix_expression ARROW declarator_id
| postfix_expression ARROW scoped_pseudo_destructor_id
| postfix_expression INC
| postfix_expression DEC
| DYNAMIC_CAST '<' nonlgt_seq_opt '>' '(' expression ')'
| STATIC_CAST '<' nonlgt_seq_opt '>' '(' expression ')'
| REINTERPRET_CAST '<' nonlgt_seq_opt '>' '(' expression ')'
| CONST_CAST '<' nonlgt_seq_opt '>' '(' expression ')'
| TYPEID parameters_clause
'''
#print "HERE",str(p[1])
p[0] = get_rest(p)
def p_bexpression_opt(p):
'''bexpression_opt : empty
| bexpression
'''
pass
def p_bexpression(p):
'''bexpression : nonbracket_seq
| nonbracket_seq bexpression_seq bexpression_clause nonbracket_seq_opt
| bexpression_seq bexpression_clause nonbracket_seq_opt
'''
pass
def p_bexpression_seq(p):
'''bexpression_seq : empty
| bexpression_seq bexpression_clause nonbracket_seq_opt
'''
pass
def p_bexpression_clause(p):
'''bexpression_clause : LBRACKET bexpression_opt RBRACKET
'''
pass
def p_expression_list_opt(p):
'''expression_list_opt : empty
| expression_list
'''
pass
def p_expression_list(p):
'''expression_list : assignment_expression
| expression_list ',' assignment_expression
'''
pass
def p_unary_expression(p):
'''unary_expression : postfix_expression
| INC cast_expression
| DEC cast_expression
| ptr_operator cast_expression
| suffix_decl_specified_scope star_ptr_operator cast_expression
| '+' cast_expression
| '-' cast_expression
| '!' cast_expression
| '~' cast_expression
| SIZEOF unary_expression
| new_expression
| global_scope new_expression
| delete_expression
| global_scope delete_expression
'''
p[0] = get_rest(p)
def p_delete_expression(p):
'''delete_expression : DELETE cast_expression
'''
pass
def p_new_expression(p):
'''new_expression : NEW new_type_id new_initializer_opt
| NEW parameters_clause new_type_id new_initializer_opt
| NEW parameters_clause
| NEW parameters_clause parameters_clause new_initializer_opt
'''
pass
def p_new_type_id(p):
'''new_type_id : type_specifier ptr_operator_seq_opt
| type_specifier new_declarator
| type_specifier new_type_id
'''
pass
def p_new_declarator(p):
'''new_declarator : ptr_operator new_declarator
| direct_new_declarator
'''
pass
def p_direct_new_declarator(p):
'''direct_new_declarator : LBRACKET bexpression_opt RBRACKET
| direct_new_declarator LBRACKET bexpression RBRACKET
'''
pass
def p_new_initializer_opt(p):
'''new_initializer_opt : empty
| '(' expression_list_opt ')'
'''
pass
#
# cast-expression is generalised to support a [] as well as a () prefix. This covers the omission of
# DELETE[] which when followed by a parenthesised expression was ambiguous. It also covers the gcc
# indexed array initialisation for free.
#
def p_cast_expression(p):
'''cast_expression : unary_expression
| abstract_expression cast_expression
'''
p[0] = get_rest(p)
def p_pm_expression(p):
'''pm_expression : cast_expression
| pm_expression DOT_STAR cast_expression
| pm_expression ARROW_STAR cast_expression
'''
p[0] = get_rest(p)
def p_multiplicative_expression(p):
'''multiplicative_expression : pm_expression
| multiplicative_expression star_ptr_operator pm_expression
| multiplicative_expression '/' pm_expression
| multiplicative_expression '%' pm_expression
'''
p[0] = get_rest(p)
def p_additive_expression(p):
'''additive_expression : multiplicative_expression
| additive_expression '+' multiplicative_expression
| additive_expression '-' multiplicative_expression
'''
p[0] = get_rest(p)
def p_shift_expression(p):
'''shift_expression : additive_expression
| shift_expression SHL additive_expression
| shift_expression SHR additive_expression
'''
p[0] = get_rest(p)
# | relational_expression '<' shift_expression
# | relational_expression '>' shift_expression
# | relational_expression LE shift_expression
# | relational_expression GE shift_expression
def p_relational_expression(p):
'''relational_expression : shift_expression
'''
p[0] = get_rest(p)
def p_equality_expression(p):
'''equality_expression : relational_expression
| equality_expression EQ relational_expression
| equality_expression NE relational_expression
'''
p[0] = get_rest(p)
def p_and_expression(p):
'''and_expression : equality_expression
| and_expression '&' equality_expression
'''
p[0] = get_rest(p)
def p_exclusive_or_expression(p):
'''exclusive_or_expression : and_expression
| exclusive_or_expression '^' and_expression
'''
p[0] = get_rest(p)
def p_inclusive_or_expression(p):
'''inclusive_or_expression : exclusive_or_expression
| inclusive_or_expression '|' exclusive_or_expression
'''
p[0] = get_rest(p)
def p_logical_and_expression(p):
'''logical_and_expression : inclusive_or_expression
| logical_and_expression LOG_AND inclusive_or_expression
'''
p[0] = get_rest(p)
def p_logical_or_expression(p):
'''logical_or_expression : logical_and_expression
| logical_or_expression LOG_OR logical_and_expression
'''
p[0] = get_rest(p)
def p_conditional_expression(p):
'''conditional_expression : logical_or_expression
| logical_or_expression '?' expression ':' assignment_expression
'''
p[0] = get_rest(p)
#
# assignment-expression is generalised to cover the simple assignment of a braced initializer in order to
# contribute to the coverage of parameter-declaration and init-declaration.
#
# | logical_or_expression assignment_operator assignment_expression
def p_assignment_expression(p):
'''assignment_expression : conditional_expression
| logical_or_expression assignment_operator nonsemicolon_seq
| logical_or_expression '=' braced_initializer
| throw_expression
'''
p[0]=get_rest(p)
def p_assignment_operator(p):
'''assignment_operator : '='
| ASS_ADD
| ASS_AND
| ASS_DIV
| ASS_MOD
| ASS_MUL
| ASS_OR
| ASS_SHL
| ASS_SHR
| ASS_SUB
| ASS_XOR
'''
pass
#
# expression is widely used and usually single-element, so the reductions are arranged so that a
# single-element expression is returned as is. Multi-element expressions are parsed as a list that
# may then behave polymorphically as an element or be compacted to an element.
#
def p_expression(p):
'''expression : assignment_expression
| expression_list ',' assignment_expression
'''
p[0] = get_rest(p)
def p_constant_expression(p):
'''constant_expression : conditional_expression
'''
pass
#---------------------------------------------------------------------------------------------------
# A.5 Statements
#---------------------------------------------------------------------------------------------------
# Parsing statements is easy once simple_declaration has been generalised to cover expression_statement.
#
#
# The use of extern here is a hack. The 'extern "C" {}' block gets parsed
# as a function, so when nested 'extern "C"' declarations exist, they don't
# work because the block is viewed as a list of statements... :(
#
def p_statement(p):
'''statement : compound_statement
| declaration_statement
| try_block
| labeled_statement
| selection_statement
| iteration_statement
| jump_statement
'''
pass
def p_compound_statement(p):
'''compound_statement : LBRACE statement_seq_opt RBRACE
'''
pass
def p_statement_seq_opt(p):
'''statement_seq_opt : empty
| statement_seq_opt statement
'''
pass
#
# The dangling else conflict is resolved to the innermost if.
#
def p_selection_statement(p):
'''selection_statement : IF '(' condition ')' statement %prec SHIFT_THERE
| IF '(' condition ')' statement ELSE statement
| SWITCH '(' condition ')' statement
'''
pass
def p_condition_opt(p):
'''condition_opt : empty
| condition
'''
pass
def p_condition(p):
'''condition : nonparen_seq
| nonparen_seq condition_seq parameters_clause nonparen_seq_opt
| condition_seq parameters_clause nonparen_seq_opt
'''
pass
def p_condition_seq(p):
'''condition_seq : empty
| condition_seq parameters_clause nonparen_seq_opt
'''
pass
def p_labeled_statement(p):
'''labeled_statement : identifier ':' statement
| CASE constant_expression ':' statement
| DEFAULT ':' statement
'''
pass
def p_try_block(p):
'''try_block : TRY compound_statement handler_seq
'''
global noExceptionLogic
noExceptionLogic=False
def p_jump_statement(p):
'''jump_statement : BREAK ';'
| CONTINUE ';'
| RETURN nonsemicolon_seq ';'
| GOTO identifier ';'
'''
pass
def p_iteration_statement(p):
'''iteration_statement : WHILE '(' condition ')' statement
| DO statement WHILE '(' expression ')' ';'
| FOR '(' nonparen_seq_opt ')' statement
'''
pass
def p_declaration_statement(p):
'''declaration_statement : block_declaration
'''
pass
#---------------------------------------------------------------------------------------------------
# A.6 Declarations
#---------------------------------------------------------------------------------------------------
def p_compound_declaration(p):
'''compound_declaration : LBRACE declaration_seq_opt RBRACE
'''
pass
def p_declaration_seq_opt(p):
'''declaration_seq_opt : empty
| declaration_seq_opt declaration
'''
pass
def p_declaration(p):
'''declaration : block_declaration
| function_definition
| template_declaration
| explicit_specialization
| specialised_declaration
'''
pass
def p_specialised_declaration(p):
'''specialised_declaration : linkage_specification
| namespace_definition
| TEMPLATE specialised_declaration
'''
pass
def p_block_declaration(p):
'''block_declaration : simple_declaration
| specialised_block_declaration
'''
pass
def p_specialised_block_declaration(p):
'''specialised_block_declaration : asm_definition
| namespace_alias_definition
| using_declaration
| using_directive
| TEMPLATE specialised_block_declaration
'''
pass
def p_simple_declaration(p):
'''simple_declaration : ';'
| init_declaration ';'
| init_declarations ';'
| decl_specifier_prefix simple_declaration
'''
global _parse_info
if len(p) == 3:
if p[2] == ";":
decl = p[1]
else:
decl = p[2]
if decl is not None:
fp = flatten(decl)
if len(fp) >= 2 and fp[0] is not None and fp[0]!="operator" and fp[1] == '(':
p[0] = fp[0]
_parse_info.add_function(fp[0])
#
# A decl-specifier following a ptr_operator provokes a shift-reduce conflict for
# "* const name", which is resolved in favour of the pointer and implemented by
# providing versions of decl-specifier guaranteed not to start with a
# cv_qualifier. decl-specifiers are implemented type-centrically: the semantic
# constraint that there must be a type is exploited to impose structure, but it
# actually eliminates very little syntax. built-in types are multi-name and so
# need a different policy.
#
# non-type decl-specifiers are bound to the left-most type in a decl-specifier-seq, by parsing from the right and attaching suffixes to the right-hand type. Finally residual prefixes attach to the left.
#
def p_suffix_built_in_decl_specifier_raw(p):
'''suffix_built_in_decl_specifier_raw : built_in_type_specifier
| suffix_built_in_decl_specifier_raw built_in_type_specifier
| suffix_built_in_decl_specifier_raw decl_specifier_suffix
'''
pass
def p_suffix_built_in_decl_specifier(p):
'''suffix_built_in_decl_specifier : suffix_built_in_decl_specifier_raw
| TEMPLATE suffix_built_in_decl_specifier
'''
pass
# | id_scope_seq
# | SCOPE id_scope_seq
def p_suffix_named_decl_specifier(p):
'''suffix_named_decl_specifier : scoped_id
| elaborate_type_specifier
| suffix_named_decl_specifier decl_specifier_suffix
'''
p[0]=get_rest(p)
def p_suffix_named_decl_specifier_bi(p):
'''suffix_named_decl_specifier_bi : suffix_named_decl_specifier
| suffix_named_decl_specifier suffix_built_in_decl_specifier_raw
'''
p[0] = get_rest(p)
#print "HERE",get_rest(p)
def p_suffix_named_decl_specifiers(p):
'''suffix_named_decl_specifiers : suffix_named_decl_specifier_bi
| suffix_named_decl_specifiers suffix_named_decl_specifier_bi
'''
p[0] = get_rest(p)
def p_suffix_named_decl_specifiers_sf(p):
'''suffix_named_decl_specifiers_sf : scoped_special_function_id
| suffix_named_decl_specifiers
| suffix_named_decl_specifiers scoped_special_function_id
'''
#print "HERE",get_rest(p)
p[0] = get_rest(p)
def p_suffix_decl_specified_ids(p):
'''suffix_decl_specified_ids : suffix_built_in_decl_specifier
| suffix_built_in_decl_specifier suffix_named_decl_specifiers_sf
| suffix_named_decl_specifiers_sf
'''
if len(p) == 3:
p[0] = p[2]
else:
p[0] = p[1]
def p_suffix_decl_specified_scope(p):
'''suffix_decl_specified_scope : suffix_named_decl_specifiers SCOPE
| suffix_built_in_decl_specifier suffix_named_decl_specifiers SCOPE
| suffix_built_in_decl_specifier SCOPE
'''
p[0] = get_rest(p)
def p_decl_specifier_affix(p):
'''decl_specifier_affix : storage_class_specifier
| function_specifier
| FRIEND
| TYPEDEF
| cv_qualifier
'''
pass
def p_decl_specifier_suffix(p):
'''decl_specifier_suffix : decl_specifier_affix
'''
pass
def p_decl_specifier_prefix(p):
'''decl_specifier_prefix : decl_specifier_affix
| TEMPLATE decl_specifier_prefix
'''
pass
def p_storage_class_specifier(p):
'''storage_class_specifier : REGISTER
| STATIC
| MUTABLE
| EXTERN %prec SHIFT_THERE
| EXTENSION
| AUTO
'''
pass
def p_function_specifier(p):
'''function_specifier : EXPLICIT
| INLINE
| VIRTUAL
'''
pass
def p_type_specifier(p):
'''type_specifier : simple_type_specifier
| elaborate_type_specifier
| cv_qualifier
'''
pass
def p_elaborate_type_specifier(p):
'''elaborate_type_specifier : class_specifier
| enum_specifier
| elaborated_type_specifier
| TEMPLATE elaborate_type_specifier
'''
pass
def p_simple_type_specifier(p):
'''simple_type_specifier : scoped_id
| scoped_id attributes
| built_in_type_specifier
'''
p[0] = p[1]
def p_built_in_type_specifier(p):
'''built_in_type_specifier : Xbuilt_in_type_specifier
| Xbuilt_in_type_specifier attributes
'''
pass
def p_attributes(p):
'''attributes : attribute
| attributes attribute
'''
pass
def p_attribute(p):
'''attribute : ATTRIBUTE '(' parameters_clause ')'
'''
def p_Xbuilt_in_type_specifier(p):
'''Xbuilt_in_type_specifier : CHAR
| WCHAR_T
| BOOL
| SHORT
| INT
| LONG
| SIGNED
| UNSIGNED
| FLOAT
| DOUBLE
| VOID
| uTYPEOF parameters_clause
| TYPEOF parameters_clause
'''
pass
#
# The over-general use of declaration_expression to cover decl-specifier-seq_opt declarator in a function-definition means that
# class X { };
# could be a function-definition or a class-specifier.
# enum X { };
# could be a function-definition or an enum-specifier.
# The function-definition is not syntactically valid so resolving the false conflict in favour of the
# elaborated_type_specifier is correct.
#
def p_elaborated_type_specifier(p):
'''elaborated_type_specifier : class_key scoped_id %prec SHIFT_THERE
| elaborated_enum_specifier
| TYPENAME scoped_id
'''
pass
def p_elaborated_enum_specifier(p):
'''elaborated_enum_specifier : ENUM scoped_id %prec SHIFT_THERE
'''
pass
def p_enum_specifier(p):
'''enum_specifier : ENUM scoped_id enumerator_clause
| ENUM enumerator_clause
'''
pass
def p_enumerator_clause(p):
'''enumerator_clause : LBRACE enumerator_list_ecarb
| LBRACE enumerator_list enumerator_list_ecarb
| LBRACE enumerator_list ',' enumerator_definition_ecarb
'''
pass
def p_enumerator_list_ecarb(p):
'''enumerator_list_ecarb : RBRACE
'''
pass
def p_enumerator_definition_ecarb(p):
'''enumerator_definition_ecarb : RBRACE
'''
pass
def p_enumerator_definition_filler(p):
'''enumerator_definition_filler : empty
'''
pass
def p_enumerator_list_head(p):
'''enumerator_list_head : enumerator_definition_filler
| enumerator_list ',' enumerator_definition_filler
'''
pass
def p_enumerator_list(p):
'''enumerator_list : enumerator_list_head enumerator_definition
'''
pass
def p_enumerator_definition(p):
'''enumerator_definition : enumerator
| enumerator '=' constant_expression
'''
pass
def p_enumerator(p):
'''enumerator : identifier
'''
pass
def p_namespace_definition(p):
'''namespace_definition : NAMESPACE scoped_id push_scope compound_declaration
| NAMESPACE push_scope compound_declaration
'''
global _parse_info
scope = _parse_info.pop_scope()
def p_namespace_alias_definition(p):
'''namespace_alias_definition : NAMESPACE scoped_id '=' scoped_id ';'
'''
pass
def p_push_scope(p):
'''push_scope : empty'''
global _parse_info
if p[-2] == "namespace":
scope=p[-1]
else:
scope=""
_parse_info.push_scope(scope,"namespace")
def p_using_declaration(p):
'''using_declaration : USING declarator_id ';'
| USING TYPENAME declarator_id ';'
'''
pass
def p_using_directive(p):
'''using_directive : USING NAMESPACE scoped_id ';'
'''
pass
# '''asm_definition : ASM '(' StringLiteral ')' ';'
def p_asm_definition(p):
'''asm_definition : ASM '(' nonparen_seq_opt ')' ';'
'''
pass
def p_linkage_specification(p):
'''linkage_specification : EXTERN CLiteral declaration
| EXTERN CLiteral compound_declaration
| EXTERN CppLiteral declaration
| EXTERN CppLiteral compound_declaration
'''
pass
#---------------------------------------------------------------------------------------------------
# A.7 Declarators
#---------------------------------------------------------------------------------------------------
#
# init-declarator is named init_declaration to reflect the embedded decl-specifier-seq_opt
#
def p_init_declarations(p):
'''init_declarations : assignment_expression ',' init_declaration
| init_declarations ',' init_declaration
'''
p[0]=get_rest(p)
def p_init_declaration(p):
'''init_declaration : assignment_expression
'''
p[0]=get_rest(p)
def p_star_ptr_operator(p):
'''star_ptr_operator : '*'
| star_ptr_operator cv_qualifier
'''
pass
def p_nested_ptr_operator(p):
'''nested_ptr_operator : star_ptr_operator
| id_scope nested_ptr_operator
'''
pass
def p_ptr_operator(p):
'''ptr_operator : '&'
| nested_ptr_operator
| global_scope nested_ptr_operator
'''
pass
def p_ptr_operator_seq(p):
'''ptr_operator_seq : ptr_operator
| ptr_operator ptr_operator_seq
'''
pass
#
# Independently coded to localise the shift-reduce conflict: sharing just needs another %prec
#
def p_ptr_operator_seq_opt(p):
'''ptr_operator_seq_opt : empty %prec SHIFT_THERE
| ptr_operator ptr_operator_seq_opt
'''
pass
def p_cv_qualifier_seq_opt(p):
'''cv_qualifier_seq_opt : empty
| cv_qualifier_seq_opt cv_qualifier
'''
pass
# TODO: verify that we should include attributes here
def p_cv_qualifier(p):
'''cv_qualifier : CONST
| VOLATILE
| attributes
'''
pass
def p_type_id(p):
'''type_id : type_specifier abstract_declarator_opt
| type_specifier type_id
'''
pass
def p_abstract_declarator_opt(p):
'''abstract_declarator_opt : empty
| ptr_operator abstract_declarator_opt
| direct_abstract_declarator
'''
pass
def p_direct_abstract_declarator_opt(p):
'''direct_abstract_declarator_opt : empty
| direct_abstract_declarator
'''
pass
def p_direct_abstract_declarator(p):
'''direct_abstract_declarator : direct_abstract_declarator_opt parenthesis_clause
| direct_abstract_declarator_opt LBRACKET RBRACKET
| direct_abstract_declarator_opt LBRACKET bexpression RBRACKET
'''
pass
def p_parenthesis_clause(p):
'''parenthesis_clause : parameters_clause cv_qualifier_seq_opt
| parameters_clause cv_qualifier_seq_opt exception_specification
'''
p[0] = ['(',')']
def p_parameters_clause(p):
'''parameters_clause : '(' condition_opt ')'
'''
p[0] = ['(',')']
#
# A typed abstract qualifier such as
# Class * ...
# looks like a multiply, so pointers are parsed as their binary operation equivalents that
# ultimately terminate with a degenerate right hand term.
#
def p_abstract_pointer_declaration(p):
'''abstract_pointer_declaration : ptr_operator_seq
| multiplicative_expression star_ptr_operator ptr_operator_seq_opt
'''
pass
def p_abstract_parameter_declaration(p):
'''abstract_parameter_declaration : abstract_pointer_declaration
| and_expression '&'
| and_expression '&' abstract_pointer_declaration
'''
pass
def p_special_parameter_declaration(p):
'''special_parameter_declaration : abstract_parameter_declaration
| abstract_parameter_declaration '=' assignment_expression
| ELLIPSIS
'''
pass
def p_parameter_declaration(p):
'''parameter_declaration : assignment_expression
| special_parameter_declaration
| decl_specifier_prefix parameter_declaration
'''
pass
#
# function_definition includes constructor, destructor, implicit int definitions
# too. A local destructor is successfully parsed as a function-declaration but
# the ~ was treated as a unary operator. constructor_head is the prefix
# ambiguity between a constructor and a member-init-list starting with a
# bit-field.
#
def p_function_definition(p):
'''function_definition : ctor_definition
| func_definition
'''
pass
def p_func_definition(p):
'''func_definition : assignment_expression function_try_block
| assignment_expression function_body
| decl_specifier_prefix func_definition
'''
global _parse_info
if p[2] is not None and p[2][0] == '{':
decl = flatten(p[1])
#print "HERE",decl
if decl[-1] == ')':
decl=decl[-3]
else:
decl=decl[-1]
p[0] = decl
if decl != "operator":
_parse_info.add_function(decl)
else:
p[0] = p[2]
def p_ctor_definition(p):
'''ctor_definition : constructor_head function_try_block
| constructor_head function_body
| decl_specifier_prefix ctor_definition
'''
if p[2] is None or p[2][0] == "try" or p[2][0] == '{':
p[0]=p[1]
else:
p[0]=p[1]
def p_constructor_head(p):
'''constructor_head : bit_field_init_declaration
| constructor_head ',' assignment_expression
'''
p[0]=p[1]
def p_function_try_block(p):
'''function_try_block : TRY function_block handler_seq
'''
global noExceptionLogic
noExceptionLogic=False
p[0] = ['try']
def p_function_block(p):
'''function_block : ctor_initializer_opt function_body
'''
pass
def p_function_body(p):
'''function_body : LBRACE nonbrace_seq_opt RBRACE
'''
p[0] = ['{','}']
def p_initializer_clause(p):
'''initializer_clause : assignment_expression
| braced_initializer
'''
pass
def p_braced_initializer(p):
'''braced_initializer : LBRACE initializer_list RBRACE
| LBRACE initializer_list ',' RBRACE
| LBRACE RBRACE
'''
pass
def p_initializer_list(p):
'''initializer_list : initializer_clause
| initializer_list ',' initializer_clause
'''
pass
#---------------------------------------------------------------------------------------------------
# A.8 Classes
#---------------------------------------------------------------------------------------------------
#
# An anonymous bit-field declaration may look very like inheritance:
# const int B = 3;
# class A : B ;
# The two usages are too distant to try to create and enforce a common prefix so we have to resort to
# a parser hack by backtracking. Inheritance is much the most likely so we mark the input stream context
# and try to parse a base-clause. If we successfully reach a { the base-clause is ok and inheritance was
# the correct choice so we unmark and continue. If we fail to find the { an error token causes
# back-tracking to the alternative parse in elaborated_type_specifier which regenerates the : and
# declares unconditional success.
#
def p_class_specifier_head(p):
'''class_specifier_head : class_key scoped_id ':' base_specifier_list LBRACE
| class_key ':' base_specifier_list LBRACE
| class_key scoped_id LBRACE
| class_key LBRACE
'''
global _parse_info
base_classes=[]
if len(p) == 6:
scope = p[2]
base_classes = p[4]
elif len(p) == 4:
scope = p[2]
elif len(p) == 5:
base_classes = p[3]
else:
scope = ""
_parse_info.push_scope(scope,p[1],base_classes)
def p_class_key(p):
'''class_key : CLASS
| STRUCT
| UNION
'''
p[0] = p[1]
def p_class_specifier(p):
'''class_specifier : class_specifier_head member_specification_opt RBRACE
'''
scope = _parse_info.pop_scope()
def p_member_specification_opt(p):
'''member_specification_opt : empty
| member_specification_opt member_declaration
'''
pass
def p_member_declaration(p):
'''member_declaration : accessibility_specifier
| simple_member_declaration
| function_definition
| using_declaration
| template_declaration
'''
p[0] = get_rest(p)
#print "Decl",get_rest(p)
#
# The generality of constructor names (there need be no parenthesised argument list) means that that
# name : f(g), h(i)
# could be the start of a constructor or the start of an anonymous bit-field. An ambiguity is avoided by
# parsing the ctor-initializer of a function_definition as a bit-field.
#
def p_simple_member_declaration(p):
'''simple_member_declaration : ';'
| assignment_expression ';'
| constructor_head ';'
| member_init_declarations ';'
| decl_specifier_prefix simple_member_declaration
'''
global _parse_info
decl = flatten(get_rest(p))
if len(decl) >= 4 and decl[-3] == "(":
_parse_info.add_function(decl[-4])
def p_member_init_declarations(p):
'''member_init_declarations : assignment_expression ',' member_init_declaration
| constructor_head ',' bit_field_init_declaration
| member_init_declarations ',' member_init_declaration
'''
pass
def p_member_init_declaration(p):
'''member_init_declaration : assignment_expression
| bit_field_init_declaration
'''
pass
def p_accessibility_specifier(p):
'''accessibility_specifier : access_specifier ':'
'''
pass
def p_bit_field_declaration(p):
'''bit_field_declaration : assignment_expression ':' bit_field_width
| ':' bit_field_width
'''
if len(p) == 4:
p[0]=p[1]
def p_bit_field_width(p):
'''bit_field_width : logical_or_expression
| logical_or_expression '?' bit_field_width ':' bit_field_width
'''
pass
def p_bit_field_init_declaration(p):
'''bit_field_init_declaration : bit_field_declaration
| bit_field_declaration '=' initializer_clause
'''
pass
#---------------------------------------------------------------------------------------------------
# A.9 Derived classes
#---------------------------------------------------------------------------------------------------
def p_base_specifier_list(p):
'''base_specifier_list : base_specifier
| base_specifier_list ',' base_specifier
'''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]+[p[3]]
def p_base_specifier(p):
'''base_specifier : scoped_id
| access_specifier base_specifier
| VIRTUAL base_specifier
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_access_specifier(p):
'''access_specifier : PRIVATE
| PROTECTED
| PUBLIC
'''
pass
#---------------------------------------------------------------------------------------------------
# A.10 Special member functions
#---------------------------------------------------------------------------------------------------
def p_conversion_function_id(p):
'''conversion_function_id : OPERATOR conversion_type_id
'''
p[0] = ['operator']
def p_conversion_type_id(p):
'''conversion_type_id : type_specifier ptr_operator_seq_opt
| type_specifier conversion_type_id
'''
pass
#
# Ctor-initialisers can look like a bit field declaration, given the generalisation of names:
# Class(Type) : m1(1), m2(2) { }
# NonClass(bit_field) : int(2), second_variable, ...
# The grammar below is used within a function_try_block or function_definition.
# See simple_member_declaration for use in normal member function_definition.
#
def p_ctor_initializer_opt(p):
'''ctor_initializer_opt : empty
| ctor_initializer
'''
pass
def p_ctor_initializer(p):
'''ctor_initializer : ':' mem_initializer_list
'''
pass
def p_mem_initializer_list(p):
'''mem_initializer_list : mem_initializer
| mem_initializer_list_head mem_initializer
'''
pass
def p_mem_initializer_list_head(p):
'''mem_initializer_list_head : mem_initializer_list ','
'''
pass
def p_mem_initializer(p):
'''mem_initializer : mem_initializer_id '(' expression_list_opt ')'
'''
pass
def p_mem_initializer_id(p):
'''mem_initializer_id : scoped_id
'''
pass
#---------------------------------------------------------------------------------------------------
# A.11 Overloading
#---------------------------------------------------------------------------------------------------
def p_operator_function_id(p):
'''operator_function_id : OPERATOR operator
| OPERATOR '(' ')'
| OPERATOR LBRACKET RBRACKET
| OPERATOR '<'
| OPERATOR '>'
| OPERATOR operator '<' nonlgt_seq_opt '>'
'''
p[0] = ["operator"]
#
# It is not clear from the ANSI standard whether spaces are permitted in delete[]. If not then it can
# be recognised and returned as DELETE_ARRAY by the lexer. Assuming spaces are permitted there is an
# ambiguity created by the over generalised nature of expressions. operator new is a valid delarator-id
# which we may have an undimensioned array of. Semantic rubbish, but syntactically valid. Since the
# array form is covered by the declarator consideration we can exclude the operator here. The need
# for a semantic rescue can be eliminated at the expense of a couple of shift-reduce conflicts by
# removing the comments on the next four lines.
#
def p_operator(p):
'''operator : NEW
| DELETE
| '+'
| '-'
| '*'
| '/'
| '%'
| '^'
| '&'
| '|'
| '~'
| '!'
| '='
| ASS_ADD
| ASS_SUB
| ASS_MUL
| ASS_DIV
| ASS_MOD
| ASS_XOR
| ASS_AND
| ASS_OR
| SHL
| SHR
| ASS_SHR
| ASS_SHL
| EQ
| NE
| LE
| GE
| LOG_AND
| LOG_OR
| INC
| DEC
| ','
| ARROW_STAR
| ARROW
'''
p[0]=p[1]
# | IF
# | SWITCH
# | WHILE
# | FOR
# | DO
def p_reserved(p):
'''reserved : PRIVATE
| CLiteral
| CppLiteral
| IF
| SWITCH
| WHILE
| FOR
| DO
| PROTECTED
| PUBLIC
| BOOL
| CHAR
| DOUBLE
| FLOAT
| INT
| LONG
| SHORT
| SIGNED
| UNSIGNED
| VOID
| WCHAR_T
| CLASS
| ENUM
| NAMESPACE
| STRUCT
| TYPENAME
| UNION
| CONST
| VOLATILE
| AUTO
| EXPLICIT
| EXPORT
| EXTERN
| FRIEND
| INLINE
| MUTABLE
| REGISTER
| STATIC
| TEMPLATE
| TYPEDEF
| USING
| VIRTUAL
| ASM
| BREAK
| CASE
| CATCH
| CONST_CAST
| CONTINUE
| DEFAULT
| DYNAMIC_CAST
| ELSE
| FALSE
| GOTO
| OPERATOR
| REINTERPRET_CAST
| RETURN
| SIZEOF
| STATIC_CAST
| THIS
| THROW
| TRUE
| TRY
| TYPEID
| ATTRIBUTE
| CDECL
| TYPEOF
| uTYPEOF
'''
if p[1] in ('try', 'catch', 'throw'):
global noExceptionLogic
noExceptionLogic=False
#---------------------------------------------------------------------------------------------------
# A.12 Templates
#---------------------------------------------------------------------------------------------------
def p_template_declaration(p):
'''template_declaration : template_parameter_clause declaration
| EXPORT template_declaration
'''
pass
def p_template_parameter_clause(p):
'''template_parameter_clause : TEMPLATE '<' nonlgt_seq_opt '>'
'''
pass
#
# Generalised naming makes identifier a valid declaration, so TEMPLATE identifier is too.
# The TEMPLATE prefix is therefore folded into all names, parenthesis_clause and decl_specifier_prefix.
#
# explicit_instantiation: TEMPLATE declaration
#
def p_explicit_specialization(p):
'''explicit_specialization : TEMPLATE '<' '>' declaration
'''
pass
#---------------------------------------------------------------------------------------------------
# A.13 Exception Handling
#---------------------------------------------------------------------------------------------------
def p_handler_seq(p):
'''handler_seq : handler
| handler handler_seq
'''
pass
def p_handler(p):
'''handler : CATCH '(' exception_declaration ')' compound_statement
'''
global noExceptionLogic
noExceptionLogic=False
def p_exception_declaration(p):
'''exception_declaration : parameter_declaration
'''
pass
def p_throw_expression(p):
'''throw_expression : THROW
| THROW assignment_expression
'''
global noExceptionLogic
noExceptionLogic=False
def p_exception_specification(p):
'''exception_specification : THROW '(' ')'
| THROW '(' type_id_list ')'
'''
global noExceptionLogic
noExceptionLogic=False
def p_type_id_list(p):
'''type_id_list : type_id
| type_id_list ',' type_id
'''
pass
#---------------------------------------------------------------------------------------------------
# Misc productions
#---------------------------------------------------------------------------------------------------
def p_nonsemicolon_seq(p):
'''nonsemicolon_seq : empty
| nonsemicolon_seq nonsemicolon
'''
pass
def p_nonsemicolon(p):
'''nonsemicolon : misc
| '('
| ')'
| '<'
| '>'
| LBRACKET nonbracket_seq_opt RBRACKET
| LBRACE nonbrace_seq_opt RBRACE
'''
pass
def p_nonparen_seq_opt(p):
'''nonparen_seq_opt : empty
| nonparen_seq_opt nonparen
'''
pass
def p_nonparen_seq(p):
'''nonparen_seq : nonparen
| nonparen_seq nonparen
'''
pass
def p_nonparen(p):
'''nonparen : misc
| '<'
| '>'
| ';'
| LBRACKET nonbracket_seq_opt RBRACKET
| LBRACE nonbrace_seq_opt RBRACE
'''
pass
def p_nonbracket_seq_opt(p):
'''nonbracket_seq_opt : empty
| nonbracket_seq_opt nonbracket
'''
pass
def p_nonbracket_seq(p):
'''nonbracket_seq : nonbracket
| nonbracket_seq nonbracket
'''
pass
def p_nonbracket(p):
'''nonbracket : misc
| '<'
| '>'
| '('
| ')'
| ';'
| LBRACKET nonbracket_seq_opt RBRACKET
| LBRACE nonbrace_seq_opt RBRACE
'''
pass
def p_nonbrace_seq_opt(p):
'''nonbrace_seq_opt : empty
| nonbrace_seq_opt nonbrace
'''
pass
def p_nonbrace(p):
'''nonbrace : misc
| '<'
| '>'
| '('
| ')'
| ';'
| LBRACKET nonbracket_seq_opt RBRACKET
| LBRACE nonbrace_seq_opt RBRACE
'''
pass
def p_nonlgt_seq_opt(p):
'''nonlgt_seq_opt : empty
| nonlgt_seq_opt nonlgt
'''
pass
def p_nonlgt(p):
'''nonlgt : misc
| '('
| ')'
| LBRACKET nonbracket_seq_opt RBRACKET
| '<' nonlgt_seq_opt '>'
| ';'
'''
pass
def p_misc(p):
'''misc : operator
| identifier
| IntegerLiteral
| CharacterLiteral
| FloatingLiteral
| StringLiteral
| reserved
| '?'
| ':'
| '.'
| SCOPE
| ELLIPSIS
| EXTENSION
'''
pass
def p_empty(p):
'''empty : '''
pass
#
# Compute column.
# input is the input text string
# token is a token instance
#
def _find_column(input,token):
''' TODO '''
i = token.lexpos
while i > 0:
if input[i] == '\n': break
i -= 1
column = (token.lexpos - i)+1
return column
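#
# A minimal, illustrative sketch (not part of the original grammar file) showing
# how _find_column is used. _FakeToken is an assumption standing in for a ply
# LexToken; only its lexpos attribute matters here.
#
def _example_find_column():
    source = "int x;\nfloat y;"
    class _FakeToken(object):
        pass
    tok = _FakeToken()
    tok.lexpos = source.index("float")
    # Returns 2: this helper counts from the preceding newline, so the first
    # character of a line comes out as column 2.
    return _find_column(source, tok)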
def p_error(p):
if p is None:
tmp = "Syntax error at end of file."
else:
tmp = "Syntax error at token "
if p.type == "":
tmp = tmp + "''"
else:
tmp = tmp + str(p.type)
tmp = tmp + " with value '"+str(p.value)+"'"
tmp = tmp + " in line " + str(lexer.lineno-1)
tmp = tmp + " at column "+str(_find_column(_parsedata,p))
raise IOError( tmp )
#
# The function that performs the parsing
#
def parse_cpp(data=None, filename=None, debug=0, optimize=0, verbose=False, func_filter=None):
#
# Reset global data
#
global lexer
lexer = None
global scope_lineno
scope_lineno = 0
global identifier_lineno
identifier_lineno = {}
global _parse_info
_parse_info=None
global _parsedata
_parsedata=None
global noExceptionLogic
noExceptionLogic = True
#
if debug > 0:
print("Debugging parse_cpp!")
#
# Always remove the parser.out file, which is generated to create debugging
#
if os.path.exists("parser.out"):
os.remove("parser.out")
#
# Remove the parsetab.py* files. These apparently need to be removed
# to ensure the creation of a parser.out file.
#
if os.path.exists("parsetab.py"):
os.remove("parsetab.py")
if os.path.exists("parsetab.pyc"):
os.remove("parsetab.pyc")
global debugging
debugging=True
#
# Build lexer
#
lexer = lex.lex()
#
# Initialize parse object
#
_parse_info = CppInfo(filter=func_filter)
_parse_info.verbose=verbose
#
# Build yaccer
#
write_table = not os.path.exists("parsetab.py")
yacc.yacc(debug=debug, optimize=optimize, write_tables=write_table)
#
# Parse the file
#
if data is not None:
_parsedata=data
ply_init(_parsedata)
yacc.parse(data,debug=debug)
elif filename is not None:
f = open(filename)
data = f.read()
f.close()
_parsedata=data
ply_init(_parsedata)
yacc.parse(data, debug=debug)
else:
return None
#
if not noExceptionLogic:
_parse_info.noExceptionLogic = False
else:
for key in identifier_lineno:
if 'ASSERT_THROWS' in key:
_parse_info.noExceptionLogic = False
break
_parse_info.noExceptionLogic = True
#
return _parse_info
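#
# A minimal usage sketch (not part of the original module): parse_cpp also
# accepts an in-memory string via data=; the __main__ block below only
# exercises the filename= form. The C++ snippet is an arbitrary example and is
# not guaranteed to exercise every grammar rule.
#
def _example_parse_cpp_from_string():
    return parse_cpp(data="class Foo { };", verbose=False)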
import sys
if __name__ == '__main__': #pragma: no cover
#
# This MAIN routine parses a sequence of files provided at the command
# line. If '-v' is included, then a verbose parsing output is
# generated.
#
for arg in sys.argv[1:]:
if arg == "-v":
continue
print("Parsing file '"+arg+"'")
if '-v' in sys.argv:
parse_cpp(filename=arg,debug=2,verbose=2)
else:
parse_cpp(filename=arg,verbose=2)
#
# Print the _parse_info object summary for this file.
# This illustrates how class inheritance can be used to
# deduce class members.
#
print(str(_parse_info))
| 33.019955 | 487 | 0.504347 |
316bbb9f047e438a68ce341cdcc6421cb884e96a | 803 | py | Python | output/models/saxon_data/id/id002_xsd/id002.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/saxon_data/id/id002_xsd/id002.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/saxon_data/id/id002_xsd/id002.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Dict, List, Optional
@dataclass
class Para:
class Meta:
name = "para"
value: str = field(
default="",
metadata={
"required": True,
}
)
id_one: Optional[str] = field(
default=None,
metadata={
"name": "id-one",
"type": "Attribute",
}
)
any_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##any",
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
para: List[Para] = field(
default_factory=list,
metadata={
"type": "Element",
"min_occurs": 1,
}
)
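# A small, illustrative sketch (not part of the generated bindings): building a
# "doc" element with a single "para"; the keyword arguments follow the dataclass
# fields declared above.
def _example_doc() -> Doc:
    return Doc(para=[Para(value="hello", id_one="p1", any_attributes={"lang": "en"})])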
| 18.25 | 43 | 0.480697 |
330af0828571cf263b3f32179d072348c8c26f51 | 2,464 | py | Python | layint_scan_api/models/policies.py | LayeredInsight/layint_scan_api_python | bc258b2af5d2211b986e32fedea95fcfc7de80ff | [
"Apache-2.0"
] | 1 | 2018-03-26T23:55:00.000Z | 2018-03-26T23:55:00.000Z | layint_scan_api/models/policies.py | LayeredInsight/layint_scan_api_python | bc258b2af5d2211b986e32fedea95fcfc7de80ff | [
"Apache-2.0"
] | null | null | null | layint_scan_api/models/policies.py | LayeredInsight/layint_scan_api_python | bc258b2af5d2211b986e32fedea95fcfc7de80ff | [
"Apache-2.0"
] | 2 | 2020-11-04T02:56:33.000Z | 2020-11-05T08:12:01.000Z | # coding: utf-8
"""
Layered Insight Scan
Layered Insight Scan performs static vulnerability analysis, license and package compliance. You can find out more about Scan at http://layeredinsight.com.
OpenAPI spec version: 0.9.4
Contact: help@layeredinsight.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Policies(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
Policies - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Policies):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
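# A minimal illustrative sketch (not part of the generated model): Policies
# declares no swagger attributes, so to_dict() yields an empty mapping.
def _example_policies_to_dict():
    return Policies().to_dict()  # -> {}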
| 24.64 | 160 | 0.534091 |
ad8f42daf6de6428fa2f18a5a2d3759a9f839140 | 1,217 | py | Python | C++/python_test/bifa/test_bifa_score.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | [
"MIT"
] | null | null | null | C++/python_test/bifa/test_bifa_score.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | [
"MIT"
] | null | null | null | C++/python_test/bifa/test_bifa_score.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | [
"MIT"
] | null | null | null | #
# Copyright John Reid 2010, 2011
#
import _biopsy as biopsy
import _bifa as bifa
import numpy as N
print 'BiFa C++ module version %s' % bifa.version()
# make a PSSM
pssm = N.log(
N.array((
(.7, .1, .1, .1),
(.1, .7, .1, .1),
(.1, .1, .7, .1),
(.1, .1, .1, .7),
(.25, .25, .25, .25)
))
)
print pssm
# make sure score is correct when given int sequence
assert 3.05 == bifa.score_word(N.exp(pssm), [0, 1, 2, 3, 4])
# make sure score is correct when given string sequence
assert 3.05 == bifa.score_word(N.exp(pssm), 'acgtn')
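# (3.05 in the two checks above appears to be the sum of the matched
# per-position probabilities, 4 * 0.7 + 0.25, since N.exp undoes the log
# taken when the PSSM was built.)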
# try too short sequence
try: bifa.score_word(pssm, [1, 1, 1, 1])
except RuntimeError: pass
else: assert not "Should have thrown error"
# hide non-int in sequence
try: bifa.score_word(pssm, [1, .25, 1, 1, 1])
except TypeError: pass
else: assert not "Should have thrown error"
# try non-int sequence
try: bifa.score_word(pssm, [.25, .25, .25, .25, .25])
except RuntimeError: pass
else: assert not "Should have thrown error"
print bifa.score_one_strand(pssm, [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
for hit in bifa.score_sequence(pssm, [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]):
print hit.p_binding, hit.location.position, hit.location.positive_strand
| 25.354167 | 76 | 0.634347 |
8844b20b9e0f00a4a313ce5abca342c4862e2740 | 5,334 | py | Python | sdk/storagepool/azure-mgmt-storagepool/azure/mgmt/storagepool/aio/operations/_resource_skus_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/storagepool/azure-mgmt-storagepool/azure/mgmt/storagepool/aio/operations/_resource_skus_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/storagepool/azure-mgmt-storagepool/azure/mgmt/storagepool/aio/operations/_resource_skus_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResourceSkusOperations:
"""ResourceSkusOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~storage_pool_management.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs: Any
) -> AsyncIterable["_models.ResourceSkuListResult"]:
"""Lists available StoragePool resources and skus in an Azure location.
:param location: The location of the resource.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceSkuListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_pool_management.models.ResourceSkuListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceSkuListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceSkuListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StoragePool/locations/{location}/skus'} # type: ignore
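# A rough usage sketch (not part of the generated operations class). The client
# class name, credential and location below are assumptions for illustration:
#
#     client = StoragePoolManagement(credential, subscription_id)
#     async for sku in client.resource_skus.list(location="eastus"):
#         print(sku)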
| 46.789474 | 141 | 0.660292 |
f3ef3cdb67d8ebaf5bd7b8d382e51dcba6bceb08 | 2,038 | py | Python | read_structure_step/write.py | paulsaxe/read_structure_step | 335c4eb39ad8556070e769fa9491ec5de22ee455 | [
"BSD-3-Clause"
] | null | null | null | read_structure_step/write.py | paulsaxe/read_structure_step | 335c4eb39ad8556070e769fa9491ec5de22ee455 | [
"BSD-3-Clause"
] | 9 | 2020-01-19T01:14:43.000Z | 2022-01-29T14:25:05.000Z | read_structure_step/write.py | paulsaxe/read_structure_step | 335c4eb39ad8556070e769fa9491ec5de22ee455 | [
"BSD-3-Clause"
] | 1 | 2022-01-14T21:50:37.000Z | 2022-01-14T21:50:37.000Z | """
The public interface to the read_structure_step SEAMM plugin
"""
from . import formats
import os
def write(
file_name,
configurations,
extension=None,
remove_hydrogens="no",
printer=None,
references=None,
bibliography=None,
):
"""
Calls the appropriate functions to parse the requested file.
Parameters
----------
file_name : str
Name of the file
configurations : [Configuration]
The SEAMM configuration(s) to write
extension : str, optional, default: None
The extension, including initial dot, defining the format.
remove_hydrogens : str = "no"
Whether to remove hydrogen atoms before writing the structure to file.
printer : Logger or Printer
A function that prints to the appropriate place, used for progress.
references : ReferenceHandler = None
The reference handler object or None
bibliography : dict
The bibliography as a dictionary.
Returns
-------
None
"""
if type(file_name) is not str:
raise TypeError(
"""write_structure_step: The file name must be a string, but a
%s was given. """
% str(type(file_name))
)
if file_name == "":
raise NameError(
"""write_structure_step: The file name for the structure file
was not specified."""
)
file_name = os.path.abspath(file_name)
if extension is None:
raise NameError("Extension could not be identified")
if extension not in formats.registries.REGISTERED_WRITERS.keys():
raise KeyError(
"write_structure_step: the file format %s was not recognized." % extension
)
writer = formats.registries.REGISTERED_WRITERS[extension]["function"]
writer(
file_name,
configurations,
extension=extension,
remove_hydrogens=remove_hydrogens,
printer=printer,
references=references,
bibliography=bibliography,
)
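# A rough usage sketch (not part of the plug-in). The path, configuration list
# and ".sdf" extension below are placeholders and assume an SDF writer is
# registered in formats.registries.REGISTERED_WRITERS:
#
#     write(
#         "/tmp/structure.sdf",
#         [configuration],
#         extension=".sdf",
#         remove_hydrogens="no",
#     )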
| 25.475 | 86 | 0.641315 |
4e829194d7c3a424afe350da61ac752e09fb4cd2 | 715 | py | Python | 1-100 game.py | mkingopng/game_theory | a7027e490d745d9e793155205666a85cd0e5dde4 | [
"MIT"
] | null | null | null | 1-100 game.py | mkingopng/game_theory | a7027e490d745d9e793155205666a85cd0e5dde4 | [
"MIT"
] | null | null | null | 1-100 game.py | mkingopng/game_theory | a7027e490d745d9e793155205666a85cd0e5dde4 | [
"MIT"
] | null | null | null | """
"""
import numpy as np
import nashpy as nash
from random import *
# can we model the 1 - 100 game with 100 players
chosen_number = {}
players = range(1, 101)  # 100 players
for player in players:
    # assume each player independently picks a number between 1 and 100
    chosen_number[player] = randint(1, 100)
print(chosen_number)
# pyBNEq  (referenced in the original but never imported, so kept commented out)
# player_1 = np.array([[3, 4], [2, 4]]) # the row player
# player_2 = np.array([[10, 7], [0, 2]]) # the column player
# game_11 = nash.Game(player_1, player_2)
# print(game_11)
#
# # find the Nash equilibrium with support enumeration
# equilibria= game_11.support_enumeration()
# for eq in equilibria:
# print(f"the equilibria for q11 are: {eq}")
#
# # answer: true, AA is one of 2 Nash equilibria in this game
| 21.666667 | 61 | 0.682517 |
24a9511c888a97deda6a2288d736592a5e0ab56f | 1,327 | py | Python | django/contrib/gis/tests/maps/tests.py | PirosB3/django | 9b729ddd8f2040722971ccfb3b12f7d8162633d1 | [
"BSD-3-Clause"
] | 2 | 2015-01-21T15:45:07.000Z | 2015-02-21T02:38:13.000Z | env/lib/python2.7/site-packages/django/contrib/gis/tests/maps/tests.py | luiscarlosgph/nas | e5acee61e8bbf12c34785fe971ce7df8dee775d4 | [
"MIT"
] | 10 | 2019-12-26T17:31:31.000Z | 2022-03-21T22:17:33.000Z | env/lib/python2.7/site-packages/django/contrib/gis/tests/maps/tests.py | luiscarlosgph/nas | e5acee61e8bbf12c34785fe971ce7df8dee775d4 | [
"MIT"
] | 1 | 2015-02-21T07:59:08.000Z | 2015-02-21T07:59:08.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.geos import HAS_GEOS
from django.test import TestCase
from django.test.utils import override_settings
GOOGLE_MAPS_API_KEY = 'XXXX'
@skipUnless(HAS_GEOS, 'Geos is required.')
class GoogleMapsTest(TestCase):
@override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
def test_google_map_scripts(self):
"""
Testing GoogleMap.scripts() output. See #20773.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap
google_map = GoogleMap()
scripts = google_map.scripts
self.assertIn(GOOGLE_MAPS_API_KEY, scripts)
self.assertIn("new GMap2", scripts)
@override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
def test_unicode_in_google_maps(self):
"""
Test that GoogleMap doesn't crash with non-ASCII content.
"""
from django.contrib.gis.geos import Point
from django.contrib.gis.maps.google.gmap import GoogleMap, GMarker
center = Point(6.146805, 46.227574)
marker = GMarker(center,
title='En français !')
google_map = GoogleMap(center=center, zoom=18, markers=[marker])
self.assertIn("En français", google_map.scripts)
| 32.365854 | 74 | 0.69254 |
8f6167912a1860e96ed43e77da0e52cc089bb5e3 | 129,521 | py | Python | tensorflow/python/framework/ops_test.py | Elizaaaaa/tensorflow | a15068b1a67175d6e95f33f8ec119ae9017f8534 | [
"Apache-2.0"
] | 2 | 2019-09-09T06:32:20.000Z | 2019-10-21T13:20:57.000Z | tensorflow/python/framework/ops_test.py | Elizaaaaa/tensorflow | a15068b1a67175d6e95f33f8ec119ae9017f8534 | [
"Apache-2.0"
] | 1 | 2019-08-15T02:49:21.000Z | 2019-09-04T10:10:59.000Z | tensorflow/python/framework/ops_test.py | Elizaaaaa/tensorflow | a15068b1a67175d6e95f33f8ec119ae9017f8534 | [
"Apache-2.0"
] | 1 | 2020-05-22T09:23:59.000Z | 2020-05-22T09:23:59.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import gc
import numpy as np
import os
import threading
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
ops._set_call_cpp_shape_fn(common_shapes.call_cpp_shape_fn)
class ResourceTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBuildGraph(self):
with self.cached_session():
pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
test_ops.resource_create_op(pt).run()
@test_util.run_deprecated_v1
def testInitialize(self):
with self.cached_session():
handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 1)
resources.initialize_resources(resources.shared_resources()).run()
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 0)
class TensorAndShapeTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEqual([1, 2, 3], t.get_shape())
def testIterable(self):
if not context.executing_eagerly():
self.skipTest("Eager-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegexp(TypeError, "Cannot iterate"):
next(iter(t))
def testIterableGraph(self):
if context.executing_eagerly():
self.skipTest("Graph-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegexp(TypeError, "iterating.*not allowed in Graph"):
next(iter(t))
with self.assertRaisesRegexp(
TypeError, "iterating.*AutoGraph did not convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
next(iter(t))
with self.assertRaisesRegexp(
TypeError, "iterating.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
next(iter(t))
def testImplicitBool(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.bool])
t = op.outputs[0]
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*not allowed in Graph"):
bool(t)
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*AutoGraph did not convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
bool(t)
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
bool(t)
def testAddShape(self):
with self.cached_session():
a = array_ops.zeros([2, 3])
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual([2, 3], c.shape)
@test_util.run_deprecated_v1
def testUnknownDim(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
b = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
c = a + b
self.assertEqual([2, None, 3], c.shape.as_list())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual(tensor_shape.unknown_shape(), c.shape)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[])
b = array_ops.ones([])
c = a + b
self.assertEqual(tensor_shape.TensorShape([]), c.shape)
@test_util.run_deprecated_v1
def testShapeFunctionError(self):
with self.cached_session():
a = array_ops.ones([1, 2, 3])
b = array_ops.ones([4, 5, 6])
with self.assertRaisesRegexp(
ValueError, r"Dimensions must be equal, but are 2 and 5 for 'add' "
r"\(op: 'Add(V2)?'\) with input shapes: \[1,2,3\], \[4,5,6\]."):
_ = a + b
def testNumpyArray(self):
with ops.Graph().as_default():
x = array_ops.ones((3, 4), name="test_ones")
with self.assertRaisesRegexp(NotImplementedError,
r"Cannot convert a symbolic.+test_ones"):
np.array(x)
with self.assertRaisesRegexp(TypeError, "not well defined.+test_ones"):
len(x)
# EagerTensors should still behave as numpy arrays.
with context.eager_mode():
x = array_ops.ones((3, 4))
self.assertAllEqual(x, np.ones((3, 4)))
self.assertAllEqual(np.array(x), np.ones((3, 4)))
self.assertEqual(len(x), 3)
def testRef(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x1.experimental_ref())
self.assertEqual(x2.experimental_ref(), x2.experimental_ref())
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
self.assertEqual(y.experimental_ref(), y.experimental_ref())
self.assertEqual(z.experimental_ref(), z.experimental_ref())
self.assertEqual(w.experimental_ref(), w.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), y.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), z.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), w.experimental_ref())
self.assertNotEqual(y.experimental_ref(), z.experimental_ref())
self.assertNotEqual(y.experimental_ref(), w.experimental_ref())
self.assertNotEqual(z.experimental_ref(), w.experimental_ref())
def testRefDeref(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertIs(x1, x1.experimental_ref().deref())
self.assertIs(x2, x2.experimental_ref().deref())
self.assertIs(x1, x2.experimental_ref().deref())
self.assertIs(x2, x1.experimental_ref().deref())
self.assertIs(y, y.experimental_ref().deref())
self.assertIs(z, z.experimental_ref().deref())
self.assertIsNot(x1, y.experimental_ref().deref())
self.assertIsNot(x1, z.experimental_ref().deref())
self.assertIsNot(x1, w.experimental_ref().deref())
self.assertIsNot(y, z.experimental_ref().deref())
self.assertIsNot(y, w.experimental_ref().deref())
self.assertIsNot(z, w.experimental_ref().deref())
def testRefInSet(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
tensor_set = {
x1.experimental_ref(),
x2.experimental_ref(),
y.experimental_ref(),
z.experimental_ref(),
w.experimental_ref(),
}
self.assertEqual(len(tensor_set), 4)
self.assertIn(x1.experimental_ref(), tensor_set)
self.assertIn(x2.experimental_ref(), tensor_set)
self.assertIn(y.experimental_ref(), tensor_set)
self.assertIn(z.experimental_ref(), tensor_set)
self.assertIn(w.experimental_ref(), tensor_set)
def testRefInDict(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
tensor_dict = {
x1.experimental_ref(): "x1",
y.experimental_ref(): "y",
z.experimental_ref(): "z",
w.experimental_ref(): "w",
}
self.assertEqual(len(tensor_dict), 4)
# Overwriting x1
tensor_dict[x2.experimental_ref()] = "x2"
self.assertEqual(len(tensor_dict), 4)
self.assertEqual(tensor_dict[x1.experimental_ref()], "x2")
self.assertEqual(tensor_dict[x2.experimental_ref()], "x2")
self.assertEqual(tensor_dict[y.experimental_ref()], "y")
self.assertEqual(tensor_dict[z.experimental_ref()], "z")
self.assertEqual(tensor_dict[w.experimental_ref()], "w")
def testTensorRefStrong(self):
x = constant_op.constant(1.)
x_ref = x.experimental_ref()
del x
self.assertIsNotNone(x_ref.deref())
def testVariableRefStrong(self):
x = variables.Variable(1.)
x_ref = x.experimental_ref()
del x
self.assertIsNotNone(x_ref.deref())
class IndexedSlicesTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testToTensor(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
dense_shape = constant_op.constant([3, 2])
x = ops.IndexedSlices(values, indices, dense_shape)
tensor = ops.convert_to_tensor(x, name="tensor")
self.assertAllEqual(self.evaluate(tensor), [[2, 3], [0, 0], [5, 7]])
@test_util.run_gpu_only
def testEagerCopy(self):
with context.eager_mode():
var = variables.Variable([[0.0], [0.0], [0.0], [0.0]], name="tensor")
with backprop.GradientTape() as tape:
a = array_ops.gather(array_ops.gather(var, [0, 1]), [0, 1])
b = array_ops.gather(array_ops.gather(var, [2, 3]), [0, 1])
r = special_math_ops.einsum("ij,ij->i", a, b)
g = tape.gradient(r, [var])[0]
values = g.values if isinstance(g, ops.IndexedSlices) else g
self.assertAllEqual(values.get_shape(), [4, 1])
@test_util.run_deprecated_v1
def testNegation(self):
with self.cached_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = -ops.IndexedSlices(values, indices)
self.assertAllEqual(x.values.eval(), [[-2, -3], [-5, -7]])
self.assertAllEqual(x.indices.eval(), [0, 2])
@test_util.run_deprecated_v1
def testScalarMul(self):
with self.cached_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
self.assertAllEqual(x.values.eval(), [[-4, -6], [-10, -14]])
self.assertAllEqual(x.indices.eval(), [0, 2])
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1._shape.rank, None)
self.assertEqual(spec1._values_dtype, dtypes.float32)
self.assertEqual(spec1._indices_dtype, dtypes.int64)
self.assertEqual(spec1._dense_shape_dtype, None)
self.assertEqual(spec1._indices_shape.as_list(), [None])
spec2 = indexed_slices.IndexedSlicesSpec([None, None], dtypes.string,
dtypes.int32, dtypes.int64, [10])
self.assertEqual(spec2._shape.as_list(), [None, None])
self.assertEqual(spec2._values_dtype, dtypes.string)
self.assertEqual(spec2._indices_dtype, dtypes.int32)
self.assertEqual(spec2._dense_shape_dtype, dtypes.int64)
self.assertEqual(spec2._indices_shape.as_list(), [10])
def testValueType(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1.value_type, ops.IndexedSlices)
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32,
dtypes.int64, None, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int64),
(tensor_shape.TensorShape(None), dtypes.int32, dtypes.int64,
dtypes.int64, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(indices_shape=[100]),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([100]))),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(dtype=dtypes.string), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
)),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.string, dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec([None, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32,
indices_shape=[20]), (
tensor_spec.TensorSpec([20, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([20], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
])
def testComponentSpecs(self, spec, expected):
self.assertEqual(spec._component_specs, expected)
@parameterized.parameters([
{
"spec": indexed_slices.IndexedSlicesSpec(),
"values": [3.0, 5.0],
"indices": [5, 10]
},
{
"spec":
indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32),
"values": [3.0, 5.0],
"indices": [5, 10],
"dense_shape": [100]
},
])
def testToFromComponents(self, spec, indices, values, dense_shape=None):
x = ops.IndexedSlices(indices, values, dense_shape)
actual_components = spec._to_components(x)
if dense_shape is None:
self.assertAllTensorsEqual(actual_components, [indices, values])
else:
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = spec._from_components(actual_components)
self.assertAllEqual(x.indices, st_reconstructed.indices)
self.assertAllEqual(x.values, st_reconstructed.values)
if dense_shape is None:
self.assertIs(st_reconstructed.dense_shape, None)
else:
self.assertAllEqual(x.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("IndexedSlicesValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([3, 8])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec1 = indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32)
st1 = spec1._from_components((values, indices, dense_shape))
self.assertIsInstance(st1, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st1.indices, indices)
self.assertAllEqual(st1.values, values)
self.assertAllEqual(st1.dense_shape, dense_shape)
spec2 = indexed_slices.IndexedSlicesSpec()
st2 = spec2._from_components((values, indices))
self.assertIsInstance(st2, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st2.indices, indices)
self.assertAllEqual(st2.values, values)
self.assertIs(st2.dense_shape, None)
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("None", "bar")
self.assertProtoEquals("op: 'None' name: 'bar'", nodedef)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
class OperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoInputs(self):
op = test_ops.float_output_string_output(name="myop").a.op
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t._value_index)
self.assertEqual(0, len(float_t.consumers()))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t._value_index)
self.assertEqual(0, len(label_str_t.consumers()))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
op.node_def)
@test_util.run_deprecated_v1
def testNoOutputs(self):
op1 = test_ops.float_output(name="myop1").op
float_t, = op1.values()
op2 = test_ops.float_input(float_t, name="myop2")
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t.consumers()))
self.assertEqual(op2, float_t.consumers()[0])
self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
op2.node_def)
@test_util.run_deprecated_v1
def testInputsAndOutputs(self):
op1 = test_ops.float_output(name="myop1").op
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = test_ops.float_output_string_output(name="myop2").a.op
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t.consumers()))
self.assertEqual(op3, float1_t.consumers()[0])
self.assertEqual(0, len(float2_t.consumers()))
self.assertEqual(2, len(label2_str_t.consumers()))
self.assertEqual(op3, label2_str_t.consumers()[0])
self.assertEqual(op3, label2_str_t.consumers()[1])
self.assertProtoEquals("""
op:'Foo2' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("None", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
op = ops.Operation(ops._NodeDef("None", "op2"), ops.Graph(), [], [])
op._set_device(
pydev.DeviceSpec(
job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
self.assertEquals([], list(op1.inputs))
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("RefInputFloatInput", "op2"),
g, [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
self.assertEquals([ref_t, nonref_t], list(op2.inputs))
op3 = ops.Operation(
ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "invalid:0"), g)
@test_util.run_deprecated_v1
def testNoShapeFunction(self):
op = test_ops.a()
self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedArray(self):
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
def testShapeTuple(self):
with self.cached_session():
c = constant_op.constant(1)
self.assertEqual(c._shape_tuple(), ()) # pylint: disable=protected-access
def testConvertToTensorEager(self):
with context.eager_mode():
t = constant_op.constant(1)
self.assertTrue(isinstance(t, ops.EagerTensor))
converted = ops.convert_to_tensor(t)
self.assertTrue(isinstance(converted, ops.EagerTensor))
converted = ops.convert_to_tensor(1)
self.assertTrue(isinstance(converted, ops.EagerTensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTuple(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(ops.convert_to_tensor(values)))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTensors(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedMix(self):
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferred(self):
values = [2, 3, 5, 7]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
self.assertEqual(dtypes.float32, tensor.dtype)
# Convert empty tensor to anything.
values = []
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
# The preferred dtype is a type error and will convert to
# float32 instead.
values = [1.23]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.float32, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToInvalidTensorType(self):
with self.assertRaises(TypeError):
# Forcing an invalid dtype should fail with a type error.
values = [1.23]
ops.convert_to_tensor(values, dtype=dtypes.int64)
@test_util.run_in_graph_and_eager_modes
def testConvertToLongLongTensorType(self):
tensor = ops.convert_to_tensor(
# Get a numpy array of dtype NPY_LONGLONG
np.prod(constant_op.constant([1])._shape_tuple()))
self.assertEqual(dtypes.int64, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorFromInvalidTensor(self):
tensor = constant_op.constant(42.0, dtype=dtypes.float32)
with self.assertRaises(ValueError):
ops.convert_to_tensor(tensor, dtype=dtypes.int32)
@test_util.run_deprecated_v1
def testNoConvert(self):
# Operation cannot be converted to Tensor.
op = control_flow_ops.no_op()
with self.assertRaisesRegexp(TypeError,
r"Can't convert Operation '.*' to Tensor"):
ops.convert_to_tensor(op)
def testStr(self):
node_def = ops._NodeDef("None", "op1")
op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
self.assertEqual(str(node_def), str(op))
def testRepr(self):
op = ops.Operation(
ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32])
self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))
@test_util.run_deprecated_v1
def testGetAttr(self):
op = test_ops.default_attrs()
self.assertEqual(op.get_attr("string_val"), b"abc")
self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
self.assertEqual(op.get_attr("int_val"), 123)
self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
self.assertEqual(op.get_attr("float_val"), 10.0)
self.assertEqual(op.get_attr("float_list_val"), [10.0])
self.assertEqual(op.get_attr("bool_val"), True)
self.assertEqual(op.get_attr("bool_list_val"), [True, False])
self.assertEqual(op.get_attr("shape_val"),
tensor_shape.as_shape([2, 1]).as_proto())
self.assertEqual(op.get_attr("shape_list_val"),
[tensor_shape.as_shape([]).as_proto(),
tensor_shape.as_shape([1]).as_proto()])
self.assertEqual(op.get_attr("tensor_val"),
tensor_util.make_tensor_proto(1, dtypes.int32))
self.assertEqual(op.get_attr("tensor_list_val"),
[tensor_util.make_tensor_proto(1, dtypes.int32)])
type_val = op.get_attr("type_val")
# First check that type_val is a DType, because the assertEquals will work
# no matter what since DType overrides __eq__
self.assertIsInstance(type_val, dtypes.DType)
self.assertEqual(type_val, dtypes.int32)
type_list_val = op.get_attr("type_list_val")
self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])
@function.Defun(dtypes.float32, func_name="MyFunc")
def func(x):
return x
op = test_ops.func_attr(func)
self.assertEqual(op.get_attr("f"),
attr_value_pb2.NameAttrList(name="MyFunc"))
# Try fetching missing attr
with self.assertRaisesRegexp(
ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
op.get_attr("FakeAttr")
# TODO(b/65162920): remove this test when users who are directly mutating the
# node_def have been updated to proper usage.
@test_util.run_deprecated_v1
def testSetAttr(self):
op = test_ops.int_attr().op
op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
# TODO(skyewm): add node_def check
self.assertEqual(op.get_attr("foo"), 2)
# TODO(nolivia): test all error cases
def testAddControlInput(self):
with ops.Graph().as_default():
x = constant_op.constant(1).op
y = constant_op.constant(2).op
z = constant_op.constant(3).op
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_inputs([x, y, y]) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x, y])
self.assertEqual(x._control_outputs, [z])
@test_util.run_deprecated_v1
def testRemoveAllControlInputs(self):
a = constant_op.constant(1)
with ops.control_dependencies([a]):
b = constant_op.constant(2)
c = constant_op.constant(3)
d = constant_op.constant(4)
e = constant_op.constant(5)
with ops.control_dependencies([a, c]):
f = d + e
self.assertEqual(a.op.control_inputs, [])
self.assertEqual(b.op.control_inputs, [a.op])
self.assertEqual(f.op.control_inputs, [a.op, c.op])
a.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(a.op.control_inputs, [])
b.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(b.op.control_inputs, [])
f.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(f.op.control_inputs, [])
self.assertEqual(list(f.op.inputs), [d, e])
@test_util.run_deprecated_v1
def testControlInputCycle(self):
graph = ops.Graph()
with graph.as_default():
z = constant_op.constant(0)
x = constant_op.constant(1)
y = constant_op.constant(2)
y.op._add_control_input(z.op) # pylint: disable=protected-access
y.op._add_control_input(x.op) # pylint: disable=protected-access
x.op._add_control_input(y.op) # pylint: disable=protected-access
with self.session(graph=graph) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Graph is invalid, contains a cycle with 2 nodes"):
self.evaluate(x)
def testUpdateInput(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = x + y
z.op._update_input(0, y) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [y, y])
self.assertEquals(x.consumers(), [])
self.assertEquals(y.consumers(), [z.op, z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 4)
z.op._update_input(0, x) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [x, y])
self.assertEquals(x.consumers(), [z.op])
self.assertEquals(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 3)
z.op._update_input(1, y) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [x, y])
self.assertEquals(x.consumers(), [z.op])
self.assertEquals(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 3)
def testUpdateInputGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
z = y * 2
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
z.op._update_input(0, x) # pylint: disable=protected-access
def testUpdateInputTypeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(0)
x = constant_op.constant("")
y = constant_op.constant(1)
z = y + w
z.op._update_input(0, x) # pylint: disable=protected-access
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Input 0 of node add was passed string from Const_1:0 incompatible "
"with expected int32"):
self.evaluate(z)
def testUpdateInputShapeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(2, shape=[3, 1])
x = constant_op.constant(0, shape=[3, 1])
y = constant_op.constant(1, shape=[2, 2])
z = w + x
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
z.op._update_input(0, y) # pylint: disable=protected-access
def testUpdateInputOutOfRange(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
with self.assertRaisesRegexp(
errors.OutOfRangeError,
r"Cannot update edge. Input index \[1\] is greater than the number of "
r"total inputs \[0\]."
):
x.op._update_input(1, x) # pylint: disable=protected-access
@test_util.enable_control_flow_v2
@test_util.run_v1_only("b/120545219")
def testAddWhileInput(self):
if forward_compat.forward_compatible(2019, 8, 23):
@eager_function.defun
def test():
output = control_flow_ops.while_loop(lambda x: x < 3, lambda x: x + 1,
[1])
while_op = output.op
self.assertEqual(while_op.type, "StatelessWhile")
orig_num_inputs = len(while_op.inputs)
# Make sure we can handle the while op having a control input.
while_op._add_control_input(constant_op.constant(0).op)
new_input1 = constant_op.constant(1.0)
new_input2 = constant_op.constant(True)
# Clear output shapes to bypass shape checking.
while_op._set_shape_list_attr("output_shapes", [])
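        # "T" must cover the new inputs before _add_while_inputs will accept
        # them (see the OutOfRangeError check below).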
while_op._set_type_list_attr("T",
[t.dtype for t in while_op.inputs] +
[new_input1.dtype, new_input2.dtype])
while_op._add_while_inputs([new_input1, new_input2])
# Can't add an edge beyond what's specified by "T"
with self.assertRaises(errors.OutOfRangeError):
while_op._add_while_inputs([new_input2])
self.assertEqual(len(while_op.inputs), orig_num_inputs + 2) # pylint: disable=g-deprecated-assert
test()
@test_util.run_deprecated_v1
def testOpDef(self):
x = constant_op.constant(0)
y = constant_op.constant(1)
z = x + y
self.assertEqual(x.op.op_def.name, "Const")
self.assertEqual(len(x.op.op_def.input_arg), 0)
self.assertEqual(len(x.op.op_def.output_arg), 1)
self.assertRegexpMatches(z.op.op_def.name, "Add(V2)?")
self.assertEqual(len(z.op.op_def.input_arg), 2)
self.assertEqual(len(z.op.op_def.output_arg), 1)
def testInputFromDifferentGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
y * x # pylint: disable=pointless-statement
def testInputsAreImmutable(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
op = test_ops.int_input_int_output(x, name="myop").op
with self.assertRaisesRegexp(
AttributeError, "'tuple' object has no attribute 'append'"):
op.inputs.append(None)
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op(
"FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
name="myop2")
op3 = g.create_op(
"Foo3",
[list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", op1.node_def)
self.assertProtoEquals(
"name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op(
"RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op(
"RefInputFloatInput", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# Test unfinalize.
g._unsafe_unfinalize()
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c_op = ops._create_c_op(
g, ops._NodeDef("IntInputIntOutput", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "IntInputIntOutput")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.unknown_shape())
self.assertEqual(list(op.inputs), [x])
self.assertEqual(op.control_inputs, [])
self.assertEqual(op.graph, g)
self.assertEqual(x.consumers(), [op])
self.assertIsNotNone(op.traceback)
self.assertEqual(g.get_operation_by_name("myop"), op)
self.assertEqual(g.get_tensor_by_name("myop:0"), op.outputs[0])
def testShape(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "Identity")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.TensorShape([2, 3]))
def testUniqueName(self):
g = ops.Graph()
with g.as_default():
c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
c_op2 = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop_1"), [], [])
op = g._create_op_from_tf_operation(c_op)
op2 = g._create_op_from_tf_operation(c_op2)
      # Create ops with the same names as op and op2. We expect the new names
      # to be uniquified.
op3 = test_ops.int_output(name="myop").op
op4 = test_ops.int_output(name="myop_1").op
self.assertEqual(op.name, "myop")
self.assertEqual(op2.name, "myop_1")
self.assertEqual(op3.name, "myop_2")
self.assertEqual(op4.name, "myop_1_1")
@test_util.run_v1_only("b/120545219")
def testCond(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def true_fn():
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "cond/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return x
control_flow_ops.cond(x < 10, true_fn, lambda: x)
op = g.get_operation_by_name("cond/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "cond/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Switch")
self.assertEqual(op_input.inputs[0], x)
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"cond/cond_text")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "myloop/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Enter")
self.assertEqual(list(op_input.inputs), [x])
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"myloop/while_context")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithInternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
c = constant_op.constant(1.0, name="c")
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
c = g.get_operation_by_name("myloop/c")
self.assertIsNotNone(c)
# Internal control dep is preserved
self.assertEqual(op.control_inputs, [c])
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithExternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c = constant_op.constant(1.0)
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
# External control dep is removed and replaced with internal control dep
self.assertNotEqual(op.control_inputs[0], c.op)
self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
with g.device("/device:GPU:0"):
t2 = _apply_op(
g, "TwoIntOutputs", [], [dtypes.int32, dtypes.int32], name="myop2")
t3 = _apply_op(
g,
"Foo1", [t1, t2[1], t2[0]], [dtypes.float32, dtypes.int32],
name="myop3")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, list))
self.assertTrue(isinstance(t3, list))
self.assertTrue(isinstance(t3[0], ops.Tensor))
self.assertEqual("myop1", t1._as_node_def_input())
self.assertEqual("myop2", t2[0]._as_node_def_input())
self.assertEqual("myop2:1", t2[1]._as_node_def_input())
self.assertEqual("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", t1.op.node_def)
self.assertProtoEquals(
"name:'myop2' op:'TwoIntOutputs' device:'/device:GPU:0'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo1'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(
g,
"RefInputFloatInputIntOutput", [ref_t, nonref_t], [dtypes.int32],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInputIntOutput' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(
g, "TwoFloatInputsIntOutput", [ref_t, nonref_t], [dtypes.int32],
name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputsIntOutput' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
def testBasics(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_2", g.unique_name("foo"))
self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_1", g.unique_name("foo_1"))
self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_2", g.unique_name("foo_1"))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo", g.unique_name("foo"))
self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz_1/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz_1/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEqual(
"bar_1/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_4", g.unique_name("foo"))
self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
self.assertEqual("bar_2", g.unique_name("bar"))
@test_util.run_deprecated_v1
def testNameAndVariableScope(self):
with self.cached_session() as sess:
with sess.graph.name_scope("l0"):
with variable_scope.variable_scope("l1"):
with sess.graph.name_scope("l1") as scope:
self.assertEqual("l0/l1/l1/", scope)
self.assertEqual(
"l0/l1/l1/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
with sess.graph.name_scope("l2") as scope:
self.assertEqual("l0/l1/l2/", scope)
self.assertEqual(
"l0/l1/l2/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
self.assertEqual("foo_2", g.unique_name("foo_2"))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_3", g.unique_name("foo"))
def testUniqueNameCaseInsensitivity(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("Foo_1", g.unique_name("Foo"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo"))
with g.name_scope("Bar"):
self.assertEqual("Bar_1/foo", g.unique_name("foo"))
def testInvalidNameRaisesError(self):
g = ops.Graph()
with g.name_scope(""): # Should not raise
pass
with g.name_scope("foo/"): # Should not raise
with g.name_scope("_bar"): # Should not raise
pass
with self.assertRaises(ValueError):
with g.name_scope("foo:0"):
pass
with self.assertRaises(ValueError):
with g.name_scope("_bar"):
pass
class NameTest(test_util.TensorFlowTestCase):
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
self.assertEqual("TwoFloatOutputs", op0.name)
self.assertEqual("TwoFloatOutputs:0", op0.outputs[0].name)
self.assertEqual("TwoFloatOutputs:1", op0.outputs[1].name)
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput", op1.name)
self.assertEqual("FloatOutput:0", op1.outputs[0].name)
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput_1", op2.name)
self.assertEqual("FloatOutput_1:0", op2.outputs[0].name)
op3 = g.create_op("FloatOutput", [], [dtypes.float32], name="my_op")
self.assertEqual("my_op", op3.name)
self.assertEqual("my_op:0", op3.outputs[0].name)
def testNameScope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with g.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with g.name_scope(None) as empty1:
self.assertEqual("", empty1)
with g.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with g.name_scope("") as empty2:
self.assertEqual("", empty2)
self.assertEqual("FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar") as scope:
self.assertEqual("bar/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual("bar/FloatOutput_1",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
      # If you use the value from "with .. as", that value is used as-is.
self.assertEqual(
"bar", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEqual("baz/quux/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEqual("baz/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual(
"baz", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
self.assertEqual(
"trailing",
g.create_op(
"FloatOutput", [], [dtypes.float32], name="trailing/").name)
with g.name_scope("bar"):
self.assertEqual("bar_1/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar/"):
self.assertEqual("bar/FloatOutput_2",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertDeviceEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput" }
""", gd)
def testEagerBackingDevice(self):
with context.eager_mode():
with ops.device("/device:CPU:0"):
t = constant_op.constant(1.0)
self.assertRegexpMatches(t.device, "/device:CPU:0")
self.assertRegexpMatches(t.backing_device, "/device:CPU:0")
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(
pydev.DeviceSpec(
job="worker", replica=2, task=0, device_type="CPU",
device_index=3)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device(None)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStrings(self):
g = ops.Graph()
with g.device("/device:GPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(""):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStringWildcard(self):
g = ops.Graph()
with g.device("/device:GPU:7"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:GPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:5"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/device:CPU:*" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/device:CPU:5" }
""", gd)
def testNestingErrorGraph(self):
g = ops.Graph()
scope = g.device("/device:GPU:8")
scope.__enter__()
with g.device("/device:GPU:9"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNestingErrorEager(self):
with context.eager_mode():
scope = ops.device("/device:CPU:0")
scope.__enter__()
with ops.device(None):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNoneIgnoresOuterDeviceFunction(self):
g = ops.Graph()
with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def _overwritingDeviceFunction(self, unused_op):
# This device function unconditionally overwrites the device of ops.
#
# NOTE(mrry): Writing device functions like this is not
# recommended. Instead, in most cases you should use
# `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
# argument to `tf.device()` and the device component will be merged in.
return "/job:overwrite"
def testOverwritingBehavior(self):
g = ops.Graph()
with g.device(self._overwritingDeviceFunction):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps" }
""", gd)
class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
class TestThread(threading.Thread):
def __init__(self, graph, replica_id):
super(MultithreadedGraphStateTest.TestThread, self).__init__()
self._graph = graph
self._replica_id = replica_id
# This thread sets this event when it mutated the graph. The caller can
# wait for that.
self.has_mutated_graph = threading.Event()
# This thread waits for when it should continue. The caller can set this
# event.
self.should_continue = threading.Event()
def run(self):
# Mutate a graph's stack, then set `has_mutated_graph`, then wait for
# `should_continue`, then add an op to the graph affected by the graph's
# stack.
raise NotImplementedError("must be implemented in descendants")
def testDeviceFunctionStack(self):
class DeviceSettingThread(self.TestThread):
def run(self):
with g.device("/job:worker/replica:{}".format(self._replica_id)):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
    # If `switch_to_thread_local` isn't called, then device placement of the
    # ops below is not deterministic.
g.switch_to_thread_local()
threads = [DeviceSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testColocateWith(self):
class ColocatingThread(self.TestThread):
def __init__(self, graph, replica_id, op_to_colocate_with):
super(ColocatingThread, self).__init__(graph, replica_id)
self._op_to_colocate_with = op_to_colocate_with
def run(self):
with g.colocate_with(self._op_to_colocate_with):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
ops_to_colocate_with = []
for i in range(3):
with g.device("/job:worker/replica:{}".format(i)):
ops_to_colocate_with.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local` isn't called, then `device` and `attr` values
    # for the ops below are not deterministic.
g.switch_to_thread_local()
threads = [
ColocatingThread(g, i, ops_to_colocate_with[i]) for i in range(3)
]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "ColocateWithMe_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "ColocateWithMe_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_0"}}}}
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_1"}}}}
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_2"}}}}
""", gd)
def testControlDependencies(self):
class DependingThread(self.TestThread):
def __init__(self, graph, replica_id, dependency_op):
super(DependingThread, self).__init__(graph, replica_id)
self._dependency_op = dependency_op
def run(self):
with g.control_dependencies([self._dependency_op]):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
dependency_ops = []
for i in range(3):
dependency_ops.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local` isn't called, then `input` values for the ops
    # below are not deterministic.
g.switch_to_thread_local()
threads = [DependingThread(g, i, dependency_ops[i]) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput" }
node { name: "ColocateWithMe_1" op: "FloatOutput" }
node { name: "ColocateWithMe_2" op: "FloatOutput" }
node { name: "FloatOutput_0" op: "FloatOutput"
input: "^ColocateWithMe_0" }
node { name: "FloatOutput_1" op: "FloatOutput"
input: "^ColocateWithMe_1" }
node { name: "FloatOutput_2" op: "FloatOutput"
input: "^ColocateWithMe_2" }
""", gd)
def testNameStack(self):
class NameSettingThread(self.TestThread):
def run(self):
with g.name_scope("foo"):
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.result = (op1, op2)
g = ops.Graph()
threads = [NameSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
suffixes = ["", "_1", "_2"]
for t, s in zip(threads, suffixes):
self.assertEquals("foo" + s + "/FloatOutput", t.result[0].name)
self.assertEquals("foo" + s + "/FloatOutput_1", t.result[1].name)
class ObjectWithName(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class CollectionTest(test_util.TensorFlowTestCase):
def test_get_collections(self):
g = ops.Graph()
self.assertSequenceEqual(g.collections, [])
g.add_to_collection("key", 12)
g.add_to_collection("key", 15)
self.assertSequenceEqual(g.collections, ["key"])
g.add_to_collection("other", "foo")
self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
self.assertSequenceEqual(
sorted(g.get_all_collection_keys()), ["key", "other"])
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
    # Of the "blah" entries added below, only blank1 matches the scope filters
    # used in the get_collection("blah", ...) assertions further down.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
self.assertFalse(empty_coll is empty_coll_ref)
empty_coll_ref2 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref2 is empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref3 is empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
def test_defun(self):
with context.eager_mode():
@eager_function.defun
def defun():
ops.add_to_collection("int", 1)
ops.add_to_collection("tensor", constant_op.constant(2))
@eager_function.defun
def inner_defun():
self.assertEqual(ops.get_collection("int"), [1])
three = ops.get_collection("tensor")[0] + ops.get_collection("int")[0]
ops.add_to_collection("int", 2)
self.assertEqual(ops.get_collection("int"), [1, 2])
ops.add_to_collection("foo", "bar")
self.assertEqual(ops.get_collection("foo"), ["bar"])
return three
self.assertEqual(ops.get_collection("int"), [1])
three = inner_defun()
self.assertEqual(ops.get_collection("int"), [1])
self.assertEqual(ops.get_collection("foo"), [])
return three
three = defun()
self.assertEqual(three.numpy(), 3)
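# Module-level gradient registrations. FloatOutput is marked as
# non-differentiable, and the two @ops.RegisterGradient functions below are
# looked up by RegistrationTest.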
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testRegisterGradients(self):
x = test_ops.float_output()
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "copy_override"}):
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "unknown_override"}):
y = test_ops.copy_op(x)
with self.assertRaisesRegexp(LookupError, "unknown_override"):
ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
t2 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop2")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, ops.Tensor))
self.assertTrue(t1 in [t1])
self.assertTrue(t1 not in [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
# Creating unregistered ops with _apply_op() doesn't work with the C API
# TODO(skyewm): address this more consistently. Possible solutions are
# to use registered ops in all tests, create a way to register ops in
# Python tests, or conditionally disable the op registration check in
# the C API.
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
with g.control_dependencies([a]):
c = constant_op.constant(1.0)
d = array_ops.identity(b)
e = array_ops.identity(c)
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
      # e has a data dependency on c, which already carries the control edge
      # from a, so no extra control input is added to e.
self.assertEqual(e.op.control_inputs, [])
@test_util.run_in_graph_and_eager_modes
def testEager(self):
def future():
future.calls += 1
return constant_op.constant(2.0)
future.calls = 0
if context.executing_eagerly():
a = constant_op.constant(1.0)
b = future
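      # In eager mode, control_dependencies also accepts callables such as
      # `future`, which is invoked exactly once (checked below).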
with ops.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(future.calls, 1)
else:
g = ops.Graph()
with g.as_default():
a = constant_op.constant(1.0)
b = future()
with g.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(c.op.control_inputs, [a.op, b.op])
self.assertEqual(future.calls, 1)
def testBasicWithConversion(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
class ConvertibleObj(object):
def _as_graph_element(self):
return a
with g.control_dependencies([ConvertibleObj()]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testClear(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies(None):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies(None):
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
    # Usage pattern:
    # * Nodes a_i are FloatOutput ops created at the outermost scope, and are
    #   used as control inputs for the ith nested scope.
    # * Nodes b_i are two-input ops on (a_3, a_4) defined at each scope.
    # * Nodes c_i are two-input ops on (a_1, b_1) defined at each scope.
    # * Nodes d_i are two-input ops on (b_i, c_i) defined at each scope.
    # * Nodes e_i are two-input ops on (e_i-1, e_i-1) defined at each scope
    #   i > 1; e_1 is a plain FloatOutput.
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
[dtypes.float32])
e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
[dtypes.float32])
e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
[dtypes.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
[dtypes.float32])
e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
[dtypes.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
[dtypes.float32])
e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
[dtypes.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "Identity", [a], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [])
class OpScopeTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testNames(self):
with ops.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with ops.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with ops.name_scope(None) as empty1:
self.assertEqual("", empty1)
with ops.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with ops.name_scope("") as empty2:
self.assertEqual("", empty2)
with ops.name_scope("foo/") as outer_foo:
self.assertEqual("foo/", outer_foo)
with ops.name_scope("") as empty3:
self.assertEqual("", empty3)
with ops.name_scope("foo4") as foo4:
self.assertEqual("foo/foo4/", foo4)
with ops.name_scope("foo5//") as foo5:
self.assertEqual("foo5//", foo5)
with ops.name_scope("foo6") as foo6:
self.assertEqual("foo5//foo6/", foo6)
with ops.name_scope("/") as foo7:
self.assertEqual("/", foo7)
with ops.name_scope("//") as foo8:
self.assertEqual("//", foo8)
with ops.name_scope("a//b/c") as foo9:
self.assertEqual("foo/a//b/c/", foo9)
with ops.name_scope("a//b/c") as foo10:
self.assertEqual("a//b/c/", foo10)
@test_util.run_in_graph_and_eager_modes
def testEagerDefaultScopeName(self):
with ops.name_scope(None, "default") as scope:
self.assertEqual(scope, "default/")
with ops.name_scope(None, "default2") as scope2:
self.assertEqual(scope2, "default/default2/")
@test_util.run_in_graph_and_eager_modes
def testNameScopeV2IsReEntrant(self):
foo = ops.name_scope_v2("foo")
bar = ops.name_scope_v2("bar")
with foo as scope_name:
self.assertEqual("foo/", scope_name)
with foo as scope_name:
self.assertEqual("foo/foo/", scope_name)
with bar as scope_name:
self.assertEqual("foo/bar/", scope_name)
with foo as scope_name:
self.assertEqual("foo/bar/foo/", scope_name)
with bar as scope_name:
self.assertEqual("bar/", scope_name)
@test_util.run_deprecated_v1
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("A", [], [dtypes.float32]),
g0.create_op("B", [], [dtypes.float32])
]
with self.assertRaises(ValueError):
with ops.name_scope(None, values=values):
pass
with self.assertRaises(ValueError):
with ops.name_scope(None, None, values):
pass
@test_util.run_deprecated_v1
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
with ops.name_scope("", values=[a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope("", "my_default_scope", [a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
@test_util.run_deprecated_v1
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope(None, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with self.assertRaises(TypeError):
with ops.name_scope(scope_name, [a, b]):
pass
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.name_scope(scope_name, values=graph_elements) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
a = g1.create_op("A", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.name_scope(scope_name, values=graph_elements + [a]):
pass
@test_util.run_deprecated_v1
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, b])
@test_util.run_deprecated_v1
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "Int64Output", [], [dtypes.int64]),
_apply_op(g0, "FloatOutput", [], [dtypes.float32]),
_apply_op(g0, "Int64Output", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
@test_util.run_deprecated_v1
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
class InitScopeTest(test_util.TensorFlowTestCase):
def testClearsControlDependencies(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.as_default():
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with ops.init_scope():
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with ops.init_scope():
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testLiftsOpsFromFunctions(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with g2.as_default():
with ops.init_scope():
_ = constant_op.constant(1.0)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
def testPreservesDevices(self):
g0 = ops.Graph()
with g0.as_default(), ops.device("CPU:0"):
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
with g1.as_default():
with ops.device("GPU:0"):
with ops.init_scope():
# init_scope should preserve device set under `g1`.
on_gpu = constant_op.constant(1.0)
self.assertEqual(on_gpu.device, "/device:GPU:0")
still_on_gpu = constant_op.constant(1.0)
self.assertEqual(still_on_gpu.device, "/device:GPU:0")
blank = constant_op.constant(1.0)
self.assertEqual(blank.device, "")
with ops.init_scope():
now_on_cpu = constant_op.constant(1.0)
self.assertEqual(now_on_cpu.device, "/device:CPU:0")
on_cpu = constant_op.constant(1.0)
self.assertEqual(on_cpu.device, "/device:CPU:0")
def testComposes(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
g3 = ops.Graph()
g3._building_function = False # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
with g2.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
with g3.as_default():
with ops.init_scope():
# This op should be lifted into g3, because g3 is not building a
# function.
_ = constant_op.constant(1.0)
self.assertIs(g3, ops.get_default_graph())
self.assertEqual(len(g3.get_operations()), 1)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 2)
def testEscapesToEagerContext(self):
g = ops.Graph()
g._building_function = True # pylint: disable=protected-access
with context.eager_mode():
with context.graph_mode():
with g.as_default():
with ops.init_scope():
# Because g is building a function, init_scope should
# escape out to the eager context.
self.assertTrue(context.executing_eagerly())
# g should be reinstated as the default graph, and the
# graph context should be re-entered.
self.assertIs(g, ops.get_default_graph())
self.assertFalse(context.executing_eagerly())
def testStaysInEagerWhenOnlyEagerContextActive(self):
with context.eager_mode():
with ops.init_scope():
self.assertTrue(context.eager_mode())
self.assertTrue(context.eager_mode())
def testEscapesDefunWhenInEagerMode(self):
def function_with_variables():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(3)
return self.v.assign_add(1)
with context.eager_mode():
# Each invocation of function_with_variables recreates a variable.
self.assertEqual(4, int(function_with_variables()))
self.assertEqual(4, int(function_with_variables()))
compiled = eager_function.defun(function_with_variables)
# The init_scope in function_with_variables lifts the variable out
# of the graph function constructed by defun; hence,
# compiled now appears to be stateful.
self.assertEqual(4, int(compiled()))
self.assertEqual(5, int(compiled()))
def testEscapesDefunWhenInGraphMode(self):
def function_with_variables(name):
with ops.init_scope():
_ = variable_scope.get_variable(name, shape=(1,))
g = ops.Graph()
with g.as_default():
with self.cached_session():
# First ensure that graphs that are not building functions are
# not escaped.
function_with_variables("foo")
with self.assertRaisesRegexp(ValueError,
r"Variable foo already exists.*"):
# This will fail because reuse is not set to True.
function_with_variables("foo")
compiled = eager_function.defun(function_with_variables)
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
# The second call to `compiled` should not create variables: the
# init_scope has lifted the variable creation code out of the defun.
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
def testEscapesNestedDefun(self):
def inner_function():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(1)
return self.v.assign_add(2)
def outer_function(inner=None):
with ops.init_scope():
self.v0 = resource_variable_ops.ResourceVariable(0)
return self.v0.assign_add(1) + inner()
with context.eager_mode():
# Each invocation of outer_function recreates variables.
self.assertEqual(4, int(outer_function(inner=inner_function)))
self.assertEqual(4, int(outer_function(inner=inner_function)))
compiled_inner = eager_function.defun(inner_function)
compiled_outer = eager_function.defun(outer_function)
# The init_scope lifts variables out of the graph functions
# constructed by defun; hence, compiled_outer should now appear to be
# stateful.
self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))
@test_util.run_v1_only("b/120545219")
def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
with context.graph_mode():
ops.reset_default_graph()
# This doesn't push anything onto the graph stack, but it does
# set the stack's global graph.
global_graph = ops.get_default_graph()
fn_graph = ops.Graph()
# pylint: disable=protected-access
fn_graph._building_function = True
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with fn_graph.as_default():
self.assertEqual(len(ops._default_graph_stack.stack), 1)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 1)
dummy = constant_op.constant(1.0)
self.assertEqual(len(ops._default_graph_stack.stack), 1)
# Note that the global graph is _not_ on the graph stack.
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# Ensure that `dummy` was added to the global graph.
self.assertEqual(global_graph, dummy.graph)
# pylint: enable=protected-access
def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
with context.graph_mode():
# pylint: disable=protected-access
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 0)
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# pylint: enable=protected-access
def testPreservesNameScopeInGraphConstruction(self):
with ops.Graph().as_default():
function_graph = ops.Graph()
with function_graph.as_default():
with ops.name_scope("inner"), ops.init_scope():
self.assertEqual(ops.get_name_scope(), "inner")
self.assertEqual(ops.get_name_scope(), "")
def testEnteringGraphFromEagerIsSticky(self):
with context.eager_mode():
g = ops.Graph()
with g.as_default():
with ops.init_scope():
self.assertFalse(context.executing_eagerly())
self.assertEqual(g, ops.get_default_graph())
def testMixGraphEager(self):
with context.eager_mode():
c = constant_op.constant(1.0)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
RuntimeError, "Attempting to capture an EagerTensor"):
math_ops.add(c, c)
c2 = constant_op.constant(2.0)
with self.assertRaisesRegexp(
TypeError, "Graph tensors"):
math_ops.add(c2, c2)
def testPreservesNameScopeInEagerExecution(self):
with context.eager_mode():
def foo():
with ops.name_scope("inner"), ops.init_scope():
if context.executing_eagerly():
# A trailing slash is always appended when eager execution is
# enabled.
self.assertEqual(context.context().scope_name, "inner/")
else:
self.assertEqual(ops.get_name_scope(), "inner")
foo()
self.assertEqual(ops.get_name_scope(), "")
foo_compiled = eager_function.defun(foo)
foo_compiled()
self.assertEqual(ops.get_name_scope(), "")
def testExecutingEagerlyOutsideFunctions(self):
@def_function.function
def f():
return ops.executing_eagerly_outside_functions()
with context.graph_mode():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
# Need self.evaluate for these as the return type of functions is
# tensors.
self.assertFalse(self.evaluate(f()))
with context.eager_mode():
self.assertTrue(ops.executing_eagerly_outside_functions())
self.assertTrue(f())
with ops.Graph().as_default():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
self.assertFalse(self.evaluate(f()))
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testResetDefaultGraphNesting(self):
g0 = ops.Graph()
with self.assertRaises(AssertionError):
with g0.as_default():
ops.reset_default_graph()
def testGraphContextManagerCancelsEager(self):
with context.eager_mode():
with ops.Graph().as_default():
self.assertFalse(context.executing_eagerly())
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
g0 = ops.Graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self.assertTrue(ops.has_default_graph())
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
self.assertFalse(ops.has_default_graph())
def testPreventFeeding(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_feedable(a))
g.prevent_feeding(a)
self.assertFalse(g.is_feedable(a))
@test_util.run_deprecated_v1
def testPreventFetching(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_fetchable(a))
g.prevent_fetching(a.op)
self.assertFalse(g.is_fetchable(a))
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "FloatOutput:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
# Regression test against creating custom __del__ functions in classes
# involved in cyclic references, e.g. Graph and Operation. (Python won't gc
# cycles that require calling a __del__ method, because the __del__ method can
# theoretically increase the object's refcount to "save" it from gc, and any
  # already-deleted objects in the cycle would have to be restored.)
def testGarbageCollected(self):
# Create a graph we can delete and a weak reference to monitor if it's gc'd
g = ops.Graph()
g_ref = weakref.ref(g)
# Create some ops
with g.as_default():
a = constant_op.constant(2.0)
b = constant_op.constant(3.0)
c = math_ops.add(a, b)
# Create a session we can delete
with session.Session(graph=g) as sess:
self.evaluate(c)
# Delete all references and trigger gc
del g
del a
del b
del c
del sess
gc.collect()
self.assertIsNone(g_ref())
def testRunnableAfterInvalidShape(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
math_ops.add([1, 2], [1, 2, 3])
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
def testRunnableAfterInvalidShapeWithKernelLabelMap(self):
g = ops.Graph()
with g.as_default():
with g._kernel_label_map({"KernelLabelRequired": "overload_1"}):
with self.assertRaises(ValueError):
test_ops.kernel_label_required(1)
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
class AttrScopeTest(test_util.TensorFlowTestCase):
def _get_test_attrs(self):
x = control_flow_ops.no_op()
try:
a = compat.as_text(x.get_attr("_A"))
except ValueError:
a = None
try:
b = compat.as_text(x.get_attr("_B"))
except ValueError:
b = None
return (a, b)
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual((None, None), self._get_test_attrs())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
a1 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
}):
a2 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": None,
"_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
}):
a3 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
}):
a4 = self._get_test_attrs()
a5 = self._get_test_attrs()
a6 = self._get_test_attrs()
a7 = self._get_test_attrs()
self.assertAllEqual((None, None), a1)
self.assertAllEqual(("foo", None), a2)
self.assertAllEqual((None, "bar"), a3)
self.assertAllEqual(("baz", "bar"), a4)
self.assertAllEqual((None, "bar"), a5)
self.assertAllEqual(("foo", None), a6)
self.assertAllEqual((None, None), a7)
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)
class KernelLabelTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual(b"My label is: default",
test_ops.kernel_label().eval())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
default_1 = test_ops.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_ops.kernel_label()
overload_1_2 = test_ops.kernel_label()
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", self.evaluate(default_1))
self.assertAllEqual(b"My label is: default", self.evaluate(default_2))
self.assertAllEqual(b"My label is: default", self.evaluate(default_3))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_1))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_2))
self.assertAllEqual(b"My label is: overload_2", self.evaluate(overload_2))
class AsGraphDefTest(test_util.TensorFlowTestCase):
def testGraphDefVersion(self):
"""Test that the graphdef version is plumbed through to kernels."""
with ops.Graph().as_default() as g:
version = g.graph_def_versions.producer
with self.session(graph=g):
v = test_ops.graph_def_version().eval()
self.assertEqual(version, v)
def testAddShapes(self):
with ops.Graph().as_default() as g:
t1, t2, t3, t4, t5 = _apply_op(g, "FiveFloatOutputs", [],
[dtypes.float32] * 5)
t1.set_shape(None)
t2.set_shape([])
t3.set_shape([None])
t4.set_shape([43, 37])
t5.set_shape([43, None])
b = constant_op.constant(1.0) # pylint: disable=unused-variable
gd = g.as_graph_def(add_shapes=True)
self.assertProtoEqualsVersion("""
node { name: "FiveFloatOutputs" op: "FiveFloatOutputs"
attr {
key: "_output_shapes"
value {
list {
shape { unknown_rank: true }
shape { }
shape { dim { size: -1 } }
shape { dim { size: 43 } dim { size: 37 } }
shape { dim { size: 43 } dim { size: -1 } }
}
}
}
}
node { name: "Const" op: "Const"
attr {
key: "_output_shapes"
value {
list {
shape { }
}
}
}
attr {
key: "dtype"
value { type: DT_FLOAT }
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape { }
float_val: 1.0 } } } }
""", gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
def testRegisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("a", "an_a")
flops = ops.get_stats_for_node_def(graph, node, "flops")
self.assertEqual(20, flops.value)
missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
self.assertEqual(None, missing_stat.value)
def testUnregisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("b", "a_b")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
self.assertEqual(None, weight_params.value)
def testAccumulateStatistics(self):
flops_total = ops.OpStats("flops")
self.assertEqual(None, flops_total.value)
second_flops = ops.OpStats("flops", 3)
flops_total += second_flops
self.assertEqual(3, flops_total.value)
class DeviceStackTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasicDeviceAssignmentMetadata(self):
def device_func(unused_op):
return "/cpu:*"
const_zero = constant_op.constant([0.0], name="zero")
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.device("/cpu:0"):
const_two = constant_op.constant([2.0], name="two")
with ops.device(device_func):
const_three = constant_op.constant(3.0, name="three")
self.assertEqual(0, len(const_zero.op._device_assignments))
one_list = const_one.op._device_assignments
self.assertEqual(1, len(one_list))
self.assertEqual("/cpu", one_list[0].obj)
self.assertEqual("ops_test.py", os.path.basename(one_list[0].filename))
two_list = const_two.op._device_assignments
self.assertEqual(2, len(two_list))
devices = [t.obj for t in two_list]
self.assertEqual(set(["/cpu", "/cpu:0"]), set(devices))
three_list = const_three.op._device_assignments
self.assertEqual(1, len(three_list))
func_description = three_list[0].obj
expected_regex = r"device_func<.*ops_test.py, [0-9]+"
self.assertRegexpMatches(func_description, expected_regex)
@test_util.run_deprecated_v1
def testDeviceAssignmentMetadataForGraphDeviceAndTfDeviceFunctions(self):
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.get_default_graph().device("/cpu"):
const_two = constant_op.constant([2.0], name="two")
one_metadata = const_one.op._device_assignments[0]
two_metadata = const_two.op._device_assignments[0]
# Verify both types of device assignment return the right stack info.
self.assertRegexpMatches("ops_test.py",
os.path.basename(one_metadata.filename))
self.assertEqual(one_metadata.filename, two_metadata.filename)
self.assertEqual(one_metadata.lineno + 2, two_metadata.lineno)
class ColocationGroupTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
@test_util.run_deprecated_v1
def testBasicColocationMetadata(self):
const_two = constant_op.constant([2.0], name="two")
with ops.colocate_with(const_two.op):
const_three = constant_op.constant(3.0, name="three")
locations_dict = const_three.op._colocation_dict
self.assertIn("two", locations_dict)
metadata = locations_dict["two"]
self.assertIsNone(metadata.obj)
# Check that this test's filename is recorded as the file containing the
# colocation statement.
self.assertEqual("ops_test.py", os.path.basename(metadata.filename))
@test_util.run_deprecated_v1
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/device:GPU:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
@test_util.run_deprecated_v1
def testColocationCanonicalization(self):
with ops.device("/device:GPU:0"):
_ = constant_op.constant(2.0)
with ops.device(lambda op: "/device:GPU:0"):
b = constant_op.constant(3.0)
with ops.get_default_graph().colocate_with(b):
with ops.device("/device:GPU:0"):
c = constant_op.constant(4.0)
# A's device will be /device:GPU:0
# B's device will be /device:GPU:0
# C's device will be /device:GPU:0 because it
# inherits B's device name, after canonicalizing the names.
self.assertEqual(b.op.device, c.op.device)
@test_util.run_deprecated_v1
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/device:GPU:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
@test_util.run_deprecated_v1
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocateWithReset(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(None, ignore_existing=True):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@c"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateWithInitialNoneThenNested(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
with ops.colocate_with(None, ignore_existing=True):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(b.op):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@b"], b.op.colocation_groups())
self.assertEqual([b"loc:@b"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
def testColocateWithVariableInFunction(self):
v = variables.Variable(1.)
@def_function.function
def f():
with ops.colocate_with(v):
return array_ops.ones([], name="output")
f()
graph_def = f.get_concrete_function().graph.as_graph_def()
wrap_function.function_from_graph_def(graph_def, [], ["output"])
class DeprecatedTest(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, 7)
old = test_ops.old()
with self.session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(NotImplementedError, self._error()):
test_ops.old()
class DenseTensorLikeTypeTest(test_util.TensorFlowTestCase):
def testSuccess(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(ops.is_dense_tensor_like(t))
v = variables.Variable([17])
self.assertTrue(ops.is_dense_tensor_like(v))
class BadClassNoName(object):
pass
class BadClassBadName(object):
def name(self):
pass
class BadClassNoDtype(object):
@property
def name(self):
pass
class BadClassBadDtype(object):
@property
def name(self):
pass
def dtype(self):
pass
def testBadClass(self):
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoName)
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadName)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoDtype)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadDtype)
class NameScopeTest(test_util.TensorFlowTestCase):
def testStripAndPrependScope(self):
strs = [
"hidden1/hidden1/weights", # Same prefix. Should strip.
"hidden1///hidden1/weights", # Extra "/". Should strip.
"^hidden1/hidden1/weights", # Same prefix. Should strip.
"loc:@hidden1/hidden1/weights", # Same prefix. Should strip.
"hhidden1/hidden1/weights", # Different prefix. Should keep.
"hidden1"
] # Not a prefix. Should keep.
expected_striped = [
"hidden1/weights", "hidden1/weights", "^hidden1/weights",
"loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
]
expected_prepended = [
"hidden2/hidden1/weights", "hidden2/hidden1/weights",
"^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
"hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
]
name_scope_to_strip = "hidden1"
name_scope_to_add = "hidden2"
for es, ep, s in zip(expected_striped, expected_prepended, strs):
striped = ops.strip_name_scope(s, name_scope_to_strip)
self.assertEqual(es, striped)
self.assertEqual(ep, ops.prepend_name_scope(striped, name_scope_to_add))
def testGetNameScope(self):
with ops.Graph().as_default() as g:
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
self.assertEqual("scope1/scope2", g.get_name_scope())
self.assertEqual("scope1", g.get_name_scope())
self.assertEqual("", g.get_name_scope())
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with ops.name_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testBadArgumentsToEnableEagerExecution(self):
with self.assertRaisesRegexp(TypeError, "config must be a tf.ConfigProto"):
ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
with self.assertRaisesRegexp(ValueError, "device_policy must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, c)
with self.assertRaisesRegexp(ValueError, "execution_mode must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, execution_mode=c)
class _TupleTensor(composite_tensor.CompositeTensor):
"""`Tensor`-like `tuple`-like for custom `Tensor` conversion masquerading."""
def __init__(self, components):
super(_TupleTensor, self).__init__()
self._components = tuple(ops.convert_to_tensor(c) for c in components)
@property
def _type_spec(self):
return _TupleTensorSpec(type_spec.from_value(c) for c in self._components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
class _TupleTensorSpec(type_spec.TypeSpec):
def __init__(self, specs):
self._specs = specs
value_type = property(lambda self: _TupleTensor)
_component_specs = property(lambda self: self._specs)
def _to_components(self, value):
return value._components
def _from_components(self, components):
return _TupleTensor(*components)
def _serialize(self):
return (self._specs,)
class _MyTuple(object):
"""Pretend user-side class for `ConvertToCompositeTensorTest ."""
def __init__(self, components):
super(_MyTuple, self).__init__()
self._components = tuple(components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
ops.register_tensor_conversion_function(
_MyTuple, conversion_func=lambda x, *_, **__: _TupleTensor(x))
class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase):
def testCompositeTensorConversion(self):
"""Tests that a user can register a CompositeTensor converter."""
x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
y = ops.convert_to_tensor_or_composite(x)
self.assertFalse(tensor_util.is_tensor(y))
self.assertIsInstance(y, _TupleTensor)
self.assertLen(y, len(x))
for x_, y_ in zip(x, y):
self.assertIsInstance(y_, ops.Tensor)
self.assertTrue(tensor_util.is_tensor(y_))
self.assertAllEqual(x_, tensor_util.constant_value(y_))
if __name__ == "__main__":
googletest.main()
| 37.607724 | 106 | 0.656295 |
df2021c916c81085b56b41c67e0c68ecd96dc62d | 34,502 | py | Python | src/silx/io/spech5.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 94 | 2016-03-04T17:25:53.000Z | 2022-03-18T18:05:23.000Z | src/silx/io/spech5.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 2,841 | 2016-01-21T09:06:49.000Z | 2022-03-18T14:53:56.000Z | src/silx/io/spech5.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 71 | 2015-09-30T08:35:35.000Z | 2022-03-16T07:16:28.000Z | # coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""This module provides a h5py-like API to access SpecFile data.
API description
+++++++++++++++
Specfile data structure exposed by this API:
::
/
1.1/
title = "…"
start_time = "…"
instrument/
specfile/
file_header = "…"
scan_header = "…"
positioners/
motor_name = value
…
mca_0/
data = …
calibration = …
channels = …
preset_time = …
elapsed_time = …
live_time = …
mca_1/
…
…
measurement/
colname0 = …
colname1 = …
…
mca_0/
data -> /1.1/instrument/mca_0/data
info -> /1.1/instrument/mca_0/
…
sample/
ub_matrix = …
unit_cell = …
unit_cell_abc = …
unit_cell_alphabetagamma = …
2.1/
…
``file_header`` and ``scan_header`` are the raw headers as they
appear in the original file, as a string of lines separated by newline (``\\n``) characters.
The title is the content of the ``#S`` scan header line without the leading
``#S`` and without the scan number (e.g ``"ascan ss1vo -4.55687 -0.556875 40 0.2"``).
The start time is converted to ISO8601 format (``"2016-02-23T22:49:05Z"``),
if the original date format is standard.
Numeric datasets are stored in *float32* format, except for scalar integers
which are stored as *int64*.
Motor positions (e.g. ``/1.1/instrument/positioners/motor_name``) can be
1D numpy arrays if they are measured as scan data, or else scalars as defined
on ``#P`` scan header lines. A simple test is done to check if the motor name
is also a data column header defined in the ``#L`` scan header line.
Scan data (e.g. ``/1.1/measurement/colname0``) is accessed by column,
the dataset name ``colname0`` being the column label as defined in the ``#L``
scan header line.
If a ``/`` character is present in a column label or in a motor name in the
original SPEC file, it will be substituted with a ``%`` character in the
corresponding dataset name.
MCA data is exposed as a 2D numpy array containing all spectra for a given
analyser. The number of analysers is calculated as the number of MCA spectra
per scan data line. Demultiplexing is then performed to assign the correct
spectra to a given analyser.
MCA calibration is an array of 3 scalars, from the ``#@CALIB`` header line.
It is identical for all MCA analysers, as there can be only one
``#@CALIB`` line per scan.
MCA channels is an array containing all channel numbers. This information is
computed from the ``#@CHANN`` scan header line (if present), or computed from
the shape of the first spectrum in a scan (``[0, …, len(first_spectrum) - 1]``).
Accessing data
++++++++++++++
Data and groups are accessed in :mod:`h5py` fashion::
from silx.io.spech5 import SpecH5
# Open a SpecFile
sfh5 = SpecH5("test.dat")
# using SpecH5 as a regular group to access scans
scan1group = sfh5["1.1"]
instrument_group = scan1group["instrument"]
# alternative: full path access
measurement_group = sfh5["/1.1/measurement"]
# accessing a scan data column by name as a 1D numpy array
data_array = measurement_group["Pslit HGap"]
# accessing all mca-spectra for one MCA device
mca_0_spectra = measurement_group["mca_0/data"]
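    # calibration and channels for the same MCA device can be read the same
    # way (sketch following the structure shown above; the scan key "1.1"
    # is just an example)
    mca_0_calib = sfh5["/1.1/instrument/mca_0/calibration"]
    mca_0_channels = sfh5["/1.1/instrument/mca_0/channels"]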
:class:`SpecH5` files and groups provide a :meth:`keys` method::
>>> sfh5.keys()
['96.1', '97.1', '98.1']
>>> sfh5['96.1'].keys()
['title', 'start_time', 'instrument', 'measurement']
They can also be treated as iterators:
.. code-block:: python
from silx.io import is_dataset
for scan_group in SpecH5("test.dat"):
dataset_names = [item.name in scan_group["measurement"] if
is_dataset(item)]
print("Found data columns in scan " + scan_group.name)
print(", ".join(dataset_names))
You can test for existence of data or groups::
>>> "/1.1/measurement/Pslit HGap" in sfh5
True
>>> "positioners" in sfh5["/2.1/instrument"]
True
>>> "spam" in sfh5["1.1"]
False
.. note::
Text used to be stored with a dtype ``numpy.string_`` in silx versions
prior to *0.7.0*. The type ``numpy.string_`` is a byte-string format.
The consequence of this is that you had to decode strings before using
them in **Python 3**::
>>> from silx.io.spech5 import SpecH5
>>> sfh5 = SpecH5("31oct98.dat")
>>> sfh5["/68.1/title"]
b'68 ascan tx3 -28.5 -24.5 20 0.5'
>>> sfh5["/68.1/title"].decode()
'68 ascan tx3 -28.5 -24.5 20 0.5'
From silx version *0.7.0* onwards, text is now stored as unicode. This
    corresponds to the default text type in Python 3, and to the *unicode*
type in Python 2.
To be on the safe side, you can test for the presence of a *decode*
attribute, to ensure that you always work with unicode text::
>>> title = sfh5["/68.1/title"]
>>> if hasattr(title, "decode"):
... title = title.decode()
"""
import datetime
import logging
import re
import io
import h5py
import numpy
from silx import version as silx_version
from .specfile import SpecFile, SfErrColNotFound
from . import commonh5
__authors__ = ["P. Knobel", "D. Naudet"]
__license__ = "MIT"
__date__ = "17/07/2018"
logger1 = logging.getLogger(__name__)
text_dtype = h5py.special_dtype(vlen=str)
def to_h5py_utf8(str_list):
"""Convert a string or a list of strings to a numpy array of
unicode strings that can be written to HDF5 as utf-8.
This ensures that the type will be consistent between python 2 and
python 3, if attributes or datasets are saved to an HDF5 file.
"""
return numpy.array(str_list, dtype=text_dtype)
def _get_number_of_mca_analysers(scan):
"""
:param SpecFile sf: :class:`SpecFile` instance
"""
number_of_mca_spectra = len(scan.mca)
# Scan.data is transposed
number_of_data_lines = scan.data.shape[1]
if not number_of_data_lines == 0:
# Number of MCA spectra must be a multiple of number of data lines
assert number_of_mca_spectra % number_of_data_lines == 0
return number_of_mca_spectra // number_of_data_lines
elif number_of_mca_spectra:
# Case of a scan without data lines, only MCA.
# Our only option is to assume that the number of analysers
# is the number of #@CHANN lines
return len(scan.mca.channels)
else:
return 0
def _motor_in_scan(sf, scan_key, motor_name):
"""
:param sf: :class:`SpecFile` instance
:param scan_key: Scan identification key (e.g. ``1.1``)
:param motor_name: Name of motor as defined in file header lines
:return: ``True`` if motor exists in scan, else ``False``
:raise: ``KeyError`` if scan_key not found in SpecFile
"""
if scan_key not in sf:
raise KeyError("Scan key %s " % scan_key +
"does not exist in SpecFile %s" % sf.filename)
ret = motor_name in sf[scan_key].motor_names
if not ret and "%" in motor_name:
motor_name = motor_name.replace("%", "/")
ret = motor_name in sf[scan_key].motor_names
return ret
def _column_label_in_scan(sf, scan_key, column_label):
"""
:param sf: :class:`SpecFile` instance
:param scan_key: Scan identification key (e.g. ``1.1``)
:param column_label: Column label as defined in scan header
:return: ``True`` if data column label exists in scan, else ``False``
:raise: ``KeyError`` if scan_key not found in SpecFile
"""
if scan_key not in sf:
raise KeyError("Scan key %s " % scan_key +
"does not exist in SpecFile %s" % sf.filename)
ret = column_label in sf[scan_key].labels
if not ret and "%" in column_label:
column_label = column_label.replace("%", "/")
ret = column_label in sf[scan_key].labels
return ret
def _parse_UB_matrix(header_line):
"""Parse G3 header line and return UB matrix
:param str header_line: G3 header line
:return: UB matrix
:raises ValueError: For malformed UB matrix header line
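    Example with illustrative values (an identity matrix)::
        >>> _parse_UB_matrix("1 0 0 0 1 0 0 0 1").shape
        (1, 3, 3)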
"""
values = list(map(float, header_line.split())) # Can raise ValueError
if len(values) < 9:
raise ValueError("Not enough values in UB matrix")
return numpy.array(values).reshape((1, 3, 3))
def _ub_matrix_in_scan(scan):
"""Return True if scan header has a G3 line and all values are not 0.
:param scan: specfile.Scan instance
:return: True or False
"""
header_line = scan.scan_header_dict.get("G3", None)
if header_line is None:
return False
try:
ub_matrix = _parse_UB_matrix(header_line)
except ValueError:
logger1.warning("Malformed G3 header line")
return False
return numpy.any(ub_matrix)
def _parse_unit_cell(header_line):
"""Parse G1 header line and return unit cell
:param str header_line: G1 header line
:return: unit cell
:raises ValueError: For malformed unit cell header line
"""
values = list(map(float, header_line.split()[0:6])) # can raise ValueError
if len(values) < 6:
raise ValueError("Not enough values in unit cell")
return numpy.array(values).reshape((1, 6))
def _unit_cell_in_scan(scan):
"""Return True if scan header has a G1 line and all values are not 0.
:param scan: specfile.Scan instance
:return: True or False
"""
header_line = scan.scan_header_dict.get("G1", None)
if header_line is None:
return False
try:
unit_cell = _parse_unit_cell(header_line)
except ValueError:
logger1.warning("Malformed G1 header line")
return False
return numpy.any(unit_cell)
def _parse_ctime(ctime_lines, analyser_index=0):
"""
    :param ctime_lines: e.g. ``@CTIME %f %f %f``, first word ``@CTIME`` optional
When multiple CTIME lines are present in a scan header, this argument
is a concatenation of them separated by a ``\\n`` character.
:param analyser_index: MCA device/analyser index, when multiple devices
are in a scan.
:return: (preset_time, live_time, elapsed_time)
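    Example with illustrative values (the leading ``@CTIME`` may be omitted)::
        >>> _parse_ctime("@CTIME 1.0 0.98 1.02")
        [1.0, 0.98, 1.02]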
"""
ctime_lines = ctime_lines.lstrip("@CTIME ")
ctimes_lines_list = ctime_lines.split("\n")
if len(ctimes_lines_list) == 1:
# single @CTIME line for all devices
ctime_line = ctimes_lines_list[0]
else:
ctime_line = ctimes_lines_list[analyser_index]
if not len(ctime_line.split()) == 3:
raise ValueError("Incorrect format for @CTIME header line " +
'(expected "@CTIME %f %f %f").')
return list(map(float, ctime_line.split()))
def spec_date_to_iso8601(date, zone=None):
"""Convert SpecFile date to Iso8601.
:param date: Date (see supported formats below)
:type date: str
:param zone: Time zone as it appears in a ISO8601 date
Supported formats:
* ``DDD MMM dd hh:mm:ss YYYY``
* ``DDD YYYY/MM/dd hh:mm:ss YYYY``
where `DDD` is the abbreviated weekday, `MMM` is the month abbreviated
    name, `MM` is the month number (zero padded), `dd` is the day of the month
    (zero padded), `YYYY` is the year, `hh` the hour (zero padded), `mm` the
minute (zero padded) and `ss` the second (zero padded).
All names are expected to be in english.
Examples::
>>> spec_date_to_iso8601("Thu Feb 11 09:54:35 2016")
'2016-02-11T09:54:35'
>>> spec_date_to_iso8601("Sat 2015/03/14 03:53:50")
'2015-03-14T03:53:50'
"""
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
days_rx = '(?P<day>' + '|'.join(days) + ')'
months_rx = '(?P<month>' + '|'.join(months) + ')'
year_rx = r'(?P<year>\d{4})'
day_nb_rx = r'(?P<day_nb>[0-3 ]\d)'
month_nb_rx = r'(?P<month_nb>[0-1]\d)'
hh_rx = r'(?P<hh>[0-2]\d)'
mm_rx = r'(?P<mm>[0-5]\d)'
ss_rx = r'(?P<ss>[0-5]\d)'
tz_rx = r'(?P<tz>[+-]\d\d:\d\d){0,1}'
# date formats must have either month_nb (1..12) or month (Jan, Feb, ...)
re_tpls = ['{days} {months} {day_nb} {hh}:{mm}:{ss}{tz} {year}',
'{days} {year}/{month_nb}/{day_nb} {hh}:{mm}:{ss}{tz}']
grp_d = None
for rx in re_tpls:
full_rx = rx.format(days=days_rx,
months=months_rx,
year=year_rx,
day_nb=day_nb_rx,
month_nb=month_nb_rx,
hh=hh_rx,
mm=mm_rx,
ss=ss_rx,
tz=tz_rx)
m = re.match(full_rx, date)
if m:
grp_d = m.groupdict()
break
if not grp_d:
raise ValueError('Date format not recognized : {0}'.format(date))
year = grp_d['year']
month = grp_d.get('month_nb')
if not month:
month = '{0:02d}'.format(months.index(grp_d.get('month')) + 1)
day = grp_d['day_nb']
tz = grp_d['tz']
if not tz:
tz = zone
time = '{0}:{1}:{2}'.format(grp_d['hh'],
grp_d['mm'],
grp_d['ss'])
full_date = '{0}-{1}-{2}T{3}{4}'.format(year,
month,
day,
time,
tz if tz else '')
return full_date
def _demultiplex_mca(scan, analyser_index):
"""Return MCA data for a single analyser.
Each MCA spectrum is a 1D array. For each analyser, there is one
spectrum recorded per scan data line. When there are more than a single
MCA analyser in a scan, the data will be multiplexed. For instance if
there are 3 analysers, the consecutive spectra for the first analyser must
be accessed as ``mca[0], mca[3], mca[6]…``.
:param scan: :class:`Scan` instance containing the MCA data
:param analyser_index: 0-based index referencing the analyser
:type analyser_index: int
:return: 2D numpy array containing all spectra for one analyser
"""
number_of_analysers = _get_number_of_mca_analysers(scan)
number_of_spectra = len(scan.mca)
number_of_spectra_per_analyser = number_of_spectra // number_of_analysers
len_spectrum = len(scan.mca[analyser_index])
mca_array = numpy.empty((number_of_spectra_per_analyser, len_spectrum))
for i in range(number_of_spectra_per_analyser):
mca_array[i, :] = scan.mca[analyser_index + i * number_of_analysers]
return mca_array
# Node classes
class SpecH5Dataset(object):
"""This convenience class is to be inherited by all datasets, for
    compatibility purposes with code that tests for
``isinstance(obj, SpecH5Dataset)``.
This legacy behavior is deprecated. The correct way to test
if an object is a dataset is to use :meth:`silx.io.utils.is_dataset`.
Datasets must also inherit :class:`SpecH5NodeDataset` or
:class:`SpecH5LazyNodeDataset` which actually implement all the
API."""
pass
class SpecH5NodeDataset(commonh5.Dataset, SpecH5Dataset):
"""This class inherits :class:`commonh5.Dataset`, to which it adds
little extra functionality. The main additional functionality is the
    proxy behavior that allows it to mimic the numpy array stored in this
class.
"""
def __init__(self, name, data, parent=None, attrs=None):
# get proper value types, to inherit from numpy
# attributes (dtype, shape, size)
if isinstance(data, str):
# use unicode (utf-8 when saved to HDF5 output)
value = to_h5py_utf8(data)
elif isinstance(data, float):
# use 32 bits for float scalars
value = numpy.float32(data)
elif isinstance(data, int):
value = numpy.int_(data)
else:
# Enforce numpy array
array = numpy.array(data)
data_kind = array.dtype.kind
if data_kind in ["S", "U"]:
value = numpy.asarray(array,
dtype=text_dtype)
elif data_kind in ["f"]:
value = numpy.asarray(array, dtype=numpy.float32)
else:
value = array
commonh5.Dataset.__init__(self, name, value, parent, attrs)
def __getattr__(self, item):
"""Proxy to underlying numpy array methods.
"""
if hasattr(self[()], item):
return getattr(self[()], item)
raise AttributeError("SpecH5Dataset has no attribute %s" % item)
class SpecH5LazyNodeDataset(commonh5.LazyLoadableDataset, SpecH5Dataset):
"""This class inherits :class:`commonh5.LazyLoadableDataset`,
    to which it adds a proxy behavior that allows it to mimic the numpy
array stored in this class.
The class has to be inherited and the :meth:`_create_data` method has to be
implemented to return the numpy data exposed by the dataset. This factory
method is only called once, when the data is needed.
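    A minimal subclass sketch (hypothetical, for illustration only)::
        class ArangeDataset(SpecH5LazyNodeDataset):
            def _create_data(self):
                return numpy.arange(10)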
"""
def __getattr__(self, item):
"""Proxy to underlying numpy array methods.
"""
if hasattr(self[()], item):
return getattr(self[()], item)
raise AttributeError("SpecH5Dataset has no attribute %s" % item)
def _create_data(self):
"""
Factory to create the data exposed by the dataset when it is needed.
It has to be implemented for the class to work.
:rtype: numpy.ndarray
"""
raise NotImplementedError()
class SpecH5Group(object):
"""This convenience class is to be inherited by all groups, for
compatibility purposes with code that tests for
``isinstance(obj, SpecH5Group)``.
This legacy behavior is deprecated. The correct way to test
if an object is a group is to use :meth:`silx.io.utils.is_group`.
Groups must also inherit :class:`silx.io.commonh5.Group`, which
actually implements all the methods and attributes."""
pass
class SpecH5(commonh5.File, SpecH5Group):
"""This class opens a SPEC file and exposes it as a *h5py.File*.
It inherits :class:`silx.io.commonh5.Group` (via :class:`commonh5.File`),
which implements most of its API.
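    Typical usage (sketch, assuming a SPEC file named "test.dat")::
        sfh5 = SpecH5("test.dat")
        scan_keys = list(sfh5.keys())
        sfh5.close()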
"""
def __init__(self, filename):
"""
:param filename: Path to SpecFile in filesystem
:type filename: str
"""
if isinstance(filename, io.IOBase):
# see https://github.com/silx-kit/silx/issues/858
filename = filename.name
self._sf = SpecFile(filename)
attrs = {"NX_class": to_h5py_utf8("NXroot"),
"file_time": to_h5py_utf8(
datetime.datetime.now().isoformat()),
"file_name": to_h5py_utf8(filename),
"creator": to_h5py_utf8("silx spech5 %s" % silx_version)}
commonh5.File.__init__(self, filename, attrs=attrs)
for scan_key in self._sf.keys():
scan = self._sf[scan_key]
scan_group = ScanGroup(scan_key, parent=self, scan=scan)
self.add_node(scan_group)
def close(self):
self._sf.close()
self._sf = None
class ScanGroup(commonh5.Group, SpecH5Group):
def __init__(self, scan_key, parent, scan):
"""
:param parent: parent Group
:param str scan_key: Scan key (e.g. "1.1")
:param scan: specfile.Scan object
"""
commonh5.Group.__init__(self, scan_key, parent=parent,
attrs={"NX_class": to_h5py_utf8("NXentry")})
# take title in #S after stripping away scan number and spaces
s_hdr_line = scan.scan_header_dict["S"]
title = s_hdr_line.lstrip("0123456789").lstrip()
self.add_node(SpecH5NodeDataset(name="title",
data=to_h5py_utf8(title),
parent=self))
if "D" in scan.scan_header_dict:
try:
start_time_str = spec_date_to_iso8601(scan.scan_header_dict["D"])
except (IndexError, ValueError):
logger1.warning("Could not parse date format in scan %s header." +
" Using original date not converted to ISO-8601",
scan_key)
start_time_str = scan.scan_header_dict["D"]
elif "D" in scan.file_header_dict:
logger1.warning("No #D line in scan %s header. " +
"Using file header for start_time.",
scan_key)
try:
start_time_str = spec_date_to_iso8601(scan.file_header_dict["D"])
except (IndexError, ValueError):
logger1.warning("Could not parse date format in scan %s header. " +
"Using original date not converted to ISO-8601",
scan_key)
start_time_str = scan.file_header_dict["D"]
else:
logger1.warning("No #D line in %s header. Setting date to empty string.",
scan_key)
start_time_str = ""
self.add_node(SpecH5NodeDataset(name="start_time",
data=to_h5py_utf8(start_time_str),
parent=self))
self.add_node(InstrumentGroup(parent=self, scan=scan))
self.add_node(MeasurementGroup(parent=self, scan=scan))
if _unit_cell_in_scan(scan) or _ub_matrix_in_scan(scan):
self.add_node(SampleGroup(parent=self, scan=scan))
class InstrumentGroup(commonh5.Group, SpecH5Group):
def __init__(self, parent, scan):
"""
:param parent: parent Group
:param scan: specfile.Scan object
"""
commonh5.Group.__init__(self, name="instrument", parent=parent,
attrs={"NX_class": to_h5py_utf8("NXinstrument")})
self.add_node(InstrumentSpecfileGroup(parent=self, scan=scan))
self.add_node(PositionersGroup(parent=self, scan=scan))
num_analysers = _get_number_of_mca_analysers(scan)
for anal_idx in range(num_analysers):
self.add_node(InstrumentMcaGroup(parent=self,
analyser_index=anal_idx,
scan=scan))
class InstrumentSpecfileGroup(commonh5.Group, SpecH5Group):
def __init__(self, parent, scan):
commonh5.Group.__init__(self, name="specfile", parent=parent,
attrs={"NX_class": to_h5py_utf8("NXcollection")})
self.add_node(SpecH5NodeDataset(
name="file_header",
data=to_h5py_utf8(scan.file_header),
parent=self,
attrs={}))
self.add_node(SpecH5NodeDataset(
name="scan_header",
data=to_h5py_utf8(scan.scan_header),
parent=self,
attrs={}))
class PositionersGroup(commonh5.Group, SpecH5Group):
def __init__(self, parent, scan):
commonh5.Group.__init__(self, name="positioners", parent=parent,
attrs={"NX_class": to_h5py_utf8("NXcollection")})
dataset_info = [] # Store list of positioner's (name, value)
is_error = False # True if error encountered
for motor_name in scan.motor_names:
safe_motor_name = motor_name.replace("/", "%")
if motor_name in scan.labels and scan.data.shape[0] > 0:
# return a data column if one has the same label as the motor
motor_value = scan.data_column_by_name(motor_name)
else:
# Take value from #P scan header.
# (may return float("inf") if #P line is missing from scan hdr)
try:
motor_value = scan.motor_position_by_name(motor_name)
except SfErrColNotFound:
is_error = True
motor_value = float('inf')
dataset_info.append((safe_motor_name, motor_value))
if is_error: # Filter-out scalar values
logger1.warning("Mismatching number of elements in #P and #O: Ignoring")
dataset_info = [
(name, value) for name, value in dataset_info
if not isinstance(value, float)]
for name, value in dataset_info:
self.add_node(SpecH5NodeDataset(
name=name,
data=value,
parent=self))
class InstrumentMcaGroup(commonh5.Group, SpecH5Group):
def __init__(self, parent, analyser_index, scan):
name = "mca_%d" % analyser_index
commonh5.Group.__init__(self, name=name, parent=parent,
attrs={"NX_class": to_h5py_utf8("NXdetector")})
mcaDataDataset = McaDataDataset(parent=self,
analyser_index=analyser_index,
scan=scan)
self.add_node(mcaDataDataset)
spectrum_length = mcaDataDataset.shape[-1]
mcaDataDataset = None
if len(scan.mca.channels) == 1:
# single @CALIB line applying to multiple devices
calibration_dataset = scan.mca.calibration[0]
channels_dataset = scan.mca.channels[0]
else:
calibration_dataset = scan.mca.calibration[analyser_index]
channels_dataset = scan.mca.channels[analyser_index]
channels_length = len(channels_dataset)
if (channels_length > 1) and (spectrum_length > 0):
logger1.info("Spectrum and channels length mismatch")
# this should always be the case
if channels_length > spectrum_length:
channels_dataset = channels_dataset[:spectrum_length]
elif channels_length < spectrum_length:
# only trust first channel and increment
channel0 = channels_dataset[0]
increment = channels_dataset[1] - channels_dataset[0]
channels_dataset = numpy.linspace(channel0,
channel0 + increment * spectrum_length,
spectrum_length, endpoint=False)
self.add_node(SpecH5NodeDataset(name="calibration",
data=calibration_dataset,
parent=self))
self.add_node(SpecH5NodeDataset(name="channels",
data=channels_dataset,
parent=self))
if "CTIME" in scan.mca_header_dict:
ctime_line = scan.mca_header_dict['CTIME']
preset_time, live_time, elapsed_time = _parse_ctime(ctime_line, analyser_index)
self.add_node(SpecH5NodeDataset(name="preset_time",
data=preset_time,
parent=self))
self.add_node(SpecH5NodeDataset(name="live_time",
data=live_time,
parent=self))
self.add_node(SpecH5NodeDataset(name="elapsed_time",
data=elapsed_time,
parent=self))
class McaDataDataset(SpecH5LazyNodeDataset):
"""Lazy loadable dataset for MCA data"""
def __init__(self, parent, analyser_index, scan):
commonh5.LazyLoadableDataset.__init__(
self, name="data", parent=parent,
attrs={"interpretation": to_h5py_utf8("spectrum"),})
self._scan = scan
self._analyser_index = analyser_index
self._shape = None
self._num_analysers = _get_number_of_mca_analysers(self._scan)
def _create_data(self):
return _demultiplex_mca(self._scan, self._analyser_index)
@property
def shape(self):
if self._shape is None:
num_spectra_in_file = len(self._scan.mca)
num_spectra_per_analyser = num_spectra_in_file // self._num_analysers
len_spectrum = len(self._scan.mca[self._analyser_index])
self._shape = num_spectra_per_analyser, len_spectrum
return self._shape
@property
def size(self):
return numpy.prod(self.shape, dtype=numpy.intp)
@property
def dtype(self):
# we initialize the data with numpy.empty() without specifying a dtype
# in _demultiplex_mca()
return numpy.empty((1, )).dtype
def __len__(self):
return self.shape[0]
def __getitem__(self, item):
# optimization for fetching a single spectrum if data not already loaded
if not self._is_initialized:
if isinstance(item, int):
if item < 0:
# negative indexing
item += len(self)
return self._scan.mca[self._analyser_index +
item * self._num_analysers]
# accessing a slice or element of a single spectrum [i, j:k]
try:
spectrum_idx, channel_idx_or_slice = item
assert isinstance(spectrum_idx, int)
except (ValueError, TypeError, AssertionError):
pass
else:
if spectrum_idx < 0:
                    spectrum_idx += len(self)
idx = self._analyser_index + spectrum_idx * self._num_analysers
return self._scan.mca[idx][channel_idx_or_slice]
return super(McaDataDataset, self).__getitem__(item)
class MeasurementGroup(commonh5.Group, SpecH5Group):
def __init__(self, parent, scan):
"""
:param parent: parent Group
:param scan: specfile.Scan object
"""
commonh5.Group.__init__(self, name="measurement", parent=parent,
attrs={"NX_class": to_h5py_utf8("NXcollection"),})
for label in scan.labels:
safe_label = label.replace("/", "%")
self.add_node(SpecH5NodeDataset(name=safe_label,
data=scan.data_column_by_name(label),
parent=self))
num_analysers = _get_number_of_mca_analysers(scan)
for anal_idx in range(num_analysers):
self.add_node(MeasurementMcaGroup(parent=self, analyser_index=anal_idx))
class MeasurementMcaGroup(commonh5.Group, SpecH5Group):
def __init__(self, parent, analyser_index):
basename = "mca_%d" % analyser_index
commonh5.Group.__init__(self, name=basename, parent=parent,
attrs={})
target_name = self.name.replace("measurement", "instrument")
self.add_node(commonh5.SoftLink(name="data",
path=target_name + "/data",
parent=self))
self.add_node(commonh5.SoftLink(name="info",
path=target_name,
parent=self))
class SampleGroup(commonh5.Group, SpecH5Group):
def __init__(self, parent, scan):
"""
:param parent: parent Group
:param scan: specfile.Scan object
"""
commonh5.Group.__init__(self, name="sample", parent=parent,
attrs={"NX_class": to_h5py_utf8("NXsample"),})
if _unit_cell_in_scan(scan):
self.add_node(SpecH5NodeDataset(name="unit_cell",
data=_parse_unit_cell(scan.scan_header_dict["G1"]),
parent=self,
attrs={"interpretation": to_h5py_utf8("scalar")}))
self.add_node(SpecH5NodeDataset(name="unit_cell_abc",
data=_parse_unit_cell(scan.scan_header_dict["G1"])[0, 0:3],
parent=self,
attrs={"interpretation": to_h5py_utf8("scalar")}))
self.add_node(SpecH5NodeDataset(name="unit_cell_alphabetagamma",
data=_parse_unit_cell(scan.scan_header_dict["G1"])[0, 3:6],
parent=self,
attrs={"interpretation": to_h5py_utf8("scalar")}))
if _ub_matrix_in_scan(scan):
self.add_node(SpecH5NodeDataset(name="ub_matrix",
data=_parse_UB_matrix(scan.scan_header_dict["G3"]),
parent=self,
attrs={"interpretation": to_h5py_utf8("scalar")}))
| 37.997797 | 103 | 0.591096 |
7c804855a3a8953dc42b3e823dc6cc3e3156edb2 | 156 | py | Python | src/vulcan_scraper/enum.py | drobotk/vulcan-sdk-py | 53358a64ae6618ce853ae470f94e32fc853ad44f | [
"Apache-2.0"
] | 2 | 2021-12-19T09:17:08.000Z | 2022-02-11T14:51:03.000Z | src/vulcan_scraper/enum.py | drobotk/vulcan-sdk-py | 53358a64ae6618ce853ae470f94e32fc853ad44f | [
"Apache-2.0"
] | null | null | null | src/vulcan_scraper/enum.py | drobotk/vulcan-sdk-py | 53358a64ae6618ce853ae470f94e32fc853ad44f | [
"Apache-2.0"
] | null | null | null | from enum import Enum, auto
class LoginType(Enum):
UNKNOWN = auto()
CUFS = auto()
ADFS = auto()
ADFSCards = auto()
ADFSLight = auto()
| 15.6 | 27 | 0.596154 |
966f7b1d914306b55949971e2fd562fed2dd2795 | 3,900 | py | Python | games/chess/player.py | winkelmantanner/mmai20-dumbsophomores | bd2c88ea34cf9be83e2efd59abf2adf9a1a056fa | [
"MIT"
] | null | null | null | games/chess/player.py | winkelmantanner/mmai20-dumbsophomores | bd2c88ea34cf9be83e2efd59abf2adf9a1a056fa | [
"MIT"
] | null | null | null | games/chess/player.py | winkelmantanner/mmai20-dumbsophomores | bd2c88ea34cf9be83e2efd59abf2adf9a1a056fa | [
"MIT"
] | null | null | null | # Player: A player in this game. Every AI controls one player.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from games.chess.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Player(GameObject):
"""The class representing the Player in the Chess game.
A player in this game. Every AI controls one player.
"""
def __init__(self):
"""Initializes a Player with basic logic as provided by the Creer code generator."""
GameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._client_type = ""
self._color = ""
self._in_check = False
self._lost = False
self._made_move = False
self._name = "Anonymous"
self._opponent = None
self._pieces = []
self._rank_direction = 0
self._reason_lost = ""
self._reason_won = ""
self._time_remaining = 0
self._won = False
@property
def client_type(self):
"""What type of client this is, e.g. 'Python', 'JavaScript', or some other language. For potential data mining purposes.
:rtype: str
"""
return self._client_type
@property
def color(self):
"""The color (side) of this player. Either 'White' or 'Black', with the 'White' player having the first move.
:rtype: str
"""
return self._color
@property
def in_check(self):
"""True if this player is currently in check, and must move out of check, False otherwise.
:rtype: bool
"""
return self._in_check
@property
def lost(self):
"""If the player lost the game or not.
:rtype: bool
"""
return self._lost
@property
def made_move(self):
"""If the Player has made their move for the turn. True means they can no longer move a Piece this turn.
:rtype: bool
"""
return self._made_move
@property
def name(self):
"""The name of the player.
:rtype: str
"""
return self._name
@property
def opponent(self):
"""This player's opponent in the game.
:rtype: Player
"""
return self._opponent
@property
def pieces(self):
"""All the uncaptured chess Pieces owned by this player.
:rtype: list[Piece]
"""
return self._pieces
@property
def rank_direction(self):
"""The direction your Pieces must go along the rank axis until they reach the other side.
:rtype: int
"""
return self._rank_direction
@property
def reason_lost(self):
"""The reason why the player lost the game.
:rtype: str
"""
return self._reason_lost
@property
def reason_won(self):
"""The reason why the player won the game.
:rtype: str
"""
return self._reason_won
@property
def time_remaining(self):
"""The amount of time (in ns) remaining for this AI to send commands.
:rtype: float
"""
return self._time_remaining
@property
def won(self):
"""If the player won the game or not.
:rtype: bool
"""
return self._won
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
| 26.896552 | 135 | 0.606154 |
9deaf11081ca9e5acc4e9e72644cbefdf1edb8d3 | 5,422 | py | Python | packets/Reader/PacketResolver.py | kawatapw/kuriso | a24baa12ccfdaab7cd0772985a1ada5a6a09f0d7 | [
"MIT"
] | null | null | null | packets/Reader/PacketResolver.py | kawatapw/kuriso | a24baa12ccfdaab7cd0772985a1ada5a6a09f0d7 | [
"MIT"
] | null | null | null | packets/Reader/PacketResolver.py | kawatapw/kuriso | a24baa12ccfdaab7cd0772985a1ada5a6a09f0d7 | [
"MIT"
] | null | null | null | from typing import List, Tuple
from objects.TypedDicts import TypedPresence, TypedReadMatch
from objects.constants.GameModes import GameModes
from objects.constants.Modificators import Mods
from objects.constants.Slots import SlotStatus, SlotTeams
from objects.constants.multiplayer import (
MatchTypes,
MatchScoringTypes,
MatchTeamTypes,
MultiSpecialModes,
)
from objects.Multiplayer import Slot
from objects.BanchoObjects import Message
from packets.Reader.index import KurisoBuffer
class PacketResolver:
@staticmethod
async def read_new_presence(data: bytes) -> TypedPresence:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return {
"action": await buffer.read_byte(),
"action_text": await buffer.read_osu_string(),
"map_md5": await buffer.read_osu_string(),
"mods": await buffer.read_u_int_32(),
"mode": await buffer.read_byte(),
"map_id": await buffer.read_int_32(),
}
@staticmethod
async def read_request_users_stats(data: bytes) -> List[int]:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_i32_list()
@staticmethod
async def read_pr_filter(data: bytes) -> int:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_int_32()
@staticmethod
async def read_slot_index(data: bytes) -> int:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_int_32()
@staticmethod
async def read_message(data: bytes) -> Message:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return Message(
sender=await buffer.read_osu_string(),
body=await buffer.read_osu_string(),
to=await buffer.read_osu_string(),
client_id=await buffer.read_int_32(),
)
@staticmethod
async def read_channel_name(data: bytes) -> str:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_osu_string()
@staticmethod
async def read_specatator_id(data: bytes) -> int:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_int_32()
@staticmethod
async def read_friend_id(data: bytes) -> int:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_int_32()
@staticmethod
async def read_match(data: bytes) -> TypedReadMatch:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
await buffer.read_int_16() # skip 3 bytes for id and inProgress because default is False
await buffer.read_byte()
match_type = MatchTypes(await buffer.read_byte())
mods = Mods(await buffer.read_int_32())
name = await buffer.read_osu_string()
password = await buffer.read_osu_string()
beatmap_name = await buffer.read_osu_string()
beatmap_id = await buffer.read_int_32()
beatmap_md5 = await buffer.read_osu_string()
slots = [Slot() for _ in range(0, 16)] # make slots
for slot in slots:
slot.status = SlotStatus(await buffer.read_byte())
for slot in slots:
slot.team = SlotTeams(await buffer.read_byte())
for slot in slots:
if slot.status.value & SlotStatus.HasPlayer:
await buffer.read_int_32()
host_id = await buffer.read_int_32()
play_mode = GameModes(await buffer.read_byte())
scoring_type = MatchScoringTypes(await buffer.read_byte())
team_type = MatchTeamTypes(await buffer.read_byte())
is_freemod = await buffer.read_bool()
match_freemod = MultiSpecialModes(int(is_freemod))
if is_freemod:
for slot in slots:
slot.mods = Mods(await buffer.read_int_32())
seed = await buffer.read_int_32()
t_dict = {
"match_type": match_type,
"mods": mods,
"name": name,
"password": password,
"beatmap_name": beatmap_name,
"beatmap_id": beatmap_id,
"beatmap_md5": beatmap_md5,
"slots": slots,
"host_id": host_id,
"play_mode": play_mode,
"scoring_type": scoring_type,
"team_type": team_type,
"match_freemod": match_freemod,
"seed": seed,
}
return t_dict
@staticmethod
async def read_mp_join_data(data: bytes) -> Tuple[int, str]:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_int_32(), await buffer.read_osu_string()
@staticmethod
async def read_mods(data: bytes) -> int:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_int_32()
@staticmethod
async def read_user_id(data: bytes) -> int:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_int_32()
@staticmethod
async def read_match_id(data: bytes) -> int:
buffer = KurisoBuffer(None)
await buffer.write_to_buffer(data)
return await buffer.read_int_32()
| 33.060976 | 97 | 0.645518 |
a63e46fcd494006ab38503ec55e6207955d3fa87 | 782 | py | Python | 104_operator_over_loading.py | AmreshTripathy/Python | e86420fef7f52da393be5b50ac2f13bddfeb3306 | [
"Apache-2.0"
] | 4 | 2021-05-27T05:06:09.000Z | 2021-06-12T17:12:47.000Z | 104_operator_over_loading.py | AmreshTripathy/Python | e86420fef7f52da393be5b50ac2f13bddfeb3306 | [
"Apache-2.0"
] | null | null | null | 104_operator_over_loading.py | AmreshTripathy/Python | e86420fef7f52da393be5b50ac2f13bddfeb3306 | [
"Apache-2.0"
] | null | null | null | class Number:
def __init__(self, num):
self.num = num
def __add__(self, num1):
print ('Lets add')
return self.num + num1.num
def __Sub__(self, num1):
print ('Lets Substract')
return self.num - num1.num
def __mul__(self, num1):
print ('Lets multiply')
return self.num * num1.num
def __truediv__(self, num1):
print ('Lets divide')
return self.num / num1.num
def __floordiv__(self, num1):
print ('Lets do Floor Division')
return self.num // num1.num
n1 = Number(6)
n2 = Number(4)
# sum = n1 + n2
print (n1.__add__(n2))
# mul = n1 * n2
print (n1.__Sub__(n2))
print (n1.__mul__(n2))
print (n1.__truediv__(n2))
print (n1.__floordiv__(n2)) | 23.69697 | 41 | 0.569054 |
48d734db8add44c2c56a39bc47e728926f2f645d | 485 | py | Python | alipay/aop/api/response/AlipayCommerceEducateCampusBiztaskFinishResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayCommerceEducateCampusBiztaskFinishResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayCommerceEducateCampusBiztaskFinishResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceEducateCampusBiztaskFinishResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceEducateCampusBiztaskFinishResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayCommerceEducateCampusBiztaskFinishResponse, self).parse_response_content(response_content)
| 30.3125 | 121 | 0.8 |
16bb94dfc31aa52ca5cc2e49720e6867d26446e3 | 2,387 | py | Python | main.py | eidhernan/python | 05fdaf848ae317a9141ce123e72e56fae155152f | [
"MIT"
] | null | null | null | main.py | eidhernan/python | 05fdaf848ae317a9141ce123e72e56fae155152f | [
"MIT"
] | null | null | null | main.py | eidhernan/python | 05fdaf848ae317a9141ce123e72e56fae155152f | [
"MIT"
] | null | null | null | from blessed import Terminal
t = Terminal()
class Symbol:
def __init__(self):
self.heart = u"\u2665"
self.larrow = u"\u2190"
self.rarrow = u"\u2B95"
self.darrow = u"\u2193"
self.uarrow = u"\u2191"
s = Symbol()
class Menu:
def __init__(self):
from blessed import Terminal
t = Terminal()
self.cursor_color = t.color(196)
self.cursor_str = s.rarrow
self.cursorPosition = 1
def refreshCursor(self):
self.cursorPosition = 1
def cls(self):
import os
os.system("clear")
def vert(self, title, *content):
with t.hidden_cursor():
while True:
self.cls() #clears the screen
self.iteration = 0 #this attribute holds the choice # in the menu
print(title) #prints out the menu title
for msg in content:
self.iteration += 1 #increments to the next iteration
if self.iteration == self.cursorPosition: #check if the cursor position matches the choice #
print("{} {}".format(self.cursor_color(self.cursor_str), self.cursor_color(msg)))#prints out the choice, with a marker
else:
print("{} {}".format(" "*len(self.cursor_str), msg)) #prints a plain choice
with t.cbreak(): #wait for keyboard input
k = t.inkey(timeout=None) #read for keypresses
if k.name == "KEY_UP":
if self.cursorPosition > 1:
self.cursorPosition -= 1
elif self.cursorPosition <= 1:
self.cursorPosition = len(content)
continue
elif k.name == "KEY_DOWN":
if self.cursorPosition < len(content):
self.cursorPosition +=1
elif self.cursorPosition >= len(content):
self.cursorPosition = 1
continue
elif k.name == "KEY_ENTER" or k == "z":
return self.cursorPosition
m = Menu()
x = m.vert("You are defusing a bomb. Cut a wire","Red wire","Blue wire")
if x == 1:
print("You successfully defused the bomb.")
elif x == 2:
print("You failed. BOOM!") | 42.625 | 142 | 0.514453 |
43a9afa75bef671b21c00644b66ae005c6b696bf | 566 | py | Python | toolz/tests/test_curried.py | larsmans/toolz | 13e703cf1722ad0fd9941f54e15b8e4d841b0fc2 | [
"BSD-3-Clause"
] | null | null | null | toolz/tests/test_curried.py | larsmans/toolz | 13e703cf1722ad0fd9941f54e15b8e4d841b0fc2 | [
"BSD-3-Clause"
] | null | null | null | toolz/tests/test_curried.py | larsmans/toolz | 13e703cf1722ad0fd9941f54e15b8e4d841b0fc2 | [
"BSD-3-Clause"
] | 1 | 2019-12-02T03:17:12.000Z | 2019-12-02T03:17:12.000Z | import toolz
import toolz.curried
from toolz.curried import take, first, second, sorted, merge_with, reduce
from operator import add
def test_take():
assert list(take(2)([1, 2, 3])) == [1, 2]
def test_first():
assert first is toolz.itertoolz.first
def test_merge_with():
assert merge_with(sum)({1: 1}, {1: 2}) == {1: 3}
def test_sorted():
assert sorted(key=second)([(1, 2), (2, 1)]) == [(2, 1), (1, 2)]
def test_reduce():
assert reduce(add)((1, 2, 3)) == 6
def test_module_name():
assert toolz.curried.__name__ == 'toolz.curried'
| 19.517241 | 73 | 0.639576 |
3b537c19fc4ef0aeb835a10c3eca6adfa1ee8adb | 2,449 | py | Python | nqueen/nqueen_steepest_ascent_hill_climbing.py | kmhasan-class/spring2018ai | 80e78bc29a306aadbaa6358841141c25b082d233 | [
"MIT"
] | null | null | null | nqueen/nqueen_steepest_ascent_hill_climbing.py | kmhasan-class/spring2018ai | 80e78bc29a306aadbaa6358841141c25b082d233 | [
"MIT"
] | null | null | null | nqueen/nqueen_steepest_ascent_hill_climbing.py | kmhasan-class/spring2018ai | 80e78bc29a306aadbaa6358841141c25b082d233 | [
"MIT"
] | null | null | null | import random
def get_random_board(n):
#homework write the body of this function
#return a random board of size n x n
#board = [1, 0, 2, 2]
board = []
for i in range(0, n):
row = random.randint(0, n - 1)
board.append(row)
print("Randomly generated board: ", board)
return board
def count_conflicts(board):
#homework make this function efficient
n = len(board)
#for (int i = 0; i < n; i++)
# printf("%d %d", i, board[i]);
row_conflicts = 0
diag_conflicts = 0
inv_diag_conflicts = 0
for i in range(0, n):
# q1 is queen 1 at (r1, c1)
r1 = board[i]
c1 = i
for j in range(i + 1, n):
# q2 is queen 2 at (r2, c2)
r2 = board[j]
c2 = j
if r1 == r2:
row_conflicts += 1
if r1 - c1 == r2 - c2:
diag_conflicts += 1
if r1 + c1 == r2 + c2:
inv_diag_conflicts += 1
total_conflicts = row_conflicts + diag_conflicts + inv_diag_conflicts
#print(row_conflicts, diag_conflicts, inv_diag_conflicts)
return total_conflicts
def get_next_board(board):
better_board = []
least_conflicts = count_conflicts(board)
for c in range(0, len(board)):
current_row = board[c]
for r in range(0, len(board)):
board[c] = r
new_conflicts = count_conflicts(board)
if new_conflicts < least_conflicts:
least_conflicts = new_conflicts
better_board = list(board)
#print("Col: ", c, "Row: ", r, "Conflicts: ", new_conflicts)
board[c] = current_row
#print('\n')
#print(better_board, least_conflicts)
return better_board, least_conflicts
board = get_random_board(8)
best_board = list(board)
least_conflicts = count_conflicts(best_board)
print("Initial board: ", board, " Conflicts: ", least_conflicts)
while True:
better_board, conflicts = get_next_board(best_board)
print("New board:", better_board, " Conflicts: ", conflicts)
if conflicts < least_conflicts:
least_conflicts = conflicts
best_board = list(better_board)
else:
#homework modify this part
#when we get stuck we should restart from a random board
#make sure you put a fixed count on how many times we do this
break
print("Best board: ", best_board, " Conflicts: ", least_conflicts)
| 29.865854 | 73 | 0.596162 |
4f4fa2e0290b3afce7e9be65da528daa7b5f3104 | 419 | py | Python | query_processor/migrations/0008_remove_platformrequest_resolution_state.py | shashank-iitj/traibot | 30676413e30a0f7dc651f1918b33892728a01c1b | [
"Apache-2.0"
] | null | null | null | query_processor/migrations/0008_remove_platformrequest_resolution_state.py | shashank-iitj/traibot | 30676413e30a0f7dc651f1918b33892728a01c1b | [
"Apache-2.0"
] | null | null | null | query_processor/migrations/0008_remove_platformrequest_resolution_state.py | shashank-iitj/traibot | 30676413e30a0f7dc651f1918b33892728a01c1b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-27 09:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('query_processor', '0007_auto_20160416_1715'),
]
operations = [
migrations.RemoveField(
model_name='platformrequest',
name='resolution_state',
),
]
| 20.95 | 55 | 0.637232 |
af8da8a92023354b0db314433dcc902f8067d9e1 | 16,606 | py | Python | dropblock/dropblock.py | transcendentsky/dropblock | d84871b5dc515c4ac24aa0338f03ec08bf800079 | [
"MIT"
] | 2 | 2018-12-24T06:05:42.000Z | 2019-03-18T08:16:35.000Z | dropblock/dropblock.py | transcendentsky/dropblock | d84871b5dc515c4ac24aa0338f03ec08bf800079 | [
"MIT"
] | null | null | null | dropblock/dropblock.py | transcendentsky/dropblock | d84871b5dc515c4ac24aa0338f03ec08bf800079 | [
"MIT"
] | 2 | 2018-12-18T07:57:09.000Z | 2019-05-21T13:17:16.000Z | #coding:utf-8
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.distributions import Bernoulli
import numpy as np
class DropBlock2D(nn.Module):
r"""Randomly zeroes 2D spatial blocks of the input tensor.
As described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
dropping whole blocks of feature map allows to remove semantic
information as compared to regular dropout.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, H, W)`
- Output: `(N, C, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size, test=False, gkernel=False):
super(DropBlock2D, self).__init__()
self.drop_prob = drop_prob
self.block_size = block_size
self.test = test
self.gkernel = gkernel
if gkernel == True:
print("[*] Using Gaussian-like Kernel")
print("[*] Gkernel size =", block_size)
x, y = np.meshgrid(np.linspace(-1, 1, block_size), np.linspace(-1, 1, block_size))
d = np.sqrt(x * x + y * y)
# hyper-parameter
sigma, mu = 0.75, 0.0
g = np.clip(np.exp(-((d - mu) ** 2 / (2.0 * sigma ** 2))) * 1.25, 0.0, 1.0)
self.g = g[np.newaxis, np.newaxis, :, :]
def forward(self, x):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, \
"Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
# sample from a mask
mask_reduction = self.block_size // 2
mask_height = x.shape[-2] - mask_reduction
mask_width = x.shape[-1] - mask_reduction
mask_sizes = [mask_height, mask_width]
if any([x <= 0 for x in mask_sizes]):
raise ValueError('Input of shape {} is too small for block_size {}'
.format(tuple(x.shape), self.block_size))
# get gamma value
gamma = self._compute_gamma(x, mask_sizes)
if self.test: print("--- gamma ---\n", gamma)
# sample mask
mask = Bernoulli(gamma).sample((x.shape[0], *mask_sizes))
if self.test: print("--- mask ---\n", mask)
# place mask on input device
mask = mask.to(x.device) # mask.cuda()
# compute block mask
block_mask = self._compute_block_mask(mask)
if self.test: print("--- block mask ---\n", block_mask)
# apply block mask
out = x * block_mask[:, None, :, :]
# scale output
out = out * block_mask.numel() / block_mask.sum()
return out
def _compute_block_mask(self, mask):
if self.gkernel == True:
kernel = torch.from_numpy(self.g).to(mask.device)
else:
kernel = torch.ones((1, 1, self.block_size, self.block_size)).to(
mask.device)
block_mask = F.conv2d(mask[:, None, :, :],kernel,
padding=int(np.ceil(self.block_size // 2) + 1))
delta = self.block_size // 2
input_height = mask.shape[-2] + delta
input_width = mask.shape[-1] + delta
height_to_crop = block_mask.shape[-2] - input_height
width_to_crop = block_mask.shape[-1] - input_width
if height_to_crop != 0:
block_mask = block_mask[:, :, :-height_to_crop, :]
if width_to_crop != 0:
block_mask = block_mask[:, :, :, :-width_to_crop]
block_mask = (block_mask >= 1).to(device=block_mask.device, dtype=block_mask.dtype)
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x, mask_sizes):
feat_area = x.shape[-2] * x.shape[-1]
mask_area = mask_sizes[-2] * mask_sizes[-1]
return (self.drop_prob / (self.block_size ** 2)) * (feat_area / mask_area)
class DropBlock3D(DropBlock2D):
r"""Randomly zeroes 3D spatial blocks of the input tensor.
An extension to the concept described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
dropping whole blocks of feature map allows to remove semantic
information as compared to regular dropout.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, D, H, W)`
- Output: `(N, C, D, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size):
super(DropBlock3D, self).__init__(drop_prob, block_size)
def forward(self, x):
# shape: (bsize, channels, depth, height, width)
assert x.dim() == 5, \
"Expected input with 5 dimensions (bsize, channels, depth, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
mask_reduction = self.block_size // 2
mask_depth = x.shape[-3] - mask_reduction
mask_height = x.shape[-2] - mask_reduction
mask_width = x.shape[-1] - mask_reduction
mask_sizes = [mask_depth, mask_height, mask_width]
if any([x <= 0 for x in mask_sizes]):
raise ValueError('Input of shape {} is too small for block_size {}'
.format(tuple(x.shape), self.block_size))
# get gamma value
gamma = self._compute_gamma(x, mask_sizes)
# sample mask
mask = Bernoulli(gamma).sample((x.shape[0], *mask_sizes))
# place mask on input device
mask = mask.to(x.device)
# compute block mask
block_mask = self._compute_block_mask(mask)
# apply block mask
out = x * block_mask[:, None, :, :, :]
# scale output
out = out * block_mask.numel() / block_mask.sum()
return out
def _compute_block_mask(self, mask):
block_mask = F.conv3d(mask[:, None, :, :, :],
torch.ones((1, 1, self.block_size, self.block_size, self.block_size)).to(
mask.device),
padding=int(np.ceil(self.block_size // 2) + 1))
delta = self.block_size // 2
input_depth = mask.shape[-3] + delta
input_height = mask.shape[-2] + delta
input_width = mask.shape[-1] + delta
depth_to_crop = block_mask.shape[-3] - input_depth
height_to_crop = block_mask.shape[-2] - input_height
width_to_crop = block_mask.shape[-1] - input_width
if depth_to_crop != 0:
block_mask = block_mask[:, :, :-depth_to_crop, :, :]
if height_to_crop != 0:
block_mask = block_mask[:, :, :, :-height_to_crop, :]
if width_to_crop != 0:
block_mask = block_mask[:, :, :, :, :-width_to_crop]
block_mask = (block_mask >= 1).to(device=block_mask.device, dtype=block_mask.dtype)
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x, mask_sizes):
feat_volume = x.shape[-3] * x.shape[-2] * x.shape[-1]
mask_volume = mask_sizes[-3] * mask_sizes[-2] * mask_sizes[-1]
return (self.drop_prob / (self.block_size ** 3)) * (feat_volume / mask_volume)
class DropBlock2DMix(nn.Module):
"""
DropBlock with mixing
"""
def __init__(self, drop_prob, block_size, test=False, extra_mix=False):
super(DropBlock2DMix, self).__init__()
print("[*] using Dropblock mix")
print("[***] Setting fixed drop_window")
self.drop_prob = drop_prob
self.block_size = block_size
self.test = test
self.extra_mix = extra_mix
def forward(self, x, index=None):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, \
"Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.:
# raise ValueError("Dropblock mix, drop_prob > 0 ?")
return x, None, None, None
else:
# sample from a mask
mask_reduction = self.block_size // 2
mask_height = x.shape[-2] - mask_reduction
mask_width = x.shape[-1] - mask_reduction
mask_sizes = [mask_height, mask_width]
if any([x <= 0 for x in mask_sizes]):
raise ValueError('Input of shape {} is too small for block_size {}'
.format(tuple(x.shape), self.block_size))
# get gamma value
# gamma = self._compute_gamma(x, mask_sizes)
# if self.test: print("--- gamma ---\n", gamma)
# # sample mask
# mask = Bernoulli(gamma).sample((x.shape[0], *mask_sizes))
# if self.test: print("--- mask ---\n", mask)
bs = x.shape[0]
hw = mask_width
# rads = torch.randint(0, hw * hw, (bs,)).long()
rads = torch.randint(0, hw * hw, (1,)).long().repeat(bs) # repeat mode
rads = torch.unsqueeze(rads, 1)
mask = torch.zeros(bs, hw*hw).scatter_(1, rads, 1).reshape((bs,hw,hw))
# place mask on input device
mask = mask.to(x.device) # mask.cuda()
# compute block mask
block_mask = self._compute_block_mask(mask)
if self.test: print("--- block mask ---\n", block_mask)
# apply block mask
# out = x * block_mask[:, None, :, :]
batch_size = x.size()[0]
if index == None:
index = torch.randperm(batch_size).cuda()
verse_mask = torch.ones_like(block_mask) - block_mask
if self.test: print("--- verse_mask ---", verse_mask)
if self.extra_mix:
lam = 0.05
out = x*block_mask[:, None, :, :]*(1-lam) + \
x*verse_mask[:, None, :, :]*lam + \
x[index, :]*block_mask[:, None, :, :]*(lam) + \
x[index, :]*verse_mask[:, None, :, :]*(1-lam)
else:
out = x * block_mask[:, None, :, :] + \
x[index, :] * verse_mask[:, None, :, :] #* 0.1 这里需注意,是否加0.1
# if self.test: out = x * block_mask[:, None, :, :] + x[index, :] * verse_mask[:, None, :, :] * 0.1
# scale output
# out = out * block_mask.numel() / block_mask.sum()
return out, index, block_mask, verse_mask
def _compute_block_mask(self, mask):
block_mask = F.conv2d(mask[:, None, :, :],
torch.ones((1, 1, self.block_size, self.block_size)).to(
mask.device),
padding=int(np.ceil(self.block_size // 2) + 1))
delta = self.block_size // 2
input_height = mask.shape[-2] + delta
input_width = mask.shape[-1] + delta
height_to_crop = block_mask.shape[-2] - input_height
width_to_crop = block_mask.shape[-1] - input_width
if height_to_crop != 0:
block_mask = block_mask[:, :, :-height_to_crop, :]
if width_to_crop != 0:
block_mask = block_mask[:, :, :, :-width_to_crop]
block_mask = (block_mask >= 1).to(device=block_mask.device, dtype=block_mask.dtype)
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x, mask_sizes):
feat_area = x.shape[-2] * x.shape[-1]
mask_area = mask_sizes[-2] * mask_sizes[-1]
return (self.drop_prob / (self.block_size ** 2)) * (feat_area / mask_area)
class DropChannel(nn.Module):
"""
DropBlock with mixing
"""
def __init__(self, drop_prob, test=False, extra_mix=False):
super(DropChannel, self).__init__()
print("[*] using Drop Channel")
self.drop_prob = drop_prob
# self.block_size = block_size
self.test = test
self.extra_mix = extra_mix
def forward(self, x, index=None):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, \
"Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.:
# raise ValueError("Dropblock mix, drop_prob > 0 ?")
# print("On Testing")
return x
else:
bs = x.shape[0]
c = x.shape[1]
h, w = x.shape[-1], x.shape[-2]
index = torch.unsqueeze(Bernoulli(1.0 - self.drop_prob).sample((bs, c,)) , 2)
mask = index.repeat(1,1,h*w).reshape(bs,c,h,w).to(x.device)
out = x * mask
return out
class DropCBlock(nn.Module):
def __init__(self, drop_prob, block_size, test=False):
super(DropCBlock, self).__init__()
self.drop_prob = drop_prob
self.block_size = block_size
self.test = test
print("[*] Using Drop Cblock ``")
def forward(self, x):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, \
"Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
# sample from a mask
mask_reduction = self.block_size // 2
mask_height = x.shape[-2] - mask_reduction
mask_width = x.shape[-1] - mask_reduction
mask_sizes = [mask_height, mask_width]
if any([x <= 0 for x in mask_sizes]):
raise ValueError('Input of shape {} is too small for block_size {}'
.format(tuple(x.shape), self.block_size))
# get gamma value
gamma = self._compute_gamma(x, mask_sizes)
# sample mask
mask = Bernoulli(gamma).sample((x.shape[0], *mask_sizes))
# place mask on input device
mask = mask.to(x.device) # mask.cuda()
# compute block mask
block_mask = self._compute_block_mask(mask)
channel_mask = self._compute_channel_mask(x, block_mask)
# apply block mask
out = x * channel_mask
return out
def _compute_block_mask(self, mask):
kernel = torch.ones((1, 1, self.block_size, self.block_size)).to(
mask.device)
block_mask = F.conv2d(mask[:, None, :, :],kernel,
padding=int(np.ceil(self.block_size // 2) + 1))
delta = self.block_size // 2
input_height = mask.shape[-2] + delta
input_width = mask.shape[-1] + delta
height_to_crop = block_mask.shape[-2] - input_height
width_to_crop = block_mask.shape[-1] - input_width
if height_to_crop != 0:
block_mask = block_mask[:, :, :-height_to_crop, :]
if width_to_crop != 0:
block_mask = block_mask[:, :, :, :-width_to_crop]
block_mask = (block_mask >= 1).to(device=block_mask.device, dtype=block_mask.dtype)
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x, mask_sizes):
feat_area = x.shape[-2] * x.shape[-1]
mask_area = mask_sizes[-2] * mask_sizes[-1]
return (self.drop_prob / (self.block_size ** 2)) * (feat_area / mask_area)
def _compute_channel_mask(self, x, block_mask):
bs = x.shape[0]
c = x.shape[1]
h, w = x.shape[-1], x.shape[-2]
index = torch.unsqueeze(Bernoulli(1.0 - self.drop_prob).sample((bs, c,)), 2)
mask = index.repeat(1, 1, h * w).reshape(bs, c, h, w).to(x.device)
mask = mask * (1 - block_mask)[:, None, :, :]
mask = 1 - mask
# print("c mask", mask)
# print("block mask", block_mask)
return mask
if __name__ == "__main__":
db = DropBlock2DMix(0.2, 3, True)
cb = DropCBlock(0.2, 3)
from torch.autograd import Variable
import numpy as np
hw = 6
x = torch.Tensor(np.arange(hw*hw*4).reshape((1,4,hw,hw)))
x = Variable(x)
# xx, index = db(x)
xx = cb(x)
# print(xx, xx.size())
# print(index) | 36.178649 | 111 | 0.554498 |
1e09b12768596f61a69f5ab120b0ee1ccfb44eba | 6,768 | py | Python | tests/test_asyncfilters.py | captainmalloc/jinja | 540b260198285f0ed41fbe80c0b1b6f13be579c1 | [
"BSD-3-Clause"
] | 1 | 2020-07-06T05:53:18.000Z | 2020-07-06T05:53:18.000Z | tests/test_asyncfilters.py | captainmalloc/jinja | 540b260198285f0ed41fbe80c0b1b6f13be579c1 | [
"BSD-3-Clause"
] | null | null | null | tests/test_asyncfilters.py | captainmalloc/jinja | 540b260198285f0ed41fbe80c0b1b6f13be579c1 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from jinja2 import Environment
from jinja2.utils import Markup
async def make_aiter(iter):
for item in iter:
yield item
def mark_dualiter(parameter, factory):
def decorator(f):
return pytest.mark.parametrize(parameter, [
lambda: factory(),
lambda: make_aiter(factory()),
])(f)
return decorator
@pytest.fixture
def env_async():
return Environment(enable_async=True)
@mark_dualiter('foo', lambda: range(10))
def test_first(env_async, foo):
tmpl = env_async.from_string('{{ foo()|first }}')
out = tmpl.render(foo=foo)
assert out == '0'
@mark_dualiter('items', lambda: [
{'foo': 1, 'bar': 2},
{'foo': 2, 'bar': 3},
{'foo': 1, 'bar': 1},
{'foo': 3, 'bar': 4}
])
def test_groupby(env_async, items):
tmpl = env_async.from_string('''
{%- for grouper, list in items()|groupby('foo') -%}
{{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
{%- endfor %}''')
assert tmpl.render(items=items).split('|') == [
"1: 1, 2: 1, 1",
"2: 2, 3",
"3: 3, 4",
""
]
@mark_dualiter('items', lambda: [('a', 1), ('a', 2), ('b', 1)])
def test_groupby_tuple_index(env_async, items):
tmpl = env_async.from_string('''
{%- for grouper, list in items()|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}''')
assert tmpl.render(items=items) == 'a:1:2|b:1|'
def make_articles():
class Date(object):
def __init__(self, day, month, year):
self.day = day
self.month = month
self.year = year
class Article(object):
def __init__(self, title, *date):
self.date = Date(*date)
self.title = title
return [
Article('aha', 1, 1, 1970),
Article('interesting', 2, 1, 1970),
Article('really?', 3, 1, 1970),
Article('totally not', 1, 1, 1971)
]
@mark_dualiter('articles', make_articles)
def test_groupby_multidot(env_async, articles):
tmpl = env_async.from_string('''
{%- for year, list in articles()|groupby('date.year') -%}
{{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
{%- endfor %}''')
assert tmpl.render(articles=articles).split('|') == [
'1970[aha][interesting][really?]',
'1971[totally not]',
''
]
@mark_dualiter('int_items', lambda: [1, 2, 3])
def test_join_env_int(env_async, int_items):
tmpl = env_async.from_string('{{ items()|join("|") }}')
out = tmpl.render(items=int_items)
assert out == '1|2|3'
@mark_dualiter('string_items', lambda: ["<foo>", Markup("<span>foo</span>")])
def test_join_string_list(string_items):
env2 = Environment(autoescape=True, enable_async=True)
tmpl = env2.from_string(
'{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render(items=string_items) == '<foo><span>foo</span>'
def make_users():
class User(object):
def __init__(self, username):
self.username = username
return map(User, ['foo', 'bar'])
@mark_dualiter('users', make_users)
def test_join_attribute(env_async, users):
tmpl = env_async.from_string('''{{ users()|join(', ', 'username') }}''')
assert tmpl.render(users=users) == 'foo, bar'
@mark_dualiter('items', lambda: [1, 2, 3, 4, 5])
def test_simple_reject(env_async, items):
tmpl = env_async.from_string('{{ items()|reject("odd")|join("|") }}')
assert tmpl.render(items=items) == '2|4'
@mark_dualiter('items', lambda: [None, False, 0, 1, 2, 3, 4, 5])
def test_bool_reject(env_async, items):
tmpl = env_async.from_string(
'{{ items()|reject|join("|") }}'
)
assert tmpl.render(items=items) == 'None|False|0'
@mark_dualiter('items', lambda: [1, 2, 3, 4, 5])
def test_simple_select(env_async, items):
tmpl = env_async.from_string('{{ items()|select("odd")|join("|") }}')
assert tmpl.render(items=items) == '1|3|5'
@mark_dualiter('items', lambda: [None, False, 0, 1, 2, 3, 4, 5])
def test_bool_select(env_async, items):
tmpl = env_async.from_string(
'{{ items()|select|join("|") }}'
)
assert tmpl.render(items=items) == '1|2|3|4|5'
def make_users():
class User(object):
def __init__(self, name, is_active):
self.name = name
self.is_active = is_active
return [
User('john', True),
User('jane', True),
User('mike', False),
]
@mark_dualiter('users', make_users)
def test_simple_select_attr(env_async, users):
tmpl = env_async.from_string(
'{{ users()|selectattr("is_active")|'
'map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == 'john|jane'
@mark_dualiter('items', lambda: list('123'))
def test_simple_map(env_async, items):
tmpl = env_async.from_string('{{ items()|map("int")|sum }}')
assert tmpl.render(items=items) == '6'
def test_map_sum(env_async): # async map + async filter
tmpl = env_async.from_string('{{ [[1,2], [3], [4,5,6]]|map("sum")|list }}')
assert tmpl.render() == '[3, 3, 15]'
@mark_dualiter('users', make_users)
def test_attribute_map(env_async, users):
tmpl = env_async.from_string('{{ users()|map(attribute="name")|join("|") }}')
assert tmpl.render(users=users) == 'john|jane|mike'
def test_empty_map(env_async):
tmpl = env_async.from_string('{{ none|map("upper")|list }}')
assert tmpl.render() == '[]'
@mark_dualiter('items', lambda: [1, 2, 3, 4, 5, 6])
def test_sum(env_async, items):
tmpl = env_async.from_string('''{{ items()|sum }}''')
assert tmpl.render(items=items) == '21'
@mark_dualiter('items', lambda: [
{'value': 23},
{'value': 1},
{'value': 18},
])
def test_sum_attributes(env_async, items):
tmpl = env_async.from_string('''{{ items()|sum('value') }}''')
assert tmpl.render(items=items)
def test_sum_attributes_nested(env_async):
tmpl = env_async.from_string('''{{ values|sum('real.value') }}''')
assert tmpl.render(values=[
{'real': {'value': 23}},
{'real': {'value': 1}},
{'real': {'value': 18}},
]) == '42'
def test_sum_attributes_tuple(env_async):
tmpl = env_async.from_string('''{{ values.items()|sum('1') }}''')
assert tmpl.render(values={
'foo': 23,
'bar': 1,
'baz': 18,
}) == '42'
@mark_dualiter('items', lambda: range(10))
def test_slice(env_async, items):
tmpl = env_async.from_string('{{ items()|slice(3)|list }}|'
'{{ items()|slice(3, "X")|list }}')
out = tmpl.render(items=items)
assert out == ("[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]")
| 29.04721 | 81 | 0.577423 |
8b25f3ce3b1a6af4045d1da7c07c69ec55e6693b | 7,503 | py | Python | po_pattern/Lib/site-packages/behave/log_capture.py | tomekwszelaki/page-object-pattern-python | eb0ff7a1329b88149d743f2bc4a827c984e72dc3 | [
"MIT"
] | 1 | 2017-03-22T04:25:35.000Z | 2017-03-22T04:25:35.000Z | po_pattern/Lib/site-packages/behave/log_capture.py | tomekwszelaki/page-object-pattern-python | eb0ff7a1329b88149d743f2bc4a827c984e72dc3 | [
"MIT"
] | null | null | null | po_pattern/Lib/site-packages/behave/log_capture.py | tomekwszelaki/page-object-pattern-python | eb0ff7a1329b88149d743f2bc4a827c984e72dc3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import logging
import functools
from logging.handlers import BufferingHandler
import re
class RecordFilter(object):
'''Implement logging record filtering as per the configuration
--logging-filter option.
'''
def __init__(self, names):
self.include = set()
self.exclude = set()
for name in names.split(','):
if name[0] == '-':
self.exclude.add(name[1:])
else:
self.include.add(name)
def filter(self, record):
if self.exclude:
return record.name not in self.exclude
return record.name in self.include
# originally from nostetsts logcapture plugin
class LoggingCapture(BufferingHandler):
'''Capture logging events in a memory buffer for later display or query.
Captured logging events are stored on the attribute
:attr:`~LoggingCapture.buffer`:
.. attribute:: buffer
This is a list of captured logging events as `logging.LogRecords`_.
.. _`logging.LogRecords`:
http://docs.python.org/library/logging.html#logrecord-objects
By default the format of the messages will be::
'%(levelname)s:%(name)s:%(message)s'
This may be overridden using standard logging formatter names in the
configuration variable ``logging_format``.
The level of logging captured is set to ``logging.NOTSET`` by default. You
may override this using the configuration setting ``logging_level`` (which
is set to a level name.)
Finally there may be `filtering of logging events`__ specified by the
configuration variable ``logging_filter``.
.. __: behave.html#command-line-arguments
'''
def __init__(self, config, level=None):
BufferingHandler.__init__(self, 1000)
self.config = config
self.old_handlers = []
self.old_level = None
# set my formatter
fmt = datefmt = None
if config.logging_format:
fmt = config.logging_format
else:
fmt = '%(levelname)s:%(name)s:%(message)s'
if config.logging_datefmt:
datefmt = config.logging_datefmt
fmt = logging.Formatter(fmt, datefmt)
self.setFormatter(fmt)
# figure the level we're logging at
if level is not None:
self.level = level
elif config.logging_level:
self.level = config.logging_level
else:
self.level = logging.NOTSET
# construct my filter
if config.logging_filter:
self.addFilter(RecordFilter(config.logging_filter))
def __bool__(self):
return bool(self.buffer)
def flush(self):
pass # do nothing
def truncate(self):
self.buffer = []
def getvalue(self):
return '\n'.join(self.formatter.format(r) for r in self.buffer)
def findEvent(self, pattern):
'''Search through the buffer for a message that matches the given
regular expression.
Returns boolean indicating whether a match was found.
'''
pattern = re.compile(pattern)
for record in self.buffer:
if pattern.search(record.getMessage()) is not None:
return True
return False
def any_errors(self):
'''Search through the buffer for any ERROR or CRITICAL events.
Returns boolean indicating whether a match was found.
'''
return any(record for record in self.buffer
if record.levelname in ('ERROR', 'CRITICAL'))
def inveigle(self):
'''Turn on logging capture by replacing all existing handlers
configured in the logging module.
If the config var logging_clear_handlers is set then we also remove
all existing handlers.
We also set the level of the root logger.
The opposite of this is :meth:`~LoggingCapture.abandon`.
'''
root_logger = logging.getLogger()
if self.config.logging_clear_handlers:
# kill off all the other log handlers
for logger in logging.Logger.manager.loggerDict.values():
if hasattr(logger, "handlers"):
for handler in logger.handlers:
self.old_handlers.append((logger, handler))
logger.removeHandler(handler)
# sanity check: remove any existing LoggingCapture
for handler in root_logger.handlers[:]:
if isinstance(handler, LoggingCapture):
root_logger.handlers.remove(handler)
elif self.config.logging_clear_handlers:
self.old_handlers.append((root_logger, handler))
root_logger.removeHandler(handler)
# right, we're it now
root_logger.addHandler(self)
# capture the level we're interested in
self.old_level = root_logger.level
root_logger.setLevel(self.level)
def abandon(self):
'''Turn off logging capture.
If other handlers were removed by :meth:`~LoggingCapture.inveigle` then
they are reinstated.
'''
root_logger = logging.getLogger()
for handler in root_logger.handlers[:]:
if handler is self:
root_logger.handlers.remove(handler)
if self.config.logging_clear_handlers:
for logger, handler in self.old_handlers:
logger.addHandler(handler)
if self.old_level is not None:
# -- RESTORE: Old log.level before inveigle() was used.
root_logger.setLevel(self.old_level)
self.old_level = None
# pre-1.2 backwards compatibility
MemoryHandler = LoggingCapture
def capture(*args, **kw):
'''Decorator to wrap an *environment file function* in log file capture.
It configures the logging capture using the *behave* context - the first
argument to the function being decorated (so don't use this to decorate
something that doesn't have *context* as the first argument.)
The basic usage is:
.. code-block: python
@capture
def after_scenario(context, scenario):
...
The function prints any captured logging (at the level determined by the
``log_level`` configuration setting) directly to stdout, regardless of
error conditions.
It is mostly useful for debugging in situations where you are seeing a
message like::
No handlers could be found for logger "name"
The decorator takes an optional "level" keyword argument which limits the
level of logging captured, overriding the level in the run's configuration:
.. code-block: python
@capture(level=logging.ERROR)
def after_scenario(context, scenario):
...
This would limit the logging captured to just ERROR and above, and thus
only display logged events if they are interesting.
'''
def create_decorator(func, level=None):
def f(context, *args):
h = LoggingCapture(context.config, level=level)
h.inveigle()
try:
func(context, *args)
finally:
h.abandon()
v = h.getvalue()
if v:
print('Captured Logging:')
print(v)
return f
if not args:
return functools.partial(create_decorator, level=kw.get('level'))
else:
return create_decorator(args[0])
| 32.064103 | 79 | 0.629615 |
d62ae55987f39192376d6d29cd332c0362d46d5e | 3,822 | py | Python | tests/cupy_tests/cuda_tests/test_compiler.py | garanews/cupy | b76611c1d5f165d01391977e3ba717ee7d7fda25 | [
"MIT"
] | 1 | 2020-12-27T13:06:35.000Z | 2020-12-27T13:06:35.000Z | tests/cupy_tests/cuda_tests/test_compiler.py | trivialfis/cupy | e0f0b3bba0fa1e809780350a4562b2ed1d1fe024 | [
"MIT"
] | 4 | 2020-09-15T01:49:38.000Z | 2020-12-11T03:52:35.000Z | tests/cupy_tests/cuda_tests/test_compiler.py | trivialfis/cupy | e0f0b3bba0fa1e809780350a4562b2ed1d1fe024 | [
"MIT"
] | 2 | 2018-07-21T13:44:26.000Z | 2019-03-25T11:30:16.000Z | import pickle
import unittest
from unittest import mock
import cupy
from cupy.cuda import compiler
from cupy import testing
def cuda_version():
return cupy.cuda.runtime.runtimeGetVersion()
@testing.gpu
class TestNvrtcArch(unittest.TestCase):
def setUp(self):
cupy.clear_memo() # _get_arch result is cached
def _check_get_arch(self, device_cc, expected_arch):
with mock.patch('cupy.cuda.device.Device') as device_class:
device_class.return_value.compute_capability = device_cc
assert compiler._get_arch() == expected_arch
cupy.clear_memo() # _get_arch result is cached
@unittest.skipUnless(9000 <= cuda_version(), 'Requires CUDA 9.x or later')
def test_get_arch_cuda9(self):
self._check_get_arch('62', '62') # Tegra
self._check_get_arch('70', '70')
self._check_get_arch('72', '72') # Tegra
@unittest.skipUnless(10010 <= cuda_version(),
'Requires CUDA 10.1 or later')
def test_get_arch_cuda101(self):
self._check_get_arch('75', '75')
@unittest.skipUnless(11000 <= cuda_version(),
'Requires CUDA 11.0 or later')
def test_get_arch_cuda11(self):
self._check_get_arch('80', '80')
def _compile(self, arch):
compiler.compile_using_nvrtc('', arch=arch)
@unittest.skipUnless(9000 <= cuda_version(), 'Requires CUDA 9.0 or later')
def test_compile_cuda9(self):
# This test is intended to detect specification change in NVRTC API.
# It should not fail.
# (Do not test `compute_72` as it is for Tegra.)
self._compile('70')
# It should fail.
self.assertRaises(
compiler.CompileException, self._compile, '73')
@unittest.skipUnless(10010 <= cuda_version() < 11000,
'Requires CUDA 10.1 or 10.2')
def test_compile_cuda101(self):
# This test is intended to detect specification change in NVRTC API.
# It should not fail.
# (Do not test `compute_72` as it is for Tegra.)
self._compile('75')
# It should fail. (compute_80 is not supported until CUDA 11)
self.assertRaises(
compiler.CompileException, self._compile, '80')
@unittest.skipUnless(11000 <= cuda_version(),
'Requires CUDA 11.0 or later')
def test_compile_cuda11(self):
# This test is intended to detect specification change in NVRTC API.
# It should not fail.
self._compile('80')
# It should fail.
self.assertRaises(
compiler.CompileException, self._compile, '83')
@testing.gpu
class TestNvrtcStderr(unittest.TestCase):
def test(self):
# An error message contains the file name `kern.cu`
with self.assertRaisesRegex(compiler.CompileException, 'kern.cu'):
compiler.compile_using_nvrtc('a')
class TestIsValidKernelName(unittest.TestCase):
def test_valid(self):
self.assertTrue(compiler.is_valid_kernel_name('valid_name_1'))
def test_empty(self):
self.assertFalse(compiler.is_valid_kernel_name(''))
def test_start_with_digit(self):
self.assertFalse(compiler.is_valid_kernel_name('0_invalid'))
def test_new_line(self):
self.assertFalse(compiler.is_valid_kernel_name('invalid\nname'))
def test_symbol(self):
self.assertFalse(compiler.is_valid_kernel_name('invalid$name'))
def test_space(self):
self.assertFalse(compiler.is_valid_kernel_name('invalid name'))
class TestExceptionPicklable(unittest.TestCase):
def test(self):
e1 = compiler.CompileException('msg', 'fn.cu', 'fn', ('-ftz=true',))
e2 = pickle.loads(pickle.dumps(e1))
assert e1.args == e2.args
assert str(e1) == str(e2)
| 32.117647 | 78 | 0.654631 |
91e495959b49c0309a9263ca8a277d9523f2d1ea | 1,419 | py | Python | tools/git_util.py | amikey/chromium | cda6acd369a3498f24d4e784c0ad060602a0a810 | [
"BSD-3-Clause"
] | 97 | 2015-05-03T20:16:08.000Z | 2021-11-16T13:16:25.000Z | tools/git_util.py | amikey/chromium | cda6acd369a3498f24d4e784c0ad060602a0a810 | [
"BSD-3-Clause"
] | 3 | 2020-10-16T03:15:20.000Z | 2020-10-26T15:31:01.000Z | tools/git_util.py | amikey/chromium | cda6acd369a3498f24d4e784c0ad060602a0a810 | [
"BSD-3-Clause"
] | 29 | 2015-03-28T02:20:33.000Z | 2021-10-29T20:58:28.000Z | # Copyright (c) 2014 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file
from exec_util import exec_cmd
import os
def is_checkout(path):
""" Returns true if the path represents a git checkout. """
return os.path.exists(os.path.join(path, '.git'))
def get_hash(path = '.', branch = 'HEAD'):
""" Returns the git hash for the specified branch/tag/hash. """
cmd = "git rev-parse %s" % (branch)
result = exec_cmd(cmd, path)
if result['out'] != '':
return result['out'].strip()
return 'Unknown'
def get_url(path = '.'):
""" Returns the origin url for the specified path. """
cmd = "git config --get remote.origin.url"
result = exec_cmd(cmd, path)
if result['out'] != '':
return result['out'].strip()
return 'Unknown'
def get_svn_revision(path = '.', branch = 'HEAD'):
""" Returns the SVN revision associated with the specified path and git
branch/tag/hash. """
svn_rev = "None"
cmd = "git log --grep=^git-svn-id: -n 1 %s" % (branch)
result = exec_cmd(cmd, path)
if result['err'] == '':
for line in result['out'].split('\n'):
if line.find("git-svn-id") > 0:
svn_rev = line.split("@")[1].split()[0]
break
return svn_rev
def get_changed_files(path = '.'):
""" Retrieves the list of changed files. """
# not implemented
return []
| 31.533333 | 75 | 0.641297 |
53b65520d263c32833f66f965d0c351da0101682 | 3,692 | py | Python | project/settings.py | tarek1500/Python-Blog | d9e8e90c20ad5906139b1468e4d195038dfbfc11 | [
"MIT"
] | null | null | null | project/settings.py | tarek1500/Python-Blog | d9e8e90c20ad5906139b1468e4d195038dfbfc11 | [
"MIT"
] | 5 | 2020-02-27T18:12:25.000Z | 2020-02-27T22:43:52.000Z | project/settings.py | tarek1500/Python-Blog | d9e8e90c20ad5906139b1468e4d195038dfbfc11 | [
"MIT"
] | 1 | 2020-05-16T00:34:02.000Z | 2020-05-16T00:34:02.000Z | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog',
'cp',
'users',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': config('DB_ENGINE'),
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': config('DB_PORT')
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
# STATIC_URL = os.path.join(BASE_DIR,'/static/')
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'landpage'
LOGIN_URL = 'login'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_cdn')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| 25.638889 | 91 | 0.694745 |
521daa6a7eb43766e014be96edbede50f2ce0e00 | 6,654 | py | Python | sdk/python/pulumi_azure_native/network/v20180401/get_vpn_gateway.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180401/get_vpn_gateway.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180401/get_vpn_gateway.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVpnGatewayResult',
'AwaitableGetVpnGatewayResult',
'get_vpn_gateway',
]
@pulumi.output_type
class GetVpnGatewayResult:
"""
VpnGateway Resource.
"""
def __init__(__self__, bgp_settings=None, connections=None, etag=None, id=None, location=None, name=None, policies=None, provisioning_state=None, tags=None, type=None, virtual_hub=None):
if bgp_settings and not isinstance(bgp_settings, dict):
raise TypeError("Expected argument 'bgp_settings' to be a dict")
pulumi.set(__self__, "bgp_settings", bgp_settings)
if connections and not isinstance(connections, list):
raise TypeError("Expected argument 'connections' to be a list")
pulumi.set(__self__, "connections", connections)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if policies and not isinstance(policies, dict):
raise TypeError("Expected argument 'policies' to be a dict")
pulumi.set(__self__, "policies", policies)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_hub and not isinstance(virtual_hub, dict):
raise TypeError("Expected argument 'virtual_hub' to be a dict")
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
"""
Local network gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@property
@pulumi.getter
def connections(self) -> Optional[Sequence['outputs.VpnConnectionResponse']]:
"""
list of all vpn connections to the gateway.
"""
return pulumi.get(self, "connections")
@property
@pulumi.getter
def etag(self) -> str:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policies(self) -> Optional['outputs.PoliciesResponse']:
"""
The policies applied to this vpn gateway.
"""
return pulumi.get(self, "policies")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional['outputs.SubResourceResponse']:
"""
The VirtualHub to which the gateway belongs
"""
return pulumi.get(self, "virtual_hub")
class AwaitableGetVpnGatewayResult(GetVpnGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVpnGatewayResult(
bgp_settings=self.bgp_settings,
connections=self.connections,
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
policies=self.policies,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
virtual_hub=self.virtual_hub)
def get_vpn_gateway(gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVpnGatewayResult:
"""
VpnGateway Resource.
:param str gateway_name: The name of the gateway.
:param str resource_group_name: The resource group name of the VpnGateway.
"""
__args__ = dict()
__args__['gatewayName'] = gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180401:getVpnGateway', __args__, opts=opts, typ=GetVpnGatewayResult).value
return AwaitableGetVpnGatewayResult(
bgp_settings=__ret__.bgp_settings,
connections=__ret__.connections,
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
policies=__ret__.policies,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
virtual_hub=__ret__.virtual_hub)
| 33.77665 | 190 | 0.632251 |
33c259c2fd2c60e13cb2e46f5e6cb3d9af37b1fc | 16,334 | py | Python | utils/dash_utils.py | mintusf/land_cover_tracking | e1c389729fdb628e4d34e0d427f43f6317eba4ee | ["MIT"] | 1 | 2022-01-21T03:33:22.000Z | 2022-01-21T03:33:22.000Z | utils/dash_utils.py | mintusf/land_cover_tracking | e1c389729fdb628e4d34e0d427f43f6317eba4ee | ["MIT"] | 45 | 2021-08-03T11:45:16.000Z | 2021-10-20T11:56:24.000Z | utils/dash_utils.py | mintusf/land_cover_tracking | e1c389729fdb628e4d34e0d427f43f6317eba4ee | ["MIT"] | null | null | null |
import glob
import os
from typing import Dict, Tuple, List
import cv2
import numpy as np
from utils.ai_engine_wrapper import ai_engine_infer
from utils.sentinel_api import get_raster_from_coord
from utils.io_utils import (
convert_sat_np_for_vis,
get_next_folder_name,
write_json,
load_json,
load_yaml,
)
from ai_engine.utils.infer_utils import get_path_for_output
from config.default import CfgNode
from ai_engine.utils.visualization_utils import generate_save_alphablend
def get_coord_from_feature(feature):
bounds = feature["properties"]["_bounds"]
return f"lat {bounds[0]['lat']:.2f}, long {bounds[0]['lng']:.2f}"
def get_polygon_coord(polygons, selected_polygon):
return [
[
polygons["features"][selected_polygon]["properties"]["_bounds"][0]["lat"],
polygons["features"][selected_polygon]["properties"]["_bounds"][1]["lat"],
],
[
polygons["features"][selected_polygon]["properties"]["_bounds"][0]["lng"],
polygons["features"][selected_polygon]["properties"]["_bounds"][1]["lng"],
],
]
def get_coord_single_key(
coord: Dict[str, List[float]], vertical: str, horizontal: str
) -> List[float]:
"""Returns coordinates of a selected corner
Args:
coord (Dict[str, List[float]]): Polygon coordinates
vertical (str): String from ["top", "bottom"] indicating the corner
horizontal (str): String from ["left", "right"] indicating the corner
Returns:
List[float]: [latitude, longitude] coordinates of the corner
"""
idx_vertical = 1 if vertical == "top" else 0
    idx_horizontal = 0 if horizontal == "left" else 1
return [coord["lat"][idx_vertical], coord["long"][idx_horizontal]]
def get_coord_multiple_keys(
keys: List[str],
coords: Dict[str, Dict[str, List[float]]],
    vertical: str,
    horizontal: str,
) -> Tuple[List[float]]:
"""Returns lists of corner coordinates of all polygons
Args:
keys (List[str]): List containing paths to polygon files
        coords (Dict[str, Dict[str, List[float]]]): Mapping from tile image path to its coordinates
vertical (str): String from ["top", "bottom"] indicating the corner
horizontal (str): String from ["left", "right"] indicating the corner
Returns:
Tuple[List[float]]: Lists of corner coordinates of all polygons
"""
lat_coords = []
long_coords = []
for key in keys:
coord = coords[key]
lat_coord, long_coord = get_coord_single_key(coord, vertical, horizontal)
lat_coords.append(lat_coord)
long_coords.append(long_coord)
return lat_coords, long_coords
def get_corner_coord(
polygon_id: str, vertical: str, horizontal: str, config: CfgNode
) -> List[float]:
"""Returns [lattitude, longitude] coordinates of a polygon corner
Args:
polygon_id (str): Id of the selected polygon
vertical (str): String from ["top", "bottom"] indicating the corner
horizontal (str): String from ["left", "right"] indicating the corner
config (CfgNode): App config
Returns:
List[float]: Corner's coordinates
"""
coords = load_json(os.path.join(config.DATA_DIR, config.POLYGON_JSON_NAME))
directory = os.path.join(config.DATA_DIR, str(polygon_id))
keys = glob.glob(os.path.join(directory, "tile_*.png"))
keys = [key for key in keys if "pred" not in key]
assert vertical in ["top", "bottom"]
comp_func_vertical = max if vertical == "top" else min
assert horizontal in ["left", "right"]
comp_func_horizontal = max if horizontal == "right" else min
vertical_coords, horizontal_coords = get_coord_multiple_keys(
keys, coords, vertical, horizontal
)
vertical_coord = comp_func_vertical(vertical_coords)
horizontal_coord = comp_func_horizontal(horizontal_coords)
return [vertical_coord, horizontal_coord]
def get_polygon_id_from_json(all_coords_json, coord):
for polygon_id, coord_dict in all_coords_json.items():
if coord == coord_dict:
return int(os.path.split(polygon_id)[1].split("_")[0])
return -1
def download_action(
polygons: dict,
selected_polygon_download: str,
config: CfgNode,
year: int,
season: int,
) -> Tuple[List[str], List[float]]:
"""Downloads satellite data using sentinel API.
Due to API limitation, if a selected polygon is too big,
    it is split into smaller ones which are downloaded instead.
Returns paths to downloaded images and corresponding coordinates.
Args:
        polygons (dict): Dictionary with coordinates of all polygons
selected_polygon_download (str): Id of selected polygon for download
config (CfgNode): App config
year (int): Year of the satellite data
season (str): Season of the satellite data
Returns:
Tuple[List[str], List[float]]: A Tuple containing
* List of paths to downloaded image
* List of corresponding coordinates, in format:
[[south, west], [north, east]]
"""
coord = get_polygon_coord(polygons, int(selected_polygon_download))
all_coords_json = load_json(os.path.join(config.DATA_DIR, config.POLYGON_JSON_NAME))
polygon_id = get_polygon_id_from_json(all_coords_json, coord)
if polygon_id >= 0:
foldername = str(polygon_id)
else:
foldername = get_next_folder_name(config.DATA_DIR)
savedir = os.path.join(config.DATA_DIR, foldername + f"_y{year}_s{season}")
# Save coords for whole polygon and all tiles
coords = get_raster_from_coord(coord[0], coord[1], config, savedir, year, season)
write_json(os.path.join(config.DATA_DIR, config.POLYGON_JSON_NAME), coords)
img_paths = glob.glob(f"{savedir}/*.npy")
paths = []
coords_collected = []
for img_path in img_paths:
img = convert_sat_np_for_vis(img_path)
png_path = img_path.replace(".npy", ".png")
cv2.imwrite(png_path, img)
converted_coord = get_converted_coords_from_dict(coords, png_path)
paths.append(png_path)
coords_collected.append(converted_coord)
return paths, coords_collected
def merge_preds(polygon_id: str, tile_name: str, savedir: str, config: CfgNode) -> None:
"""Merges subgrid predictions to build an image for whole polygon
Args:
polygon_id (str): Id of polygon selected for prediction
tile_name (str): Name of the tile from the polygon
savedir (str): Saving directory
config (CfgNode): App config
"""
polygon_dir = os.path.join(config.DATA_DIR, str(polygon_id))
whole_img = cv2.imread(os.path.join(polygon_dir, f"{tile_name}.png"))
for pred_path in glob.glob(
os.path.join(polygon_dir, f"{tile_name}", "alphablend", "*.png")
):
parts = os.path.splitext(os.path.split(pred_path)[1])[0].split("_")
x_min = int(parts[2])
y_min = int(parts[3])
x_max = int(parts[4])
y_max = int(parts[5])
subgrid = cv2.imread(pred_path)
whole_img[x_min:x_max, y_min:y_max, :] = subgrid
cv2.imwrite(savedir, whole_img)
def predict_action(
config: CfgNode, selected_polygon_pred: str
) -> Tuple[List[str], List[float]]:
"""Performs prediction on selected downloaded are
Args:
config (CfgNode): App config
selected_polygon_pred (str): Id of selected area, corresponds to folder name
Returns:
Tuple[List[str], List[float]]: A Tuple containing
* List of paths to downloaded image
* List of corresponding coordinates, in format:
[[south, west], [north, east]]
"""
paths = []
coords_collected = []
input_files_dir = os.path.join(config.DATA_DIR, selected_polygon_pred)
for input_file in glob.glob(os.path.join(input_files_dir, "*.npy")):
tile_name = os.path.splitext(os.path.split(input_file)[1])[0]
ai_engine_infer(
config,
tile_path=input_file,
checkpoint=config.INFER.WEIGHTS_PATH,
destination=os.path.join(input_files_dir, tile_name),
)
savedir = os.path.join(input_files_dir, f"{tile_name}_pred.png")
merge_preds(selected_polygon_pred, tile_name, savedir, config)
paths.append(savedir)
converted_coords = get_converted_coords(config, input_file)
coords_collected.append(converted_coords)
return paths, coords_collected
def generate_alpha_for_tile(mask_file: str, mask_config: dict, alpha: float) -> None:
"""Generates alphablend for a single tile
Args:
mask_file (str): Path to the predicted tile mask
mask_config (dict): Mask config, should have
"alpha", "class2label" and "colors" defined
alpha (float): Alpha for alphablend
"""
input_single_file = mask_file.replace("/mask_np", "")
mask = np.load(mask_file)
input_img = convert_sat_np_for_vis(input_single_file)
name = os.path.splitext(os.path.split(mask_file)[1])[0]
destination = os.path.split(os.path.split(mask_file)[0])[0]
output_path = get_path_for_output("alphablend", destination, name)
generate_save_alphablend(
input_img,
mask,
mask_config,
output_path,
alpha,
)
def convert_coords(coords_in: Dict[str, List[float]]) -> List[List[float]]:
"""Converts coordinates from a dict to a list
Args:
coords_in (Dict[str, List[float]]): Dictionary with following elements:
'lat':
[south, north]
'long':
[west, east]
Returns:
List[List[float]]: Coordinates in format [[south, west], [north, east]]
"""
converted_coord = [
[coords_in["lat"][0], coords_in["long"][0]],
[coords_in["lat"][1], coords_in["long"][1]],
]
return converted_coord
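# A minimal usage sketch for convert_coords. The helper below and its values
# are hypothetical and exist only for illustration; nothing in the app calls it.
def _example_convert_coords():  # pragma: no cover
    """Illustrative only."""
    coords_in = {"lat": [50.0, 51.0], "long": [14.0, 15.0]}
    # Returns [[south, west], [north, east]] -> [[50.0, 14.0], [51.0, 15.0]]
    return convert_coords(coords_in)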
def get_converted_coords_from_dict(
coords_dict: Dict[str, Dict[str, List[float]]], key: str
) -> List[List[float]]:
"""Returns covnerted coordinates from a dictionary given a key
Args:
        coords_dict (Dict[str, Dict[str, List[float]]]): Dictionary with following elements:
path_to_a_png_tile_file:
'lat':
[south value, north value]
'long':
[west value, east value]
key (str): Key in a dictionary
Returns:
List[List[float]]: Coordinates in format [[south, west], [north, east]]
"""
tile_coord = coords_dict[key]
converted_coords = convert_coords(tile_coord)
return converted_coords
def get_converted_coords(config: CfgNode, input_file: str) -> List[List[float]]:
"""Returns coordinates given a path to an image.
Supported image extensions are: [npy, png]
Args:
config (CfgNode): App config
input_file (str): Path to an image
Returns:
List[List[float]]: Coordinates in format [[south, west], [north, east]]
"""
coords = load_json(os.path.join(config.DATA_DIR, config.POLYGON_JSON_NAME))
png_path = input_file.replace("npy", "png")
converted_coord = get_converted_coords_from_dict(coords, png_path)
return converted_coord
def new_alpha_action(
    config: CfgNode, selected_polygon_analyze: str, alpha: float
) -> Tuple[List[str], List[float]]:
"""Performs prediction on selected downloaded are
Args:
config (CfgNode): App config
selected_polygon_pred (str): Id of selected area, corresponds to folder name
Returns:
Tuple[List[str], List[float]]: A Tuple containing
* List of paths to downloaded image
* List of corresponding coordinates, in format:
[[south, west], [north, east]]
"""
paths = []
coords_collected = []
mask_config = load_yaml(config.DATASET.MASK.CONFIG)
polygon_root = os.path.join(config.DATA_DIR, selected_polygon_analyze)
for input_file in glob.glob(os.path.join(polygon_root, "*.npy")):
tile_name = os.path.splitext(os.path.split(input_file)[1])[0]
save_filename = f"{tile_name}_pred_" + f"{alpha:.02f}".replace(".", "") + ".png"
savedir = os.path.join(polygon_root, save_filename)
if not os.path.isfile(savedir):
tile_masks_dir = os.path.join(polygon_root, tile_name, "mask_np")
tile_masks = glob.glob(os.path.join(tile_masks_dir, "*.npy"))
for mask_file in tile_masks:
generate_alpha_for_tile(mask_file, mask_config, alpha)
merge_preds(selected_polygon_analyze, tile_name, savedir, config)
paths.append(savedir)
converted_coords = get_converted_coords(config, input_file)
coords_collected.append(converted_coords)
return paths, coords_collected
def refresh_action(config: CfgNode) -> Tuple[List[str], List[float]]:
"""Collects all available images, both raw and predictions,
    and returns them together with corresponding coordinates.
Args:
config (CfgNode): App config
Returns:
Tuple[List[str], List[float]]: A Tuple containing
* List of paths to downloaded image
* List of corresponding coordinates, in format:
[[south, west], [north, east]]
"""
coords_all = load_json(os.path.join(config.DATA_DIR, config.POLYGON_JSON_NAME))
paths = []
coords = []
for key, tile_coord in coords_all.items():
if "tile" not in key:
continue
pred_path = key.replace(".png", "_pred.png")
if os.path.isfile(pred_path):
url = pred_path
else:
url = key
converted_coords = convert_coords(tile_coord)
paths.append(url)
coords.append(converted_coords)
return paths, coords
def get_classes_count(polygon_id: str, config: CfgNode) -> Dict[str, int]:
"""Calculates count of each class for the polygon
Args:
polygon_id (str): Polygon id
config (CfgNode): App config
Returns:
Dict[str, int]: Classes counts
"""
polygon_dir = os.path.join(config.DATA_DIR, str(polygon_id))
all_masks = glob.glob(os.path.join(polygon_dir, "*", "mask_np", "*.npy"))
mask_config = load_yaml(config.DATASET.MASK.CONFIG)
class2label = mask_config["class2label"]
all_counts = np.zeros(len(class2label))
for mask_path in all_masks:
mask = np.load(mask_path)
classes_count = np.bincount(mask.flatten(), minlength=len(class2label))
all_counts += classes_count
labels_count = {}
for class_id, label in class2label.items():
labels_count[label] = all_counts[class_id]
return labels_count
def get_top_labels(labels_counts: Dict[str, int], k: int) -> Tuple[np.array, List[str]]:
"""Returns top k classes with highest pixels count
Args:
labels_counts (Dict[str, int]): Input dictionary with classes counts
k (int): Top k to select
Returns:
        Tuple[np.array, List[str]]: Counts and labels of the top k classes
"""
sorted_labels = sorted(labels_counts.keys(), key=lambda x: labels_counts[x])[::-1]
counts = []
labels = []
for i, label in enumerate(sorted_labels):
if i == k:
break
if labels_counts[label]:
counts.append(labels_counts[label])
labels.append(label)
return np.array(counts), labels
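# A minimal usage sketch for get_top_labels. The class names and counts below
# are hypothetical and the helper is illustration only; nothing in the app calls it.
def _example_get_top_labels():  # pragma: no cover
    """Illustrative only."""
    labels_counts = {"forest": 400, "water": 120, "urban": 0, "crops": 75}
    # Zero-count classes are dropped; with k=2 the two largest counts win:
    # (array([400, 120]), ["forest", "water"])
    return get_top_labels(labels_counts, k=2)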
def add_choice(
choices: list, coord: Dict[str, List[float]], option: int, year: int, season_id: int
) -> None:
"""Adds choice to the list of choices used for a dropdown component
Args:
choices (list): List of choices
coord (Dict[str, List[float]]): Coordinates of the choice
        option (int): Option number
        year (int): Year of the satellite data
        season_id (int): Season id
"""
coord_str = f"lat {coord['lat'][0]:.2f}, long {coord['long'][0]:.2f}"
if season_id == 1:
season_str = "JAN-MAR"
elif season_id == 2:
season_str = "APR-JUN"
elif season_id == 3:
season_str = "JUL-SEP"
elif season_id == 4:
season_str = "OCT-DEC"
else:
raise ValueError("Season id is not valid")
choices.append(
{
"label": f"Polygon {option} (Coord: {coord_str}), {year} {season_str}",
"value": f"{option}_y{year}_s{season_id}",
}
)
| 33.609053 | 88 | 0.648769 |
62d811062382466eb69e96b5a2d9c657805df443 | 6,244 | py | Python | datalad_xnat/init.py | bpoldrack/datalad-xnat | 64e1f1653af23957687341b66c4852813eb70a95 | ["MIT"] | null | null | null | datalad_xnat/init.py | bpoldrack/datalad-xnat | 64e1f1653af23957687341b66c4852813eb70a95 | ["MIT"] | null | null | null | datalad_xnat/init.py | bpoldrack/datalad-xnat | 64e1f1653af23957687341b66c4852813eb70a95 | ["MIT"] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
"""
import logging
from datalad.interface.base import Interface
from datalad.interface.utils import eval_results
from datalad.interface.base import build_doc
from datalad.support.constraints import (
EnsureNone,
)
from datalad.support.exceptions import CapturedException
from datalad.support.param import Parameter
from datalad.utils import (
quote_cmdlinearg,
)
from datalad.distribution.dataset import (
datasetmethod,
EnsureDataset,
require_dataset,
)
from .platform import _XNAT
__docformat__ = 'restructuredtext'
lgr = logging.getLogger('datalad.xnat.init')
@build_doc
class Init(Interface):
"""Initialize an existing dataset to track an XNAT project
"""
_examples_ = [
dict(
text='Initialize a dataset in the current directory',
code_cmd='datalad xnat-init http://central.xnat.org:8080',
code_py='xnat_init("http://central.xnat.org:8080")'),
]
_params_ = dict(
dataset=Parameter(
args=("-d", "--dataset"),
metavar='DATASET',
doc="""specify the dataset to perform the initialization on""",
constraints=EnsureDataset() | EnsureNone()),
url=Parameter(
args=("url",),
doc="""XNAT instance URL""",
),
project=Parameter(
args=("-p", "--project",),
doc="""name of an XNAT project to track""",
),
path=Parameter(
args=("-O", "--path",),
doc="""Specify the directory structure for the downloaded files, and
if/where a subdataset should be created.
To include the subject, session, or scan values, use the following
format: {subject}/{session}/{scan}/
To insert a subdataset at a specific directory level use '//':
{subject}/{session}//{scan}/""",
),
force=Parameter(
args=("-f", "--force",),
doc="""force (re-)initialization""",
action='store_true'),
**_XNAT.cmd_params
)
@staticmethod
@datasetmethod(name='xnat_init')
@eval_results
def __call__(url,
path="{subject}/{session}/{scan}/",
project=None,
force=False,
credential=None,
dataset=None):
ds = require_dataset(
dataset, check_installed=True, purpose='initialization')
config = ds.config
# TODO needs a better solution, with_pathsep adds a platform pathsep
# and ruins everything on windows
#path = with_pathsep(path)
# prep for yield
res = dict(
action='xnat_init',
path=ds.path,
type='dataset',
logger=lgr,
refds=ds.path,
)
try:
platform = _XNAT(url, credential=credential)
except Exception as e:
ce = CapturedException(e)
yield dict(
res,
status='error',
message=('During authentication the XNAT server sent %s', ce),
exception=ce
)
return
if project is None:
from datalad.ui import ui
projects = platform.get_projects()
ui.message(
'No project name specified. The following projects are '
'available on {} for user {}:'.format(
url,
'anonymous' if platform.credential_name == 'anonymous'
else platform.authenticated_user))
for p in sorted(projects):
# list and prep for C&P
# TODO multi-column formatting?
ui.message(" {}".format(quote_cmdlinearg(p)))
return
# query the specified project to make sure it exists and is accessible
try:
# TODO for big projects this may not be the cheapest possible query
# that ensures existence of the project
nsubj = platform.get_nsubjs(project)
except Exception as e:
yield dict(
res,
status='error',
message=(
'Failed to obtain information on project %s from XNAT. '
'Full error:\n%s',
project, e),
)
return
lgr.info('XNAT reports %i subjects currently on-record for project %s',
nsubj, project)
# check if dataset already initialized
auth_dir = ds.pathobj / '.datalad' / 'providers'
if auth_dir.exists() and not force:
yield dict(
res,
status='error',
message='Dataset found already initialized, '
'use `force` to reinitialize',
)
return
# put essential configuration into the dataset
# TODO https://github.com/datalad/datalad-xnat/issues/42
config.set('datalad.xnat.default.url',
url, where='dataset', reload=False)
config.set('datalad.xnat.default.project',
project, where='dataset', reload=False)
config.set('datalad.xnat.default.path',
path, where='dataset', reload=False)
config.set('datalad.xnat.default.credential-name',
platform.credential_name, where='dataset')
ds.save(
path=ds.pathobj / '.datalad' / 'config',
to_git=True,
message="Configure default XNAT url and project",
)
if not platform.credential_name == 'anonymous':
# Configure XNAT access authentication
ds.run_procedure(spec='cfg_xnat_dataset')
yield dict(
res,
status='ok',
)
return
| 32.520833 | 87 | 0.537796 |
76a78d623716b0a76de5fb22b37ad171fb792709 | 36,266 | py | Python | cluster/examples/kubernetes/ceph/create-external-cluster-resources.py | silverhandy/rook | 0e87348093f36e6c70eb985455dfc189d8467ec2 | ["Apache-2.0"] | 2 | 2021-02-21T12:42:22.000Z | 2021-03-04T05:23:30.000Z | cluster/examples/kubernetes/ceph/create-external-cluster-resources.py | silverhandy/rook | 0e87348093f36e6c70eb985455dfc189d8467ec2 | ["Apache-2.0"] | null | null | null | cluster/examples/kubernetes/ceph/create-external-cluster-resources.py | silverhandy/rook | 0e87348093f36e6c70eb985455dfc189d8467ec2 | ["Apache-2.0"] | null | null | null |
'''
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import json
import argparse
import unittest
import re
import requests
from os import linesep as LINESEP
# backward compatibility with 2.x
try:
ModuleNotFoundError
except:
ModuleNotFoundError = ImportError
try:
import rados
except ModuleNotFoundError as noModErr:
print("Error: %s\nExiting the script..." % noModErr)
sys.exit(1)
try:
# for 2.7.x
from StringIO import StringIO
except ModuleNotFoundError:
# for 3.x
from io import StringIO
class ExecutionFailureException(Exception):
pass
class RadosJSON:
EXTERNAL_USER_NAME = "client.healthchecker"
EMPTY_OUTPUT_LIST = "Empty output list"
@classmethod
def gen_arg_parser(cls, args_to_parse=None):
argP = argparse.ArgumentParser()
argP.add_argument("--verbose", "-v",
action='store_true', default=False)
argP.add_argument("--ceph-conf", "-c",
help="Provide a ceph conf file.", type=str)
argP.add_argument("--run-as-user", "-u",
help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'",
default=cls.EXTERNAL_USER_NAME, type=str)
argP.add_argument("--format", "-t", choices=["json", "bash"],
default='json', help="Provides the output format (json | bash)")
argP.add_argument("--cluster-name", default="openshift-storage",
help="Ceph cluster name")
argP.add_argument("--output", "-o", default="",
help="Output will be stored into the provided file")
argP.add_argument("--cephfs-filesystem-name", default="",
help="Provides the name of the Ceph filesystem")
argP.add_argument("--cephfs-data-pool-name", default="",
help="Provides the name of the cephfs data pool")
argP.add_argument("--rbd-data-pool-name", default="", required=True,
help="Provides the name of the RBD datapool")
argP.add_argument("--namespace", default="",
help="Namespace where CephCluster is running")
argP.add_argument("--rgw-pool-prefix", default="default",
help="RGW Pool prefix")
argP.add_argument("--rgw-endpoint", default="", required=False,
help="Rados GateWay endpoint (in <IP>:<PORT> format)")
argP.add_argument("--monitoring-endpoint", default="", required=False,
help="Ceph Manager prometheus exporter endpoints comma separated list of <IP> entries")
argP.add_argument("--monitoring-endpoint-port", default="9283", required=False,
help="Ceph Manager prometheus exporter port")
if args_to_parse:
assert type(args_to_parse) == list, \
"Argument to 'gen_arg_parser' should be a list"
else:
args_to_parse = sys.argv[1:]
return argP.parse_args(args_to_parse)
def _invalid_endpoint(self, endpoint_str):
try:
ipv4, port = endpoint_str.split(':')
except ValueError:
raise ExecutionFailureException(
"Not a proper endpoint: {}, <IP>:<PORT>, format is expected".format(endpoint_str))
ipParts = ipv4.split('.')
if len(ipParts) != 4:
raise ExecutionFailureException(
"Not a valid IP address: {}".format(ipv4))
for eachPart in ipParts:
if not eachPart.isdigit():
raise ExecutionFailureException(
"IP address parts should be numbers: {}".format(ipv4))
intPart = int(eachPart)
if intPart < 0 or intPart > 254:
raise ExecutionFailureException(
"Out of range IP addresses: {}".format(ipv4))
if not port.isdigit():
raise ExecutionFailureException("Port not valid: {}".format(port))
intPort = int(port)
if intPort < 1 or intPort > 2**16-1:
raise ExecutionFailureException(
"Out of range port number: {}".format(port))
return False
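    # Illustrative behaviour of _invalid_endpoint (the endpoint strings below are
    # hypothetical examples, not defaults used by the script):
    #   self._invalid_endpoint("10.10.212.122:9000")  -> False (well-formed <IP>:<PORT>)
    #   self._invalid_endpoint("10.10.212.122")       -> raises ExecutionFailureException (missing port)
    #   self._invalid_endpoint("10.10.212.122:70000") -> raises ExecutionFailureException (port out of range)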
def endpoint_dial(self, endpoint_str):
try:
ep = "http://" + endpoint_str
r = requests.head(ep)
rc = r.status_code
if rc != 200:
raise ExecutionFailureException(
"wrong return code {} on rgw endpoint http header request".format(rc))
except requests.ConnectionError:
raise ExecutionFailureException(
"failed to connect to rgw endpoint {}".format(ep))
def __init__(self, arg_list=None):
self.out_map = {}
self._excluded_keys = set()
self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
self.output_file = self._arg_parser.output
self.ceph_conf = self._arg_parser.ceph_conf
self.run_as_user = self._arg_parser.run_as_user
if not self.run_as_user:
self.run_as_user = self.EXTERNAL_USER_NAME
if self.ceph_conf:
self.cluster = rados.Rados(conffile=self.ceph_conf)
else:
self.cluster = rados.Rados()
self.cluster.conf_read_file()
self.cluster.connect()
def shutdown(self):
if self.cluster.state == "connected":
self.cluster.shutdown()
def get_fsid(self):
return str(self.cluster.get_fsid())
def _common_cmd_json_gen(self, cmd_json):
cmd = json.dumps(cmd_json, sort_keys=True)
ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b'')
if self._arg_parser.verbose:
print("Command Input: {}".format(cmd))
print("Return Val: {}\nCommand Output: {}\nError Message: {}\n----------\n".format(
ret_val, cmd_out, err_msg))
json_out = {}
if ret_val == 0:
json_out = json.loads(cmd_out)
return ret_val, json_out, err_msg
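    # Illustrative call pattern for _common_cmd_json_gen (the command shown is one
    # this script already issues; the described output values are a sketch only):
    #   ret_val, json_out, err_msg = self._common_cmd_json_gen(
    #       {"prefix": "quorum_status", "format": "json"})
    #   # ret_val == 0 on success and json_out holds the parsed monitor reply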
def get_ceph_external_mon_data(self):
cmd_json = {"prefix": "quorum_status", "format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'quorum_status' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
q_leader_name = json_out['quorum_leader_name']
q_leader_details = {}
q_leader_matching_list = [l for l in json_out['monmap']['mons']
if l['name'] == q_leader_name]
if len(q_leader_matching_list) == 0:
raise ExecutionFailureException("No matching 'mon' details found")
q_leader_details = q_leader_matching_list[0]
ip_port = str(q_leader_details['public_addr'].split('/')[0])
return "{}={}".format(str(q_leader_name), ip_port)
def get_active_ceph_mgr(self):
if self._arg_parser.monitoring_endpoint:
return self._arg_parser.monitoring_endpoint+':'+self._arg_parser.monitoring_endpoint_port
else:
cmd_json = {"prefix": "mgr services", "format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'mgr services' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
monitoring_endpoint = json_out.get('prometheus')
if not monitoring_endpoint:
raise ExecutionFailureException(
"'prometheus' service not found, is the exporter enabled?'.\n")
monitoring_endpoint = monitoring_endpoint.replace("http://", "")
monitoring_endpoint = monitoring_endpoint.replace("/", "")
return monitoring_endpoint
def create_cephCSIKeyring_cephFSProvisioner(self):
'''
command: ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'
'''
cmd_json = {"prefix": "auth get-or-create",
"entity": "client.csi-cephfs-provisioner",
"caps": ["mon", "allow r", "mgr", "allow rw",
"osd", "allow rw tag cephfs metadata=*"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-cephfs-provisioner' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_cephCSIKeyring_cephFSNode(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": "client.csi-cephfs-node",
"caps": ["mon", "allow r",
"mgr", "allow rw",
"osd", "allow rw tag cephfs *=*",
"mds", "allow rw"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-cephfs-node' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_cephCSIKeyring_RBDProvisioner(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": "client.csi-rbd-provisioner",
"caps": ["mon", "profile rbd",
"mgr", "allow rw",
"osd", "profile rbd"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-rbd-provisioner' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def get_cephfs_data_pool_details(self):
cmd_json = {"prefix": "fs ls", "format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt, report an error
if ret_val != 0:
# if fs and data_pool arguments are not set, silently return
if self._arg_parser.cephfs_filesystem_name == "" and self._arg_parser.cephfs_data_pool_name == "":
return
# if user has provided any of the
# '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments,
# raise an exception as we are unable to verify the args
raise ExecutionFailureException("'fs ls' ceph call failed with error: {}".format(err_msg))
matching_json_out = {}
# if '--cephfs-filesystem-name' argument is provided,
# check whether the provided filesystem-name exists or not
if self._arg_parser.cephfs_filesystem_name:
# get the matching list
matching_json_out_list = [matched for matched in json_out
if str(matched['name']) == self._arg_parser.cephfs_filesystem_name]
# unable to find a matching fs-name, raise an error
if len(matching_json_out_list) == 0:
raise ExecutionFailureException(
("Filesystem provided, '{}', " +
"is not found in the fs-list: '{}'").format(
self._arg_parser.cephfs_filesystem_name,
[str(x['name']) for x in json_out]))
matching_json_out = matching_json_out_list[0]
# if cephfs filesystem name is not provided,
# try to get a default fs name by doing the following
else:
# a. check if there is only one filesystem is present
if len(json_out) == 1:
matching_json_out = json_out[0]
# b. or else, check if data_pool name is provided
elif self._arg_parser.cephfs_data_pool_name:
# and if present, check whether there exists a fs which has the data_pool
for eachJ in json_out:
if self._arg_parser.cephfs_data_pool_name in eachJ['data_pools']:
matching_json_out = eachJ
break
# if there is no matching fs exists, that means provided data_pool name is invalid
if not matching_json_out:
raise ExecutionFailureException(
"Provided data_pool name, {}, does not exists".format(
self._arg_parser.cephfs_data_pool_name))
# c. if nothing is set and couldn't find a default,
else:
# just return silently
return
if matching_json_out:
self._arg_parser.cephfs_filesystem_name = str(matching_json_out['name'])
if type(matching_json_out['data_pools']) == list:
# if the user has already provided data-pool-name,
# through --cephfs-data-pool-name
if self._arg_parser.cephfs_data_pool_name:
# if the provided name is not matching with the one in the list
if self._arg_parser.cephfs_data_pool_name not in matching_json_out['data_pools']:
raise ExecutionFailureException(
"{}: '{}', {}: {}".format(
"Provided data-pool-name",
self._arg_parser.cephfs_data_pool_name,
"doesn't match from the data-pools' list",
[str(x) for x in matching_json_out['data_pools']]))
# if data_pool name is not provided,
# then try to find a default data pool name
else:
# if no data_pools exist, silently return
if len(matching_json_out['data_pools']) == 0:
return
self._arg_parser.cephfs_data_pool_name = str(
matching_json_out['data_pools'][0])
# if there are more than one 'data_pools' exist,
# then warn the user that we are using the selected name
if len(matching_json_out['data_pools']) > 1:
print("{}: {}\n{}: '{}'\n".format(
"WARNING: Multiple data pools detected",
[str(x) for x in matching_json_out['data_pools']],
"Using the data-pool",
self._arg_parser.cephfs_data_pool_name))
def create_cephCSIKeyring_RBDNode(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": "client.csi-rbd-node",
"caps": ["mon", "profile rbd",
"osd", "profile rbd"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-rbd-node' command failed\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_checkerKey(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": self.run_as_user,
"caps": ["mon", "allow r, allow command quorum_status, allow command version",
"mgr", "allow command config",
"osd", ("allow rwx pool={0}.rgw.meta, " +
"allow r pool=.rgw.root, " +
"allow rw pool={0}.rgw.control, " +
"allow rx pool={0}.rgw.log, " +
"allow x pool={0}.rgw.buckets.index").format(self._arg_parser.rgw_pool_prefix)],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create {}' command failed\n".format(self.run_as_user) +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def _gen_output_map(self):
if self.out_map:
return
pools_to_validate = [self._arg_parser.rbd_data_pool_name]
# if rgw_endpoint is provided, validate it
if self._arg_parser.rgw_endpoint:
self._invalid_endpoint(self._arg_parser.rgw_endpoint)
self.endpoint_dial(self._arg_parser.rgw_endpoint)
rgw_pool_to_validate = ["{0}.rgw.meta".format(self._arg_parser.rgw_pool_prefix),
".rgw.root",
"{0}.rgw.control".format(
self._arg_parser.rgw_pool_prefix),
"{0}.rgw.log".format(
self._arg_parser.rgw_pool_prefix),
"{0}.rgw.buckets.index".format(self._arg_parser.rgw_pool_prefix)]
pools_to_validate.extend(rgw_pool_to_validate)
for pool in pools_to_validate:
if not self.cluster.pool_exists(pool):
raise ExecutionFailureException(
"The provided pool {} does not exist".format(pool))
self._excluded_keys.add('CLUSTER_NAME')
self.get_cephfs_data_pool_details()
self.out_map['NAMESPACE'] = self._arg_parser.namespace
self.out_map['CLUSTER_NAME'] = self._arg_parser.cluster_name
self.out_map['ROOK_EXTERNAL_FSID'] = self.get_fsid()
self.out_map['ROOK_EXTERNAL_USERNAME'] = self.run_as_user
self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'] = self.get_ceph_external_mon_data()
self.out_map['ROOK_EXTERNAL_USER_SECRET'] = self.create_checkerKey()
self.out_map['CSI_RBD_NODE_SECRET_SECRET'] = self.create_cephCSIKeyring_RBDNode()
self.out_map['CSI_RBD_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_RBDProvisioner()
self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode()
self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
self.out_map['MONITORING_ENDPOINT'] = self.get_active_ceph_mgr().split(":")[
0]
self.out_map['MONITORING_ENDPOINT_PORT'] = self.get_active_ceph_mgr().split(":")[
1]
self.out_map['CEPHFS_POOL_NAME'] = self._arg_parser.cephfs_data_pool_name
self.out_map['CEPHFS_FS_NAME'] = self._arg_parser.cephfs_filesystem_name
self.out_map['RBD_POOL_NAME'] = self._arg_parser.rbd_data_pool_name
self.out_map['RGW_POOL_PREFIX'] = self._arg_parser.rgw_pool_prefix
def gen_shell_out(self):
self._gen_output_map()
shOutIO = StringIO()
for k, v in self.out_map.items():
if v and k not in self._excluded_keys:
shOutIO.write('export {}={}{}'.format(k, v, LINESEP))
shOut = shOutIO.getvalue()
shOutIO.close()
return shOut
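    # Illustrative shape of the generated shell output (placeholder values only,
    # not real credentials):
    #   export ROOK_EXTERNAL_FSID=<fsid>
    #   export ROOK_EXTERNAL_USERNAME=client.healthchecker
    #   export ROOK_EXTERNAL_CEPH_MON_DATA=<mon-name>=<ip>:<port>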
def gen_json_out(self):
self._gen_output_map()
json_out = [
{
"name": "rook-ceph-mon-endpoints",
"kind": "ConfigMap",
"data": {
"data": self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'],
"maxMonId": "0",
"mapping": "{}"
}
},
{
"name": "rook-ceph-mon",
"kind": "Secret",
"data": {
"admin-secret": "admin-secret",
"fsid": self.out_map['ROOK_EXTERNAL_FSID'],
"mon-secret": "mon-secret"
},
},
{
"name": "rook-ceph-operator-creds",
"kind": "Secret",
"data": {
"userID": self.out_map['ROOK_EXTERNAL_USERNAME'],
"userKey": self.out_map['ROOK_EXTERNAL_USER_SECRET']
}
},
{
"name": "rook-csi-rbd-node",
"kind": "Secret",
"data": {
"userID": 'csi-rbd-node',
"userKey": self.out_map['CSI_RBD_NODE_SECRET_SECRET']
}
},
{
"name": "rook-csi-rbd-provisioner",
"kind": "Secret",
"data": {
"userID": 'csi-rbd-provisioner',
"userKey": self.out_map['CSI_RBD_PROVISIONER_SECRET']
},
},
{
"name": "rook-csi-cephfs-node",
"kind": "Secret",
"data": {
"adminID": 'csi-cephfs-node',
"adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET']
}
},
{
"name": "rook-csi-cephfs-provisioner",
"kind": "Secret",
"data": {
"adminID": 'csi-cephfs-provisioner',
"adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']
},
},
{
"name": "ceph-rbd",
"kind": "StorageClass",
"data": {
"pool": self.out_map['RBD_POOL_NAME']
}
},
{
"name": "monitoring-endpoint",
"kind": "CephCluster",
"data": {
"MonitoringEndpoint": self.out_map['MONITORING_ENDPOINT'],
"MonitoringPort": self.out_map['MONITORING_ENDPOINT_PORT']
}
}
]
# if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass
if self.out_map['CEPHFS_FS_NAME']:
json_out.append(
{
"name": "cephfs",
"kind": "StorageClass",
"data": {
"fsName": self.out_map['CEPHFS_FS_NAME'],
"pool": self.out_map['CEPHFS_POOL_NAME']
}
})
# if 'RGW_ENDPOINT' exists, then only add 'ceph-rgw' StorageClass
if self.out_map['RGW_ENDPOINT']:
json_out.append(
{
"name": "ceph-rgw",
"kind": "StorageClass",
"data": {
"endpoint": self.out_map['RGW_ENDPOINT'],
"poolPrefix": self.out_map['RGW_POOL_PREFIX']
}
})
return json.dumps(json_out)+LINESEP
def main(self):
generated_output = ''
if self._arg_parser.format == 'json':
generated_output = self.gen_json_out()
elif self._arg_parser.format == 'bash':
generated_output = self.gen_shell_out()
else:
raise ExecutionFailureException("Unsupported format: {}".format(
self._arg_parser.format))
print('{}'.format(generated_output))
if self.output_file and generated_output:
fOut = open(self.output_file, 'w')
fOut.write(generated_output)
fOut.close()
################################################
##################### MAIN #####################
################################################
if __name__ == '__main__':
rjObj = RadosJSON()
try:
rjObj.main()
except ExecutionFailureException as err:
print("Excecution Failed: {}".format(err))
except KeyError as kErr:
print("KeyError: %s", kErr)
except OSError as osErr:
print("Error while trying to output the data: {}".format(osErr))
finally:
rjObj.shutdown()
################################################
##################### TEST #####################
################################################
# this is mainly for testing and could be used where 'rados' is not available
class DummyRados(object):
def __init__(self):
self.return_val = 0
self.err_message = ''
self.state = 'connected'
self.cmd_output_map = {}
self.cmd_names = {}
self._init_cmd_output_map()
def _init_cmd_output_map(self):
self.cmd_names['fs ls'] = '''{"format": "json", "prefix": "fs ls"}'''
self.cmd_names['quorum_status'] = '''{"format": "json", "prefix": "quorum_status"}'''
# all the commands and their output
self.cmd_output_map[self.cmd_names['fs ls']
] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-data0"]}]'''
self.cmd_output_map[self.cmd_names['quorum_status']] = '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}'''
self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]'''
self.cmd_output_map['''{"caps": ["mon", "profile rbd", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd","osd":"profile rbd"}}]'''
self.cmd_output_map['''{"caps": ["mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd","osd":"profile rbd"}}]'''
self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs *=*"}}]'''
self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs metadata=*"}}]'''
def shutdown(self):
pass
def get_fsid(self):
return 'af4e1673-0b72-402d-990a-22d2919d0f1c'
def conf_read_file(self):
pass
def connect(self):
pass
def mon_command(self, cmd, out):
json_cmd = json.loads(cmd)
json_cmd_str = json.dumps(json_cmd, sort_keys=True)
cmd_output = self.cmd_output_map[json_cmd_str]
return self.return_val, \
cmd_output, \
"{}".format(self.err_message).encode('utf-8')
@classmethod
    def Rados(cls, conffile=None):
return DummyRados()
# In order to test the package,
# cd <script_directory>
# python -m unittest --verbose <script_name_without_dot_py>
class TestRadosJSON(unittest.TestCase):
def setUp(self):
print("{}".format("I am in setup"))
self.rjObj = RadosJSON(['--rbd-data-pool-name=abc',
'--rgw-endpoint=10.10.212.122:9000', '--format=json'])
# for testing, we are using 'DummyRados' object
self.rjObj.cluster = DummyRados.Rados()
def tearDown(self):
print("{}".format("I am tearing down the setup"))
self.rjObj.shutdown()
def test_method_main_output(self):
print("JSON Output")
self.rjObj._arg_parser.format = "json"
self.rjObj.main()
print("\n\nShell Output")
self.rjObj._arg_parser.format = "bash"
self.rjObj.main()
print("\n\nNon compatible output (--abcd)")
try:
self.rjObj._arg_parser.format = 'abcd'
self.rjObj.main()
self.fail("Function should have thrown an Exception")
except ExecutionFailureException as err:
print("Exception thrown successfully: {}".format(err))
def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("{}".format(csiKeyring))
def test_non_zero_return_and_error(self):
self.rjObj.cluster.return_val = 1
self.rjObj.cluster.err_message = "Dummy Error"
try:
self.rjObj.create_checkerKey()
self.fail("Failed to raise an exception, 'ExecutionFailureException'")
except ExecutionFailureException as err:
print("Successfully thrown error.\nError: {}".format(err))
def test_multi_filesystem_scenario(self):
cmd_key = self.rjObj.cluster.cmd_names['fs ls']
cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key]
cmd_json_out = json.loads(cmd_out)
second_fs_details = dict(cmd_json_out[0])
second_fs_details['name'] += '-2'
cmd_json_out.append(second_fs_details)
self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
# multiple filesystem present,
# but no specific '--cephfs-filesystem-name' argument provided
try:
self.rjObj.get_cephfs_data_pool_details()
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
# pass an existing filesystem name
try:
self.rjObj._arg_parser.cephfs_filesystem_name = second_fs_details['name']
self.rjObj.get_cephfs_data_pool_details()
except ExecutionFailureException as err:
self.fail("Should not have thrown error: {}".format(err))
# pass a non-existing filesystem name
try:
self.rjObj._arg_parser.cephfs_filesystem_name += "-non-existing-fs-name"
self.rjObj.get_cephfs_data_pool_details()
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
# empty file-system array
try:
self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps([])
self.rjObj.get_cephfs_data_pool_details()
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
def test_multi_data_pool_scenario(self):
cmd_key = self.rjObj.cluster.cmd_names['fs ls']
cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key]
cmd_json_out = json.loads(cmd_out)
first_fs_details = cmd_json_out[0]
new_data_pool_name = 'myfs-data1'
first_fs_details['data_pools'].append(new_data_pool_name)
print("Modified JSON Cmd Out: {}".format(cmd_json_out))
self.rjObj._arg_parser.cephfs_data_pool_name = new_data_pool_name
self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
self.rjObj.get_cephfs_data_pool_details()
# use a non-existing data-pool-name
bad_data_pool_name = 'myfs-data3'
self.rjObj._arg_parser.cephfs_data_pool_name = bad_data_pool_name
try:
self.rjObj.get_cephfs_data_pool_details()
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
# empty data-pool scenario
first_fs_details['data_pools'] = []
self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
try:
self.rjObj.get_cephfs_data_pool_details()
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
def test_valid_rgw_endpoint(self):
self.rjObj._invalid_endpoint("10.10.212.133:8000")
# invalid port
try:
self.rjObj._invalid_endpoint("10.10.212.133:238000")
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
# out of range IP
try:
self.rjObj._invalid_endpoint("10.1033.212.133:8000")
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
# mal formatted IP
try:
self.rjObj._invalid_endpoint("10.103..212.133:8000")
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
try:
self.rjObj._invalid_endpoint("10.103.212.133::8000")
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
try:
self.rjObj._invalid_endpoint("10.10.103.212.133:8000")
self.fail("An Exception was expected to be thrown")
except ExecutionFailureException as err:
print("Successfully thrown error: {}".format(err))
| 48.94197 | 861 | 0.578338 |
6d5d5283ebc438129007df927840aa7692cb2d41 | 1,649 | py | Python | torchmodels/modules/feedforward.py | kaniblu/pytorch-models | a50ef66ecc3de498e0856489d801c61417188f7b | ["MIT"] | 11 | 2018-09-17T17:50:46.000Z | 2021-05-06T13:04:05.000Z | torchmodels/modules/feedforward.py | kaniblu/pytorch-models | a50ef66ecc3de498e0856489d801c61417188f7b | ["MIT"] | 1 | 2019-01-17T15:08:39.000Z | 2019-01-17T15:08:39.000Z | torchmodels/modules/feedforward.py | kaniblu/pytorch-models | a50ef66ecc3de498e0856489d801c61417188f7b | ["MIT"] | 1 | 2018-11-15T04:43:35.000Z | 2018-11-15T04:43:35.000Z |
import torch.nn as nn
from .. import common
from . import activation
class AbstractFeedForward(common.Module):
def __init__(self, input_dim, output_dim):
super(AbstractFeedForward, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
def forward(self, h):
raise NotImplementedError()
class MultiLayerFeedForward(AbstractFeedForward):
name = "multilayer"
def __init__(self, *args,
num_layers=1,
hidden_dim=300,
activation=activation.ReluActivation,
dropout=0.0,
batch_norm=False, **kwargs):
super(MultiLayerFeedForward, self).__init__(*args, **kwargs)
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.activation_cls = activation
self.dropout_prob = dropout
self.should_dropout = dropout > 0.0
self.should_batchnorm = batch_norm
layers = []
for i in range(self.num_layers):
if i == 0:
layers.append(nn.Linear(self.input_dim, self.hidden_dim))
else:
layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))
if self.should_batchnorm:
layers.append(nn.BatchNorm1d(self.hidden_dim))
layers.append(self.activation_cls())
if dropout > 0:
layers.append(nn.Dropout(dropout))
layers.append(nn.Linear(self.hidden_dim, self.output_dim))
self.sequential = nn.Sequential(*layers)
self.reset_parameters()
def forward(self, h):
return self.sequential(h)
| 31.113208 | 74 | 0.614312 |
4ae7d1a42e11e27dc1d72e33af824487f4f1714e | 12,635 | py | Python | code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/basic_timezone_list_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/basic_timezone_list_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/basic_timezone_list_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | null | null | null |
"""
Quotes API For Digital Portals
The quotes API combines endpoints for retrieving security end-of-day, delayed, and realtime prices with performance key figures and basic reference data on the security and market level. The API supports over 20 different price types for each quote and comes with basic search endpoints based on security identifiers and instrument names. Market coverage is included in the *Sample Use Cases* section below. The Digital Portal use case is focused on high-performance applications that are * serving millions of end-users, * accessible by client browsers via the internet, * supporting subscriptions for streamed updates out-of-the-box, * typically combining a wide variety of *for Digital Portals*-APIs into a highly use-case specific solution for customers, * integrated into complex infrastructures such as existing frontend frameworks, authentication services. All APIs labelled *for Digital Portals* have been designed for direct use by client web applications and feature extreme low latency: The average response time across all endpoints is 30 ms whereas 99% of all requests are answered in close to under 300ms. See the Time Series API for Digital Portals for direct access to price histories, and the News API for Digital Portals for searching and fetching related news. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.basic_timezone_list_data_filter import BasicTimezoneListDataFilter
globals()['BasicTimezoneListDataFilter'] = BasicTimezoneListDataFilter
class BasicTimezoneListData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'filter': (BasicTimezoneListDataFilter,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'filter': 'filter', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BasicTimezoneListData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (BasicTimezoneListDataFilter): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BasicTimezoneListData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (BasicTimezoneListDataFilter): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 48.225191 | 1,302 | 0.601741 |
6e19acacd2d552018fbe715f2c796590b811e0ab | 1,271 | py | Python | setup.py | mnemchinov/django-logging-eventlog | 59e93d4406d911cfd6d586ae6b9979a583da22f3 | [
"MIT"
] | 1 | 2022-01-20T08:06:52.000Z | 2022-01-20T08:06:52.000Z | setup.py | mnemchinov/django-logging-eventlog | 59e93d4406d911cfd6d586ae6b9979a583da22f3 | [
"MIT"
] | null | null | null | setup.py | mnemchinov/django-logging-eventlog | 59e93d4406d911cfd6d586ae6b9979a583da22f3 | [
"MIT"
] | null | null | null | import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
def local_scheme(version):
return ""
setup(
name='django-logging-eventlog',
version=__import__('eventlog').__version__,
setup_requires=['setuptools_scm'],
packages=['eventlog', 'eventlog.migrations'],
include_package_data=True,
license='MIT License',
description='Logger for the logging module that writes messages to the database',
long_description=README,
url='https://github.com/mnemchinov/django-logging-eventlog',
author='mnemchinov',
author_email='mnemchinov@mail.ru',
install_requires=['django>=3.0'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Framework :: Django',
'Environment :: Web Environment',
'Natural Language :: Russian',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development',
],
)
| 31 | 85 | 0.664831 |
369e57bc8298134474236ba8b816c21244f53695 | 2,275 | py | Python | tagging_tools/TagElfFile.py | draperlaboratory/hope-policy-engine | abfc813e2ea7814a05204f981374fcefbab1d959 | [
"MIT"
] | 1 | 2021-05-02T07:09:40.000Z | 2021-05-02T07:09:40.000Z | tagging_tools/TagElfFile.py | draperlaboratory/hope-policy-engine | abfc813e2ea7814a05204f981374fcefbab1d959 | [
"MIT"
] | 13 | 2019-07-01T14:49:16.000Z | 2020-08-04T15:24:05.000Z | tagging_tools/TagElfFile.py | draperlaboratory/hope-policy-engine | abfc813e2ea7814a05204f981374fcefbab1d959 | [
"MIT"
] | 3 | 2019-06-18T17:09:12.000Z | 2021-05-02T07:09:42.000Z | #!/usr/bin/env python3
import os
import sys
import subprocess
import tempfile
def generate_tag_array(elfname, range_file, policy_meta_info, rv64):
tag_array_file = tempfile.NamedTemporaryFile(mode='w+b', delete=False, prefix='tag_array_')
tag_array_filename = tag_array_file.name
length = policy_meta_info.get('MaxBit')
if rv64:
bytes_per_address = 8 # 64/8
bfd_target = "elf64-littleriscv"
else:
bytes_per_address = 4 # 32/8
bfd_target = "elf32-littleriscv"
tool_prefix = "riscv64-unknown-elf-"
tag_array_bytes = [0]*bytes_per_address*(length+1)
tag_array_file.write(int.to_bytes(length, byteorder='little', length=bytes_per_address))
tag_array_file.write(bytearray(tag_array_bytes))
tag_array_file.close()
pout = subprocess.check_output([tool_prefix + 'objdump', '-h', elfname])
if ".tag_array" in str(pout): # section exists, update the elf
base_command = tool_prefix + "objcopy --target=" + bfd_target + " --update-section .tag_array=" + tag_array_filename + " " + elfname + " " + elfname
else:
base_command = tool_prefix + "objcopy --target=" + bfd_target + " --add-section .tag_array=" + tag_array_filename + " --set-section-flags .tag_array=readonly,data " + elfname + " " + elfname
presult = subprocess.call(base_command.split(' '))
os.remove(tag_array_filename)
if presult != 0:
return presult
start_addr = ""
pout = subprocess.check_output([tool_prefix + 'objdump', '--target', bfd_target ,'-h', elfname])
for line in str(pout).split('\\n'):
if '.tag_array' in line:
start_addr = (line.split()[3])
start_addr = int(start_addr, 16)
if start_addr:
# metadata ids are 0-indexed, so we offset by 1 to allow .tag_array[0] to be the size.
# iterate through addresses in .tag_array, tagging .tag_array[i+1] with the metadata with id i.
for m in policy_meta_info.get('Metadata'):
mid = int(m.get('id'))
range_file.write_range(start_addr + (mid*bytes_per_address) + bytes_per_address,
start_addr + (mid*bytes_per_address) + (2*bytes_per_address),
m.get('name'))
return presult
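def _generate_tag_array_sketch(range_file):
    # Editor's illustrative sketch, not part of the original tool: a hypothetical
    # invocation.  "firmware.elf" and the metadata entry are placeholders; real values
    # come from the policy tooling, and range_file must provide
    # write_range(start, end, name) as used above.
    policy_meta_info = {
        "MaxBit": 1,
        "Metadata": [{"id": "0", "name": "placeholder.tag"}],
    }
    return generate_tag_array("firmware.elf", range_file, policy_meta_info, rv64=False)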
| 37.916667 | 198 | 0.651868 |
d68ccc40d7708621f78ee425dd8f36dad1145387 | 2,565 | py | Python | fhi_lib/img_coordinate.py | yhsueh/FHI_RCNN | f12df17049d5c72d1a7cec89e3943013150177a5 | [
"MIT"
] | null | null | null | fhi_lib/img_coordinate.py | yhsueh/FHI_RCNN | f12df17049d5c72d1a7cec89e3943013150177a5 | [
"MIT"
] | null | null | null | fhi_lib/img_coordinate.py | yhsueh/FHI_RCNN | f12df17049d5c72d1a7cec89e3943013150177a5 | [
"MIT"
] | null | null | null | import sys
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
from fhi_lib.geometry import Point
class ImgCoord():
def __init__(self, info):
self.mask = info[0].astype(np.uint8)
self.roi = info[1]
self.class_id= info[2]
def draw_point_of_interest(self, img):
img = cv2.circle(img, (self.x_interest, self.y_interest), 0, (0, 0, 255), 5)
img = cv2.circle(img, (self.x_interest, self.y_interest), 15, (100,255,100), 3)
return img
def get_point_of_interest(self):
raise NotImplementedError()
def update_interest_pt(self, pt):
self.x_interest = pt[0]
self.y_interest = pt[1]
class Type1_2Coord(ImgCoord):
def __init__(self, info):
super().__init__(info)
def get_point_of_interest(self):
epsilon = 2
contours, _ = cv2.findContours(image=self.mask,
mode=cv2.RETR_EXTERNAL,
method=cv2.CHAIN_APPROX_NONE)
contour = contours[0][:,0,:]
pts_x = contour[:,0]
pts_y = contour[:,1]
pts_ux = np.mean(pts_x)
### Select the points near x mean ###
selected_pts_mask = (pts_x < pts_ux + epsilon) & (pts_x > pts_ux - epsilon)
selected_pts_x = pts_x[selected_pts_mask]
selected_pts_y = pts_y[selected_pts_mask]
selected_pts_uy = np.mean(selected_pts_y)
### Find min y that is greater than y_mean ###
conditioned_min_y = 99999
for i, y in enumerate(selected_pts_y):
if y < conditioned_min_y and y > selected_pts_uy:
conditioned_min_y = y
### Take the average of x coordinates of the points with same y coordinates ###
selected_pts_y_mask = selected_pts_y == conditioned_min_y
interested_pts_x = selected_pts_x[selected_pts_y_mask]
self.x_interest = int(np.mean(interested_pts_x))
self.y_interest = conditioned_min_y
return Point((self.x_interest,self.y_interest))
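def _type1_2_coord_usage_sketch(mask, roi, class_id):
    # Editor's illustrative sketch, not part of the original module: a detection
    # tuple (mask, roi, class_id) is wrapped in a coordinate object and the point
    # of interest is computed, ready to be drawn back onto the frame.
    coord = Type1_2Coord((mask, roi, class_id))
    return coord.get_point_of_interest()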
class Type3_4Coord(ImgCoord):
def __init__(self, info):
super().__init__(info)
def get_point_of_interest(self):
approx_y_selection_range = 20
contours, _ = cv2.findContours(image=self.mask,
mode=cv2.RETR_EXTERNAL,
method=cv2.CHAIN_APPROX_NONE)
approx = cv2.approxPolyDP(contours[0], 20, True)
approx = approx[:,0,:]
approx_y = approx[:,1]
approx_y_max = np.max(approx_y)
selected_pt_mask_max = approx_y > (approx_y_max-approx_y_selection_range)
approx_max_pts = approx[selected_pt_mask_max]
approx_left_corner = approx_max_pts[0]
for pt in approx_max_pts:
if pt[0] < approx_left_corner[0]:
approx_left_corner = pt
self.x_interest = approx_left_corner[0]
self.y_interest = approx_left_corner[1]
return Point(approx_left_corner) | 30.903614 | 81 | 0.726316 |
0086a4b3aabbda0d8aa33d9ad043945508dd1c39 | 1,776 | py | Python | programFiles/select.py | Burria/CubeX-DB | e92cf165a326674d72bd24d1d04cff893a3a1a8e | [
"MIT"
] | 1 | 2019-04-16T15:45:14.000Z | 2019-04-16T15:45:14.000Z | programFiles/select.py | Burria/CubeX-DB | e92cf165a326674d72bd24d1d04cff893a3a1a8e | [
"MIT"
] | null | null | null | programFiles/select.py | Burria/CubeX-DB | e92cf165a326674d72bd24d1d04cff893a3a1a8e | [
"MIT"
] | null | null | null | import re
def toSelect(q,l):
#for ad in d:
#print(ad)
idFound = None
#lets check where we should compare
try:
if q[8]=='at':
directoryToCompare=l+'/'+q[9]+'/'+q[2]+'.cxdb'
except:
directoryToCompare=l+'/'+q[2]+'.cxdb'
    # map the textual operator from the query onto a comparison function
    comparisons = {
        '=': operator.eq,
        '!=': operator.ne,
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '<=': operator.le,
    }
    compare = comparisons.get(q[7])
    if compare is None:
        print("Unknown operator: " + q[7])
        return
    with open(directoryToCompare, 'r') as db:
        for line in db:
            # each stored row looks like "(<id>#<value>)": drop the wrapper
            # characters, then separate the id from the raw value
            line = line.strip()[1:-1]
            idSelect = line.split('#')
            lineS = line.replace(idSelect[0] + '#', '', 1)
            if compare(lineS, q[3]):
                # the row matched, remember its id
                idFound = idSelect[0]
                break
    # at this point we either found a matching id or nothing matched
    if idFound is None:
        print("Sorry, no matching row could be found")
        return
    # split the destination columns into a list
    destinyCol = q[1].split(',')
    resultOfQuery = []
    for col in destinyCol:
        if q[5] == 'dimension0':
            directoryDestiny = l + '/' + col + '.cxdb'
        else:
            directoryDestiny = l + '/' + q[5] + '/' + col + '.cxdb'
        with open(directoryDestiny, 'r') as db:
            for line in db:
                # strip the wrapper characters as above and keep the value
                # that belongs to the id we found
                line = line.strip()[1:-1]
                idSelect = line.split('#')
                if idSelect[0] == idFound:
                    resultOfQuery.append(idSelect[1])
    print(resultOfQuery)
320a4b9b5f23fc79da2b93e7d04d5898ddf85faa | 64,383 | py | Python | python/src/Lib/io.py | vlinhd11/vlinhd11-android-scripting | c90f04eb26a3746f025a6a0beab92bb6aa88c084 | [
"Apache-2.0"
] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | python/src/Lib/io.py | weiqiangzheng/sl4a | d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | [
"Apache-2.0"
] | 315 | 2015-05-31T11:55:46.000Z | 2022-01-12T08:36:37.000Z | python/src/Lib/io.py | weiqiangzheng/sl4a | d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | [
"Apache-2.0"
] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z | """
The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# New I/O library conforming to PEP 3116.
# This is a prototype; hopefully eventually some of this will be
# reimplemented in C.
# XXX edge cases when switching between reading/writing
# XXX need to support 1 meaning line-buffered
# XXX whenever an argument is None, use the default value
# XXX read/write ops should check readable/writable
# XXX buffered readinto should work with arbitrary buffer objects
# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
# XXX check writable, readable and seekable in appropriate places
from __future__ import print_function
from __future__ import unicode_literals
__author__ = ("Guido van Rossum <guido@python.org>, "
"Mike Verdone <mike.verdone@gmail.com>, "
"Mark Russell <mark.russell@zen.co.uk>")
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
"BytesIO", "StringIO", "BufferedIOBase",
"BufferedReader", "BufferedWriter", "BufferedRWPair",
"BufferedRandom", "TextIOBase", "TextIOWrapper"]
import os
import abc
import codecs
import _fileio
import threading
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# py3k has only new style classes
__metaclass__ = type
class BlockingIOError(IOError):
"""Exception raised when I/O would block on a non-blocking I/O stream."""
def __init__(self, errno, strerror, characters_written=0):
IOError.__init__(self, errno, strerror)
self.characters_written = characters_written
def open(file, mode="r", buffering=None, encoding=None, errors=None,
newline=None, closefd=True):
r"""Open file and return a stream. If the file cannot be opened, an IOError is
raised.
file is either a string giving the name (and the path if the file
isn't in the current working directory) of the file to be opened or an
integer file descriptor of the file to be wrapped. (If a file
descriptor is given, it is closed when the returned I/O object is
closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), and 'a' for appending (which on some Unix systems,
means that all writes append to the end of the file regardless of the
current seek position). In text mode, if encoding is not specified the
encoding used is platform dependent. (For reading and writing raw
bytes use binary mode and leave encoding unspecified.) The available
modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy. By
default full buffering is on. Pass 0 to switch buffering off (only
allowed in binary mode), 1 to set line buffering, and an integer > 1
for full buffering.
encoding is the name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline controls how universal newlines works (it only applies to text
mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
If closefd is False, the underlying file descriptor will be kept open
when the file is closed. This does not work when a file name is given
and must be True in that case.
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (basestring, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, basestring):
raise TypeError("invalid mode: %r" % mode)
if buffering is not None and not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, basestring):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, basestring):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
if buffering is None:
buffering = -1
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
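def _open_usage_sketch():
    # Editor's illustrative sketch, not part of the original module: text mode yields
    # a TextIOWrapper, binary read mode a BufferedReader.  "example.txt" is a
    # hypothetical path.
    with open("example.txt", "w", encoding="utf-8") as fp:
        fp.write("Spam and eggs!\n")
    with open("example.txt", "rb") as fp:
        return fp.read()            # -> b"Spam and eggs!\n"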
class _DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=None, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dumbdbm does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = _DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(object):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise a IOError when operations they do not support are called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
    fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
__metaclass__ = abc.ABCMeta
### Internal ###
def _unsupported(self, name):
"""Internal: raise an exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence = 0):
"""Change stream position.
Change the stream position to byte offset offset. offset is
interpreted relative to the position indicated by whence. Values
for whence are:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return current stream position."""
return self.seek(0, 1)
def truncate(self, pos = None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
except IOError:
pass # If flush() fails, just give up
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return whether object supports random access.
If False, seek(), tell() and truncate() will raise IOError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise an IOError if file is not seekable
"""
if not self.seekable():
raise IOError("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return whether object was opened for reading.
If False, read() will raise IOError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise an IOError if file is not readable
"""
if not self.readable():
raise IOError("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return whether object was opened for writing.
If False, write() and truncate() will raise IOError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise an IOError if file is not writable
"""
if not self.writable():
raise IOError("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self):
"""Context management protocol. Returns self."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit = -1):
r"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
self._checkClosed()
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
if not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None:
hint = -1
if not isinstance(hint, (int, long)):
raise TypeError("hint must be an integer")
if hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n = -1):
"""Read and return up to n bytes.
Returns an empty bytes array on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
return bytes(res)
def readinto(self, b):
"""Read up to len(b) bytes into b.
Returns number of bytes read (0 for EOF), or None if the object
is set not to block as has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
class FileIO(_fileio._FileIO, RawIOBase):
"""Raw I/O implementation for OS files."""
# This multiply inherits from _FileIO and RawIOBase to make
# isinstance(io.FileIO(), io.RawIOBase) return True without requiring
# that _fileio._FileIO inherits from io.RawIOBase (which would be hard
# to do since _fileio.c is written in C).
def __init__(self, name, mode="r", closefd=True):
_fileio._FileIO.__init__(self, name, mode, closefd)
self._name = name
def close(self):
_fileio._FileIO.close(self)
RawIOBase.close(self)
@property
def name(self):
return self._name
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n = None):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def readinto(self, b):
"""Read up to len(b) bytes into b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array(b'b', data)
return n
def write(self, b):
"""Write the given buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self.raw = raw
### Positioning ###
def seek(self, pos, whence=0):
return self.raw.seek(pos, whence)
def tell(self):
return self.raw.tell()
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
self.raw.flush()
def close(self):
if not self.closed:
try:
self.flush()
except IOError:
pass # If flush() fails, just give up
self.raw.close()
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class _BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
# XXX More docs
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += bytearray(initial_bytes)
self._buffer = buf
self._pos = 0
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if not isinstance(n, (int, long)):
raise TypeError("argument must be an integer")
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""this is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos = pos.__index__()
except AttributeError as err:
raise TypeError("an integer is required") # from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
elif pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return self.seek(pos)
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
# Use the faster implementation of BytesIO if available
try:
import _bytesio
class BytesIO(_bytesio._BytesIO, BufferedIOBase):
__doc__ = _bytesio._BytesIO.__doc__
except ImportError:
BytesIO = _BytesIO
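def _bytesio_usage_sketch():
    # Editor's illustrative sketch, not part of the original module: BytesIO is a
    # seekable in-memory binary stream.
    buf = BytesIO(b"abcdef")
    buf.seek(2)
    head = buf.read(2)              # -> b"cd"
    buf.write(b"!")                 # overwrite in place at the current position
    return head, buf.getvalue()     # -> (b"cd", b"abcd!f")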
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
raw._checkReadable()
_BufferedIOMixin.__init__(self, raw)
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = threading.Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
chunk = self.raw.read()
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
chunk = self.raw.read(wanted)
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want:
to_read = self.buffer_size - have
current = self.raw.read(to_read)
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n <= 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return self.raw.tell() - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = self.raw.seek(pos, whence)
self._reset_read_buf()
return pos
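def _buffered_reader_sketch():
    # Editor's illustrative sketch, not part of the original module: peek() returns
    # buffered bytes without consuming them, while read() advances the position.
    reader = BufferedReader(BytesIO(b"hello world"))
    peeked = reader.peek(5)          # at least 5 bytes here, position unchanged
    first = reader.read(5)           # -> b"hello"
    return peeked[:5], first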
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
    DEFAULT_BUFFER_SIZE. If max_buffer_size is omitted, it defaults to
twice the buffer size.
"""
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkWritable()
_BufferedIOMixin.__init__(self, raw)
self.buffer_size = buffer_size
self.max_buffer_size = (2*buffer_size
if max_buffer_size is None
else max_buffer_size)
self._write_buf = bytearray()
self._write_lock = threading.Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer
try:
self._flush_unlocked()
except BlockingIOError as e:
# We can't accept anything else.
# XXX Why not just let the exception pass through?
raise BlockingIOError(e.errno, e.strerror, 0)
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.max_buffer_size:
# We've hit max_buffer_size. We have to accept a
# partial write and cut back our buffer.
overage = len(self._write_buf) - self.max_buffer_size
self._write_buf = self._write_buf[:self.max_buffer_size]
raise BlockingIOError(e.errno, e.strerror, overage)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
written = 0
try:
while self._write_buf:
n = self.raw.write(self._write_buf)
del self._write_buf[:n]
written += n
except BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
written += n
raise BlockingIOError(e.errno, e.strerror, written)
def tell(self):
return self.raw.tell() + len(self._write_buf)
def seek(self, pos, whence=0):
with self._write_lock:
self._flush_unlocked()
return self.raw.seek(pos, whence)
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE. The max_buffer_size (for the buffered writer)
defaults to twice the buffer size.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
"""Constructor.
The arguments are two RawIO instances.
"""
reader._checkReadable()
writer._checkWritable()
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size, max_buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE. The max_buffer_size (for the buffered
writer) defaults to twice the buffer size.
"""
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
def seek(self, pos, whence=0):
self.flush()
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
return pos
def tell(self):
if self._write_buf:
return self.raw.tell() + len(self._write_buf)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
self.seek(pos)
return BufferedWriter.truncate(self)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n = -1):
"""Read at most n characters from stream.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream."""
self._unsupported("write")
def truncate(self, pos = None):
"""Truncate size to pos."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
"""Codec used when reading a file in universal newlines mode.
It wraps another incremental decoder, translating \\r\\n and \\r into \\n.
It also records the types of newlines encountered.
When used with translate=False, it ensures that the newline sequence is
returned in one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
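def _newline_decoder_sketch():
    # Editor's illustrative sketch, not part of the original module: wrapping an
    # incremental UTF-8 decoder so '\r' and '\r\n' are translated to '\n' and the
    # newline kinds seen so far are recorded.
    decoder = IncrementalNewlineDecoder(codecs.getincrementaldecoder("utf-8")(),
                                        translate=True)
    text = decoder.decode(b"one\r\ntwo\rthree\n", final=True)
    return text, decoder.newlines   # -> ("one\ntwo\nthree\n", ("\r", "\n", "\r\n"))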
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 128
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False):
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding()
if not isinstance(encoding, basestring):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, basestring):
raise ValueError("invalid errors: %r" % errors)
self.buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
def seekable(self):
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
try:
self.flush()
except:
pass # If flush() fails, just give up
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, unicode):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
The return value is True unless EOF was reached. The decoded string
is placed in self._decoded_chars (replacing its previous value).
The entire input chunk is sent to the decoder, though some of it
may remain buffered in the decoder, yet to be converted.
"""
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
eof = not input_chunk
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
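# Illustrative note (added for clarity, not part of the original logic): the two
# helpers above are exact inverses. For example, assuming position=10,
# dec_flags=3, bytes_to_feed=5, need_eof=1, chars_to_skip=2:
#   cookie = 10 | (3 << 64) | (5 << 128) | (2 << 192) | (1 << 256)
# and _unpack_cookie(cookie) recovers (10, 3, 5, 1, 2) by repeated
# divmod(cookie, 1 << 64).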
def tell(self):
if not self._seekable:
raise IOError("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Note our initial start point.
decoder.setstate((b'', dec_flags))
start_pos = position
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
need_eof = 0
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
for next_byte in next_input:
bytes_fed += 1
chars_decoded += len(decoder.decode(next_byte))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
self.seek(pos)
return self.buffer.truncate()
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise IOError("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise IOError("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise IOError("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
(whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
return cookie
def read(self, n=None):
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def next(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
if not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
decoder = self._decoder or self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
# No line ending seen yet - get more data
more_line = ''
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""An in-memory stream for text. The initial_value argument sets the
value of the object. The other arguments are like those of TextIOWrapper's
constructor.
"""
def __init__(self, initial_value="", encoding="utf-8",
errors="strict", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding=encoding,
errors=errors,
newline=newline)
if initial_value:
if not isinstance(initial_value, unicode):
initial_value = unicode(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
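# Minimal usage sketch (illustrative, assuming the Python 2 unicode built-in
# is available as in the rest of this module):
#   s = StringIO(u"hello\n")
#   s.read()      # -> u"hello\n"
#   s.getvalue()  # -> u"hello\n"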
| 34.540236 | 82 | 0.599382 |
d1c49675703a6dbc1e9640b6eb2c7e4529d7c3da | 2,016 | py | Python | google-cloud-sdk/lib/surface/emulators/datastore/__init__.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/lib/surface/emulators/datastore/__init__.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/.install/.backup/lib/surface/emulators/datastore/__init__.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud datastore emulator group."""
from googlecloudsdk.api_lib.emulators import datastore_util
from googlecloudsdk.api_lib.emulators import util
from googlecloudsdk.calliope import base
class Datastore(base.Group):
"""Manage your local datastore emulator.
This set of commands allows you to start and use a local datastore emulator.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To start a local datastore emulator, run:
$ {command} start
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'--data-dir',
required=False,
help='The directory to be used to store/retrieve data/config for an'
' emulator run.')
parser.add_argument(
'--legacy',
default=False,
action='store_true',
help='Set to use the legacy emulator which supports Cloud Datastore'
' API v1beta2.')
def Filter(self, context, args):
util.CheckIfJava7IsInstalled(datastore_util.DATASTORE_TITLE)
if args.legacy:
util.EnsureComponentIsInstalled('gcd-emulator',
datastore_util.DATASTORE_TITLE)
else:
util.EnsureComponentIsInstalled('cloud-datastore-emulator',
datastore_util.DATASTORE_TITLE)
if not args.data_dir:
args.data_dir = datastore_util.GetDataDir()
| 33.6 | 78 | 0.68006 |
82a7777578af63f8886646f48676f5b3ae9ac722 | 1,470 | py | Python | bynge/blueprints/storage.py | gutmensch/bynge | 738b518e2615e0f2d4e668f86c0c7cdebaf8d084 | [
"BSD-2-Clause"
] | null | null | null | bynge/blueprints/storage.py | gutmensch/bynge | 738b518e2615e0f2d4e668f86c0c7cdebaf8d084 | [
"BSD-2-Clause"
] | null | null | null | bynge/blueprints/storage.py | gutmensch/bynge | 738b518e2615e0f2d4e668f86c0c7cdebaf8d084 | [
"BSD-2-Clause"
] | null | null | null | import os
from datetime import datetime
from uuid import uuid4
import gevent
from flask import Blueprint
from flask import request, render_template, copy_current_request_context
from werkzeug.utils import secure_filename
from bynge.lib.processor.audio import AudioFileProcessor
from bynge.models import IncomingFile
from bynge import app
blueprint = Blueprint(__name__, __name__)
uuid = str(uuid4())
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
def incoming_filename(filename):
file = "%s_%s" % (uuid, secure_filename(filename))
return os.path.join(app.config['UPLOAD_DIRECTORY'], file)
@blueprint.route('', methods=['PUT'])
def store_file():
os.makedirs(app.config['UPLOAD_DIRECTORY'], exist_ok=True)
file = request.files['file']
if file and allowed_file(file.filename):
file_dest_path = incoming_filename(file.filename)
file.save(file_dest_path)
incoming_file = IncomingFile(processed='false', file_path=file_dest_path, uuid=uuid, store_date=datetime.now())
incoming_file.save()
# processing file in a background thread
@copy_current_request_context
def process_file():
uploaded_file = AudioFileProcessor(uuid)
uploaded_file.process()
gevent.spawn(process_file)
return render_template('storage.html'), 200
return render_template('error.html'), 406
| 31.956522 | 119 | 0.72449 |
e2aa7cd7309e3241b8487a78b7b45a186cd351fa | 688 | py | Python | examples/wmt/tools/scorer/refbleu.py | godweiyang/ParaGen | 9665d1244ea38a41fc06b4e0a7f6411985e2221f | [
"Apache-2.0"
] | 50 | 2022-01-18T07:25:46.000Z | 2022-03-14T13:06:18.000Z | examples/wmt/tools/scorer/refbleu.py | JiangtaoFeng/ParaGen | 509334bf16e3674e009bb9dc37ecc38ae3b5c977 | [
"Apache-2.0"
] | 2 | 2022-01-19T09:36:42.000Z | 2022-02-23T07:16:02.000Z | examples/wmt/tools/scorer/refbleu.py | JiangtaoFeng/ParaGen | 509334bf16e3674e009bb9dc37ecc38ae3b5c977 | [
"Apache-2.0"
] | 6 | 2022-01-19T09:28:53.000Z | 2022-03-10T10:20:08.000Z | import argparse
import sacrebleu
parser = argparse.ArgumentParser()
parser.add_argument('--hypo_filename', metavar='N', type=str, help='hypo_filename')
parser.add_argument('--ref_filename', metavar='N', type=str, help='ref_filename')
parser.add_argument('--out_filename', metavar='N', type=str, help='out_filename')
args, unknown = parser.parse_known_args()
with open(args.hypo_filename, 'r') as fhypo, open(args.ref_filename, 'r') as fref, open(args.out_filename, 'w') as fout:
max_bleu = 0
for hypo, ref in zip(fhypo, fref):
sent_bleu = sacrebleu.sentence_bleu(hypo, [ref]).score / 100
fout.write(f'{sent_bleu}\n')
max_bleu = max(max_bleu, sent_bleu)
| 40.470588 | 120 | 0.710756 |
491ca1ad3b516f65b7757bbd05b1fe7af241f594 | 3,389 | py | Python | vsts/vsts/work_item_tracking_process_definitions/v4_1/models/control.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/work_item_tracking_process_definitions/v4_1/models/control.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/work_item_tracking_process_definitions/v4_1/models/control.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Control(Model):
"""Control.
:param contribution: Contribution for the control.
:type contribution: :class:`WitContribution <work-item-tracking.v4_1.models.WitContribution>`
:param control_type: Type of the control.
:type control_type: str
:param height: Height of the control, for html controls.
:type height: int
:param id: The id for the layout node.
:type id: str
:param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be set by the combiner.
:type inherited: bool
:param is_contribution: A value indicating if the layout node is contribution or not.
:type is_contribution: bool
:param label: Label for the field
:type label: str
:param metadata: Inner text of the control.
:type metadata: str
:param order:
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param read_only: A value indicating if the control is readonly.
:type read_only: bool
:param visible: A value indicating if the control should be hidden or not.
:type visible: bool
:param watermark: Watermark text for the textbox.
:type watermark: str
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'control_type': {'key': 'controlType', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'},
'watermark': {'key': 'watermark', 'type': 'str'}
}
def __init__(self, contribution=None, control_type=None, height=None, id=None, inherited=None, is_contribution=None, label=None, metadata=None, order=None, overridden=None, read_only=None, visible=None, watermark=None):
super(Control, self).__init__()
self.contribution = contribution
self.control_type = control_type
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.metadata = metadata
self.order = order
self.overridden = overridden
self.read_only = read_only
self.visible = visible
self.watermark = watermark
| 45.797297 | 223 | 0.597816 |
2a918616003d63f2a435d32515a8d116dfe5e2d2 | 67 | py | Python | wsgi.py | pbhuss/meowbot | 7d9c0ea5e9bab2836df6262bfc85467e2a71dcd4 | [
"MIT"
] | 9 | 2018-09-25T04:48:51.000Z | 2022-03-14T04:01:43.000Z | wsgi.py | pbhuss/meowbot | 7d9c0ea5e9bab2836df6262bfc85467e2a71dcd4 | [
"MIT"
] | 2 | 2019-05-27T03:45:02.000Z | 2019-05-27T19:27:19.000Z | wsgi.py | pbhuss/meowbot | 7d9c0ea5e9bab2836df6262bfc85467e2a71dcd4 | [
"MIT"
] | 3 | 2019-01-07T08:51:17.000Z | 2020-08-06T15:02:32.000Z | from meowbot import app
if __name__ == "__main__":
app.run()
| 11.166667 | 26 | 0.656716 |
eaa35bc30cf79cec62759e2e065e52932b32a97b | 1,097 | py | Python | tests/test_sigmoid.py | wakamezake/deep-learning-from-scratch-3 | 92614028be0bcd0f0b2b6ada419a20110bae7ea7 | [
"MIT"
] | null | null | null | tests/test_sigmoid.py | wakamezake/deep-learning-from-scratch-3 | 92614028be0bcd0f0b2b6ada419a20110bae7ea7 | [
"MIT"
] | null | null | null | tests/test_sigmoid.py | wakamezake/deep-learning-from-scratch-3 | 92614028be0bcd0f0b2b6ada419a20110bae7ea7 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from dezero import Variable
import dezero.functions as F
from dezero.utils import check_backward
import chainer.functions as CF
class TestSigmoid(unittest.TestCase):
def test_forward1(self):
x = np.array([[0, 1, 2], [0, 2, 4]], np.float32)
y2 = CF.sigmoid(x)
y = F.sigmoid(Variable(x))
res = np.allclose(y.data, y2.data)
self.assertTrue(res)
def test_forward2(self):
x = np.random.randn(10, 10).astype(np.float32)
y2 = CF.sigmoid(x)
y = F.sigmoid(Variable(x))
res = np.allclose(y.data, y2.data)
self.assertTrue(res)
def test_backward1(self):
x_data = np.array([[0, 1, 2], [0, 2, 4]])
self.assertTrue(check_backward(F.sigmoid, x_data))
def test_backward2(self):
np.random.seed(0)
x_data = np.random.rand(10, 10)
self.assertTrue(check_backward(F.sigmoid, x_data))
def test_backward3(self):
np.random.seed(0)
x_data = np.random.rand(10, 10, 10)
self.assertTrue(check_backward(F.sigmoid, x_data)) | 30.472222 | 58 | 0.627165 |
49757e218ee248d9b143f9cc4f715eef4784e8bb | 2,616 | py | Python | SVM/Combine.py | Globe-Eater/Geographic-Duplicate-Detection- | ec467fc41cb456959da87fd913465dc9daa27d80 | [
"MIT"
] | null | null | null | SVM/Combine.py | Globe-Eater/Geographic-Duplicate-Detection- | ec467fc41cb456959da87fd913465dc9daa27d80 | [
"MIT"
] | null | null | null | SVM/Combine.py | Globe-Eater/Geographic-Duplicate-Detection- | ec467fc41cb456959da87fd913465dc9daa27d80 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 15 11:34:39 2019
@author: kellenbullock
Combine all dataframes because I could do in a different file...
"""
import pandas as pd
prop = pd.read_csv('PROPNAME.csv')
prop = prop.drop(columns=['Unnamed: 0'])
res = pd.read_csv('RESNAME.csv')
res = res.drop(columns=['Unnamed: 0'])
add = pd.read_csv('ADDRESS.CSV')
add = add.drop(columns=['Unnamed: 0'])
df = pd.read_excel('Tulsa.xls')
def match_key(dataframe):
dataframe['left_key'] = ""
dataframe['right_key'] = ""
def get_key(col_item):
for i in col_item:
answer = df.get_index()
return answer
dataframe['left_key'] = dataframe['left_key'].apply(match_key)
dataframe['right_key'] = dataframe['right_key'].apply(match_key)
match_key(prop)
'''
--------------------------------
First Idea, wasn't working but wanna keep it here
df = pd.read_csv('data.csv')
files = [prop, res, add]
columns = ['PROPNAME', 'RESNAME', 'ADDRESS']
intermediate = df.merge(prop, right_on='left_side', left_on='PROPNAME', left_index=True)
intermediate = intermediate.set_index(['Unnamed: 0'])
#intermediate = intermediate.rename(columns={'Unnamed: 0': 'Index'})
intermediate_2 = intermediate.merge(res, right_on='left_side', left_on='RESNAME', left_index=True)
#intermediate_2 = intermediate_2.set_index(['Unnamed: 0'])
final = intermediate_2.merge(add, right_on='left_side', left_on='RESNAME', left_index=True)
#$final = final.drop_duplicates(subset=final['Index'],keep='first')
final = final.loc[~final.index.duplicated(keep='first')]
final.head()
-----------------------------------
Second idea. Really wasn't working either.
for i in files:
for x in columns:
final = df.merge(i, right_on='left_side', left_on=x, left_index=True)
final.to_csv('final.csv')
#final = final.drop(columns=['left_side','right_side','PROPNAME', 'RESNAME','ADDRESS','Lat','Long'])
new = pd.read_csv('final.csv')
'''
'''
#################################
# Third Idea: take the similarity columns out and join the orginial
prop_sim = prop[['similairity']]
prop_sim = prop_sim.rename(columns={'similairity': 'prop_sim'})
res_sim = res[['similairity']]
res_sim = res_sim.rename(columns={'similairity': 'res_sim'})
'''
'''
------------------------------
Okay my idea now is to do a join between the dataframes and just join the records
share and index togther! With that said I will only be looking at records that have
duplicates in two fields. Rather than some duplicates that have only one field or
another.
'''
| 29.393258 | 108 | 0.654434 |
0e4152dae3f0505fc7b4c7873960fc4d1707d1e3 | 651 | py | Python | threespot/documentation/views.py | macfound/django-threespot | 8bc6253bf0f73b8f9957ab51df8e3dcf4081bdd0 | [
"Apache-2.0"
] | 6 | 2015-10-29T17:07:07.000Z | 2021-04-25T21:14:40.000Z | threespot/documentation/views.py | macfound/django-threespot | 8bc6253bf0f73b8f9957ab51df8e3dcf4081bdd0 | [
"Apache-2.0"
] | 1 | 2018-11-05T20:47:45.000Z | 2018-11-05T20:47:45.000Z | threespot/documentation/views.py | macfound/django-threespot | 8bc6253bf0f73b8f9957ab51df8e3dcf4081bdd0 | [
"Apache-2.0"
] | 2 | 2015-01-13T04:59:00.000Z | 2015-10-28T14:34:26.000Z | from django.contrib.admin.views.decorators import staff_member_required
from django.views.static import serve
from threespot.documentation.app_settings import PUBLISH_PATH
@staff_member_required
def documentation(request, path, *args, **kwargs):
"""
Uses the ugly, but good-enough django static files server. Ensures the
server serves from the ``DOCUMENTATION_PUBLISH_PATH`` setting, and
that only staff members can see it.
"""
kwargs['document_root'] = PUBLISH_PATH
kwargs['show_indexes'] = False
if not path or path.endswith("/"):
path += "index.html"
return serve(request, path, *args, **kwargs) | 34.263158 | 74 | 0.725038 |
f097bf9d5100c3ab04b7d329ef6dd4b42cfc25ea | 2,894 | py | Python | nameko/timer.py | vlcinsky/nameko | 88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d | [
"Apache-2.0"
] | 3,425 | 2016-11-10T17:12:42.000Z | 2022-03-31T19:07:49.000Z | nameko/timer.py | vlcinsky/nameko | 88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d | [
"Apache-2.0"
] | 311 | 2016-11-10T20:58:16.000Z | 2022-03-26T09:03:22.000Z | nameko/timer.py | vlcinsky/nameko | 88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d | [
"Apache-2.0"
] | 420 | 2016-11-17T05:46:42.000Z | 2022-03-23T12:36:06.000Z | from __future__ import absolute_import
import itertools
import time
from logging import getLogger
from eventlet import Timeout
from eventlet.event import Event
from nameko.extensions import Entrypoint
_log = getLogger(__name__)
class Timer(Entrypoint):
def __init__(self, interval, eager=False, **kwargs):
"""
Timer entrypoint. Fires every `interval` seconds or as soon as
the previous worker completes if that took longer.
The default behaviour is to wait `interval` seconds
before firing for the first time. If you want the entrypoint
to fire as soon as the service starts, pass `eager=True`.
Example::
timer = Timer.decorator
class Service(object):
name = "service"
@timer(interval=5)
def tick(self):
pass
"""
self.interval = interval
self.eager = eager
self.should_stop = Event()
self.worker_complete = Event()
self.gt = None
super(Timer, self).__init__(**kwargs)
def start(self):
_log.debug('starting %s', self)
self.gt = self.container.spawn_managed_thread(self._run)
def stop(self):
_log.debug('stopping %s', self)
self.should_stop.send(True)
self.gt.wait()
def kill(self):
_log.debug('killing %s', self)
self.gt.kill()
def _run(self):
""" Runs the interval loop. """
def get_next_interval():
start_time = time.time()
start = 0 if self.eager else 1
for count in itertools.count(start=start):
yield max(start_time + count * self.interval - time.time(), 0)
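# Illustrative note (added, not in the original): with interval=5 and
# eager=False the generator above yields sleep times that target
# start_time + 5, start_time + 10, ... so scheduling drift does not
# accumulate; with eager=True the first yielded sleep time is 0 and the
# entrypoint fires as soon as the service starts.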
interval = get_next_interval()
sleep_time = next(interval)
while True:
# sleep for `sleep_time`, unless `should_stop` fires, in which
# case we leave the while loop and stop entirely
with Timeout(sleep_time, exception=False):
self.should_stop.wait()
break
self.handle_timer_tick()
self.worker_complete.wait()
self.worker_complete.reset()
sleep_time = next(interval)
def handle_timer_tick(self):
args = ()
kwargs = {}
# Note that we don't catch ContainerBeingKilled here. If that's raised,
# there is nothing for us to do anyway. The exception bubbles, and is
# caught by :meth:`Container._handle_thread_exited`, though the
# triggered `kill` is a no-op, since the container is already
# `_being_killed`.
self.container.spawn_worker(
self, args, kwargs, handle_result=self.handle_result)
def handle_result(self, worker_ctx, result, exc_info):
self.worker_complete.send()
return result, exc_info
timer = Timer.decorator
| 28.94 | 79 | 0.606773 |
1bba6971dc8ffc3c44c98081191bb8ef903e7b2b | 2,297 | py | Python | tests/test_ui.py | uetke/UUPharosController | 1663dcb5acab78fe65eab9eee7948d1257dec1f0 | [
"MIT"
] | null | null | null | tests/test_ui.py | uetke/UUPharosController | 1663dcb5acab78fe65eab9eee7948d1257dec1f0 | [
"MIT"
] | 1 | 2018-03-13T14:06:21.000Z | 2018-03-13T14:06:21.000Z | tests/test_ui.py | uetke/UUPharosController | 1663dcb5acab78fe65eab9eee7948d1257dec1f0 | [
"MIT"
] | null | null | null | import os
import sys
from PyQt4 import QtCore, QtGui, uic
from lantz import Q_
from PyQt4.Qt import QApplication
import pharos.view.GUI.QtCreator.resources_rc
from pharos.view.GUI.laser_widget import LaserWidget
from pharos.view.GUI.monitor_config_widget import MonitorConfigWidget
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent=parent)
p = os.path.dirname(__file__).split('/')
uic.loadUi(os.path.join('C:\\Users\\Aquiles\\Documents\\Programs\\PharosController\\', 'pharos\\view\\GUI\\QtCreator\main_window.ui'), self)
# Make connections
QtCore.QObject.connect(self.apply_laser, QtCore.SIGNAL('clicked()'), self.update_laser)
# QtCore.QObject.connect(self.laser_button, QtCore.SIGNAL('clicked()'), self.laser_widget.show)
# QtCore.QObject.connect(self.monitor_button, QtCore.SIGNAL('clicked()'), self.monitor_widget.show)
QtCore.QObject.connect(self.wavelength_slider, QtCore.SIGNAL('valueChanged(int)'), self.update_wavelength)
QtCore.QObject.connect(self.power_slider, QtCore.SIGNAL('valueChanged(int)'), self.update_power)
QtCore.QObject.connect(self.shutter, QtCore.SIGNAL('stateChanged(int)'), self.update_shutter)
def update_laser(self):
wavelength = Q_(self.wavelength.text())
power = Q_(self.power.text())
values = {
'wavelength': wavelength,
'powermW': power,
}
self.wavelength_slider.setValue((wavelength.m_as(Q_('nm')) - 1480) / 0.0001)
print(values)
def update_wavelength(self, value):
new_value = 1480+value*0.0001
new_value = new_value*Q_('nm')
self.wavelength.setText('{:~}'.format(new_value))
print(new_value)
def update_shutter(self, state):
state = bool(state)
#self.shutter_value = not self.shutter_value
print(state)
# self.shutter.setDown(self.shutter_value)
def update_power(self, value):
new_value = 0.01+value*0.01
new_value = new_value*Q_('mW')
self.power.setText('{:~}'.format(new_value))
print(new_value)
ap = QApplication(sys.argv)
m = MainWindow()
m.show()
ap.exit(ap.exec_()) | 41.763636 | 149 | 0.666957 |
bebfd767fe972f7e774fdccc696d9c527982209f | 7,242 | py | Python | dephell/actions/_transform.py | espdev/dephell | 17d5604e7b443b4d58bffc635a139adb49431efc | [
"MIT"
] | null | null | null | dephell/actions/_transform.py | espdev/dephell | 17d5604e7b443b4d58bffc635a139adb49431efc | [
"MIT"
] | null | null | null | dephell/actions/_transform.py | espdev/dephell | 17d5604e7b443b4d58bffc635a139adb49431efc | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING
# external
from fissix.fixer_util import Dot, Name, syms
from fissix.pytree import Node
if TYPE_CHECKING:
from bowler import LN, Capture, Filename, Query
try:
from bowler.helpers import dotted_parts, power_parts, quoted_parts
except ImportError:
pass
modifiers = []
def _register(modifier):
modifiers.append(modifier)
return modifier
def transform_imports(query: 'Query', old_name: str, new_name: str) -> 'Query':
params = dict(
name=old_name,
dotted_name=' '.join(quoted_parts(old_name)),
power_name=' '.join(power_parts(old_name)),
)
for modifier_class in modifiers:
modifier = modifier_class(old_name=old_name, new_name=new_name)
selector = modifier.selector.format(**params)
query = query.select(selector).modify(modifier)
return query
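# Illustrative usage sketch (an assumption, not part of the original module;
# treat the exact bowler call signatures as unverified):
#
#   from bowler import Query
#   query = transform_imports(Query("path/to/project"), "old_pkg", "new_pkg")
#   query.execute(write=True)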
@_register
class ModuleImportModifier:
"""import foo -> import bar as foo
"""
selector = """
import_name< 'import'
(
module_name='{name}' any*
|
dotted_as_names< (any ',')* module_name='{name}' (',' any)* >
)
>
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def __call__(self, node: 'LN', capture: 'Capture', filename: 'Filename') -> None:
old_node = capture['module_name']
new_node = Node(
type=syms.dotted_as_name,
children=[
build_new_name_node(
old_node=old_node,
new_name=self.new_name,
attach=False,
),
Name('as', prefix=' '),
old_node.clone(),
],
)
old_node.replace(new_node)
@_register
class FromImportModifier:
"""import foo -> import bar as foo
"""
selector = """
import_from< 'from'
(
module_name='{name}'
|
module_name=dotted_name< {dotted_name} any* >
)
'import' any*
>
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def __call__(self, node: 'LN', capture: 'Capture', filename: 'Filename') -> None:
new_name_node = build_new_name_node(
old_node=capture['module_name'],
new_name=self.new_name,
old_name=self.old_name,
attach=True,
)
capture['module_name'].replace(new_name_node)
@_register
class ModuleAsImportModifier:
"""import foo as bar -> import baz as bar
"""
selector = """
import_name< 'import'
(
dotted_as_name<
(
module_name='{name}'
|
module_name=dotted_name< {dotted_name} any* >
)
'as' module_nickname=any
>
|
dotted_as_names<
(any ',')*
dotted_as_name<
(
module_name='{name}'
|
module_name=dotted_name< {dotted_name} any* >
)
'as' module_nickname=any
>
(',' any)*
>
)
>
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def __call__(self, node: 'LN', capture: 'Capture', filename: 'Filename') -> None:
new_name_node = build_new_name_node(
old_node=capture['module_name'],
new_name=self.new_name,
old_name=self.old_name,
attach=True,
)
capture['module_name'].replace(new_name_node)
@_register
class StringModifier:
"""sys.modules["foo"] -> sys.modules["bar"]
"""
selector = """
string=STRING
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def __call__(self, node: 'LN', capture: 'Capture', filename: 'Filename') -> None:
if not self._capture(capture['string'].value):
return
old_node = capture['string']
new_node = old_node.clone()
new_node.value = old_node.value.replace(self.old_name, self.new_name)
old_node.replace(new_node)
def _capture(self, value):
if value[0] in 'uUrRbBfF':
value = value[1:]
for quote in ('"', "'"):
if value.strip(quote) == self.old_name:
return True
if value.strip(quote).startswith(self.old_name + '.'):
return True
return False
# @_register
class DottedModuleImportModifier:
"""import foo.bar -> import baz.bar
"""
selector = """
(
import_name< 'import' module_name=dotted_name< {dotted_name} any* > >
|
power< {power_name} any* >
)
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def __call__(self, node: 'LN', capture: 'Capture', filename: 'Filename') -> None:
if node.type == syms.power:
self._modify_power(node)
else:
self._modify_import(capture)
def _modify_import(self, capture):
new_name_node = build_new_name_node(
old_node=capture['module_name'],
new_name=self.new_name,
old_name=self.old_name,
attach=True,
)
capture['module_name'].replace(new_name_node)
def _modify_power(self, node):
prefix = node.children[0].prefix
# remove old prefix
parts = dotted_parts(self.old_name)
for _ in range((len(parts) + 1) // 2):
node.children.pop(0)
# add new prefix
head = Name(self.new_name.split('.', maxsplit=1)[0], prefix=prefix)
children = []
for part in dotted_parts(self.new_name)[2::2]:
children.append(Node(
type=syms.trailer,
children=[Dot(), Name(part)],
))
node.children = [head] + children + node.children
def build_new_name_node(*, old_node, attach: bool, new_name: str, old_name: str = None):
# build new node from new_name
if '.' in new_name:
children = []
for part in dotted_parts(new_name):
if part == '.':
children.append(Dot())
else:
children.append(Name(part))
else:
children = [Name(new_name)]
# attach to the new node subimports from the old module
if attach and type(old_node) is Node:
original_name_size = len(dotted_parts(old_name))
for part in old_node.children[original_name_size:]:
if part.value == '.':
children.append(Dot())
else:
children.append(Name(part.value))
return Node(
type=syms.dotted_name,
children=children,
prefix=old_node.prefix,
)
| 27.641221 | 88 | 0.533969 |
805d9b1a5e2aa82657eca5d3a93f86d7b0e0ce73 | 1,127 | py | Python | alembic/versions/7a43773ac926_add_bugzillas.py | TomasTomecek/packit-service | f0e5c0c04df80a600fdba33c1a8dbf9f81fdea08 | [
"MIT"
] | null | null | null | alembic/versions/7a43773ac926_add_bugzillas.py | TomasTomecek/packit-service | f0e5c0c04df80a600fdba33c1a8dbf9f81fdea08 | [
"MIT"
] | 2 | 2020-09-02T08:14:27.000Z | 2020-09-03T03:16:27.000Z | alembic/versions/7a43773ac926_add_bugzillas.py | shreyaspapi/packit-service | a64e7db9f354df9b3c346948e661a89236b22387 | [
"MIT"
] | null | null | null | """Add bugzillas
Revision ID: 7a43773ac926
Revises: 307a4c43ae47
Create Date: 2020-06-11 07:29:27.146501
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "7a43773ac926"
down_revision = "307a4c43ae47"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"bugzillas",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("bug_id", sa.Integer(), nullable=True),
sa.Column("bug_url", sa.String(), nullable=True),
sa.Column("pull_request_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["pull_request_id"], ["pull_requests.id"],),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_bugzillas_bug_id"), "bugzillas", ["bug_id"], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_bugzillas_bug_id"), table_name="bugzillas")
op.drop_table("bugzillas")
# ### end Alembic commands ###
| 28.897436 | 87 | 0.668146 |
356761f1f1fb644c257ecbf7d8964f7ad41c9623 | 4,487 | py | Python | app/src/thirdparty/telemetry/internal/backends/android_browser_backend_settings.py | ta2edchimp/big-rig | d08ffaa34b1496ca6c8e69a8797d280583472156 | [
"Apache-2.0"
] | 925 | 2015-11-06T03:04:46.000Z | 2017-09-16T19:08:43.000Z | app/src/thirdparty/telemetry/internal/backends/android_browser_backend_settings.py | ta2edchimp/big-rig | d08ffaa34b1496ca6c8e69a8797d280583472156 | [
"Apache-2.0"
] | 29 | 2015-11-09T17:37:28.000Z | 2017-08-16T17:50:11.000Z | app/src/thirdparty/telemetry/internal/backends/android_browser_backend_settings.py | ta2edchimp/big-rig | d08ffaa34b1496ca6c8e69a8797d280583472156 | [
"Apache-2.0"
] | 51 | 2015-11-08T07:06:38.000Z | 2017-08-21T07:27:19.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
from telemetry.core import exceptions
class AndroidBrowserBackendSettings(object):
def __init__(self, activity, cmdline_file, package, pseudo_exec_name,
supports_tab_control):
self._activity = activity
self._cmdline_file = cmdline_file
self._package = package
self._pseudo_exec_name = pseudo_exec_name
self._supports_tab_control = supports_tab_control
@property
def activity(self):
return self._activity
@property
def package(self):
return self._package
@property
def pseudo_exec_name(self):
return self._pseudo_exec_name
@property
def supports_tab_control(self):
return self._supports_tab_control
def GetCommandLineFile(self, is_user_debug_build): # pylint: disable=W0613
return self._cmdline_file
def GetDevtoolsRemotePort(self, device):
raise NotImplementedError()
@property
def profile_ignore_list(self):
# Don't delete lib, since it is created by the installer.
return ['lib']
class ChromeBackendSettings(AndroidBrowserBackendSettings):
# Stores a default Preferences file, re-used to speed up "--page-repeat".
_default_preferences_file = None
def GetCommandLineFile(self, is_user_debug_build):
if is_user_debug_build:
return '/data/local/tmp/chrome-command-line'
else:
return '/data/local/chrome-command-line'
def __init__(self, package):
super(ChromeBackendSettings, self).__init__(
activity='com.google.android.apps.chrome.Main',
cmdline_file=None,
package=package,
pseudo_exec_name='chrome',
supports_tab_control=True)
def GetDevtoolsRemotePort(self, device):
return 'localabstract:chrome_devtools_remote'
class ContentShellBackendSettings(AndroidBrowserBackendSettings):
def __init__(self, package):
super(ContentShellBackendSettings, self).__init__(
activity='org.chromium.content_shell_apk.ContentShellActivity',
cmdline_file='/data/local/tmp/content-shell-command-line',
package=package,
pseudo_exec_name='content_shell',
supports_tab_control=False)
def GetDevtoolsRemotePort(self, device):
return 'localabstract:content_shell_devtools_remote'
class ChromeShellBackendSettings(AndroidBrowserBackendSettings):
def __init__(self, package):
super(ChromeShellBackendSettings, self).__init__(
activity='org.chromium.chrome.shell.ChromeShellActivity',
cmdline_file='/data/local/tmp/chrome-shell-command-line',
package=package,
pseudo_exec_name='chrome_shell',
supports_tab_control=False)
def GetDevtoolsRemotePort(self, device):
return 'localabstract:chrome_shell_devtools_remote'
class WebviewBackendSettings(AndroidBrowserBackendSettings):
def __init__(self,
package,
activity='org.chromium.webview_shell.TelemetryActivity',
cmdline_file='/data/local/tmp/webview-command-line'):
super(WebviewBackendSettings, self).__init__(
activity=activity,
cmdline_file=cmdline_file,
package=package,
pseudo_exec_name='webview',
supports_tab_control=False)
def GetDevtoolsRemotePort(self, device):
# The DevTools socket name for WebView depends on the activity PID's.
retries = 0
timeout = 1
pid = None
while True:
pids = device.GetPids(self.package)
if not pids or self.package not in pids:
time.sleep(timeout)
retries += 1
timeout *= 2
if retries == 4:
logging.critical('android_browser_backend: Timeout while waiting for '
'activity %s:%s to come up',
self.package,
self.activity)
raise exceptions.BrowserGoneException(self.browser,
'Timeout waiting for PID.')
pid = pids[self.package]
break
return 'localabstract:webview_devtools_remote_%s' % str(pid)
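# Illustrative note (added): the retry loop in GetDevtoolsRemotePort above
# backs off 1s, 2s, 4s, 8s between PID polls and gives up after the fourth
# retry, i.e. after roughly 15 seconds in total.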
class WebviewShellBackendSettings(WebviewBackendSettings):
def __init__(self, package):
super(WebviewShellBackendSettings, self).__init__(
activity='org.chromium.android_webview.shell.AwShellActivity',
cmdline_file='/data/local/tmp/android-webview-command-line',
package=package)
| 32.514493 | 80 | 0.705371 |
33d2725daace6477340f57433619737302a6a709 | 983 | py | Python | torch_ipex_py/ops/interaction.py | intel/intel-extension-for-pytorch | a1cc6681447ef1079e0c156e531df109cb30ffdf | [
"Apache-2.0"
] | 322 | 2020-05-08T04:03:51.000Z | 2022-03-30T13:01:31.000Z | torch_ipex_py/ops/interaction.py | intel/intel-extension-for-pytorch | a1cc6681447ef1079e0c156e531df109cb30ffdf | [
"Apache-2.0"
] | 159 | 2020-05-09T02:55:40.000Z | 2022-03-30T13:43:04.000Z | torch_ipex_py/ops/interaction.py | intel/intel-extension-for-pytorch | a1cc6681447ef1079e0c156e531df109cb30ffdf | [
"Apache-2.0"
] | 64 | 2020-05-08T03:49:27.000Z | 2022-03-22T09:50:23.000Z | import torch
from torch import nn
from torch.autograd import Function
import intel_extension_for_pytorch._C as core
def interaction(*args):
# Current PyTorch does not support vector<Tensor> inputs for C++ custom functions,
# so we keep the Python custom function for the cases that need backward.
# Because the Python custom function hits the GIL when running multi-threaded in
# one process, we will drop it once the C++ path is supported.
if torch.is_grad_enabled():
return InteractionFunc.apply(*args)
return torch.ops.torch_ipex.interaction_forward(args)
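# Minimal usage sketch (illustrative assumption: DLRM-style inputs where every
# tensor has shape [batch_size, embedding_dim]; the exact shapes expected by the
# kernel are not verified here):
#   dense = torch.randn(128, 16)
#   sparse = [torch.randn(128, 16) for _ in range(26)]
#   out = interaction(dense, *sparse)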
class InteractionFunc(Function):
@staticmethod
def forward(ctx, *args):
ctx.save_for_backward(*args)
output = torch.ops.torch_ipex.interaction_forward(args)
return output
@staticmethod
def backward(ctx, grad_out):
args = ctx.saved_tensors
grad_in = torch.ops.torch_ipex.interaction_backward(grad_out.contiguous(), args)
return tuple(grad_in)
| 36.407407 | 88 | 0.734486 |
ad1983da767c1005c82c92e7c8fefa4aa1fee65e | 1,982 | py | Python | src/tests/fidl/source_compatibility/gen/scaffolding.py | gongcm/fuchisa-os | 83c2f90cf8f80aefb964dfe4a653664377b59384 | [
"BSD-Source-Code"
] | 1 | 2021-05-19T04:30:05.000Z | 2021-05-19T04:30:05.000Z | src/tests/fidl/source_compatibility/gen/scaffolding.py | gongcm/fuchisa-os | 83c2f90cf8f80aefb964dfe4a653664377b59384 | [
"BSD-Source-Code"
] | null | null | null | src/tests/fidl/source_compatibility/gen/scaffolding.py | gongcm/fuchisa-os | 83c2f90cf8f80aefb964dfe4a653664377b59384 | [
"BSD-Source-Code"
] | null | null | null | # Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Contains all of the FIDL/binding starter code. """
import datetime
from transitions import Binding
year = datetime.datetime.now().year
fuchsia_copyright = '''
// Copyright {year} The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'''.format(year=year).strip()
def get_fidl(library_name: str) -> str:
return fuchsia_copyright + fidl_file.format(
lib_decl=fidl_lib_decl(library_name))
def fidl_lib_decl(library_name: str) -> str:
return f'library fidl.test.{library_name}'
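# For example (illustrative): fidl_lib_decl('example') returns
# 'library fidl.test.example', which get_fidl() substitutes into the
# {lib_decl} placeholder of the fidl_file template below.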
def get_src(binding: Binding, library_name: str) -> str:
return fuchsia_copyright + init_by_binding[binding].format(
library_name=library_name)
fidl_file = '''
{lib_decl};
// INSERT FIDL HERE
'''
hlcpp_init = '''
#include <fidl/test/{library_name}/cpp/fidl.h> // nogncheck
namespace fidl_test = fidl::test::{library_name};
// INSERT TEST CODE HERE
int main(int argc, const char** argv) {{
return 0;
}}
'''
llcpp_init = '''
#include <fidl/test/{library_name}/llcpp/fidl.h> // nogncheck
namespace fidl_test = llcpp::fidl::test::{library_name};
// INSERT TEST CODE HERE
int main(int argc, const char** argv) {{
return 0;
}}
'''
rust_init = '''
#![allow(dead_code)]
use fidl_fidl_test_{library_name} as fidl_lib;
// INSERT TEST CODE HERE
fn main() {{}}
'''
go_init = '''
package main
import (
lib "fidl/fidl/test/{library_name}"
"syscall/zx/fidl"
)
// INSERT TEST CODE HERE
func main() {{}}
'''
dart_init = '''
import 'package:fidl_fidl_test_{library_name}/fidl_async.dart' as fidllib;
// INSERT TEST CODE HERE
'''
init_by_binding = {
Binding.HLCPP: hlcpp_init,
Binding.LLCPP: llcpp_init,
Binding.RUST: rust_init,
Binding.GO: go_init,
Binding.DART: dart_init,
}
| 19.431373 | 74 | 0.698285 |
421c8e6933dde5c73a76ab16c3dc1452e9312599 | 11 | py | Python | secret.py | raymag/igbot | 73d82ef2b12c95f58383f609ca97ca40ea5262d5 | [
"MIT"
] | null | null | null | secret.py | raymag/igbot | 73d82ef2b12c95f58383f609ca97ca40ea5262d5 | [
"MIT"
] | null | null | null | secret.py | raymag/igbot | 73d82ef2b12c95f58383f609ca97ca40ea5262d5 | [
"MIT"
] | null | null | null | passwd = '' | 11 | 11 | 0.545455 |
242f51499339c91cb84536b263f9199f13eef4fa | 97 | py | Python | ciyi/data/__init__.py | VisualJoyce/CiYi | b9a0987a0fc33142b643f9b23521be73493689f2 | [
"MIT"
] | 1 | 2021-09-13T02:20:53.000Z | 2021-09-13T02:20:53.000Z | ciyi/data/__init__.py | VisualJoyce/CiYi | b9a0987a0fc33142b643f9b23521be73493689f2 | [
"MIT"
] | 1 | 2021-11-10T09:43:12.000Z | 2021-12-07T05:53:20.000Z | src/andushu/data/__init__.py | VisualJoyce/andushu | adf7f32b89c788734d9dff8e96ff55a35488dd51 | [
"MIT"
] | null | null | null | from .token_indexers.openai_transformer_byte_pair_indexer import OpenaiTransformerBytePairIndexer | 97 | 97 | 0.948454 |
eb0010d88f4c1f552acfdfee70af0ca771724d27 | 1,909 | py | Python | Analysis/Precision-Recall_Analysis/scripts/hmmscan_parse.py | dantaslab/resfams_update | 982091818a299d316811fe98c7656762be7284fb | [
"MIT"
] | null | null | null | Analysis/Precision-Recall_Analysis/scripts/hmmscan_parse.py | dantaslab/resfams_update | 982091818a299d316811fe98c7656762be7284fb | [
"MIT"
] | null | null | null | Analysis/Precision-Recall_Analysis/scripts/hmmscan_parse.py | dantaslab/resfams_update | 982091818a299d316811fe98c7656762be7284fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
File Name : hmmscan_parse.py
Author : Max Bernstein
Created On : 2018-04-11
Last Modified : 2019-12-17
Description : A program to parse an hmmscan output
Dependencies :py-biopython
Usage : hmmscan_parse.py --infile testing_set_hmmscan.txt
--out_path testing_set_hmmscan_parsed.txt
CHANGE LOG :
TODO :
"""
import sys
import os
import argparse
from Bio import SearchIO
def main(argv):
args = parse_arguments(argv)
infile = args.infile
out= args.out_path
outputs = []
query_sequences = []
count = 0
with open(out, 'w+') as output:
output.write("%s\t%s\t%s\t%s\n" % ("Accession","family","query_name","Resfams_description"))
for qresult in SearchIO.parse(infile, "hmmer3-tab"):
for hits in qresult:
accession = hits.accession
id = hits.id
query = hits.query_id
description = hits.description
score = hits.bitscore
array = [accession,id,query,description,str(score)]
print("\t".join(array))
output.write("\t".join(array)+"\n")
if hits.query_id not in query_sequences:
query_sequences.append(hits.query_id)
count += 1
print("Unique Seqs: " + str(count))
def parse_arguments(argv):
parser = argparse.ArgumentParser(
prog = 'hmmscan.parse',
description = 'A program to parse an hmmscan output')
parser.add_argument(
'-i', '--infile',
help = 'Enter first hmmscan outfile',
required = True
)
parser.add_argument(
'-o', '--out_path',
dest = 'out_path',
help = 'Enter path to output directory'
)
return parser.parse_args()
if __name__=="__main__":
main(sys.argv[1:])
| 25.453333 | 100 | 0.573075 |
b6891a3637df14fec1b518cb28cfa7c09116f858 | 48 | py | Python | tests/__init__.py | j-faria/observing-strategy | f15def60227a312e846810a763926a2b2f8155ea | [
"MIT"
] | null | null | null | tests/__init__.py | j-faria/observing-strategy | f15def60227a312e846810a763926a2b2f8155ea | [
"MIT"
] | null | null | null | tests/__init__.py | j-faria/observing-strategy | f15def60227a312e846810a763926a2b2f8155ea | [
"MIT"
] | null | null | null | """Unit test package for observing_strategy."""
| 24 | 47 | 0.75 |
90acc351928ed6b13391b9c49eadba4bd20309a1 | 209 | py | Python | Jennifer Ezeobi/project8-python.py | cornelia247/cil-internship-cohort-01 | b8184337056d378eab16d26b40b26ed58cd177bb | [
"MIT"
] | null | null | null | Jennifer Ezeobi/project8-python.py | cornelia247/cil-internship-cohort-01 | b8184337056d378eab16d26b40b26ed58cd177bb | [
"MIT"
] | null | null | null | Jennifer Ezeobi/project8-python.py | cornelia247/cil-internship-cohort-01 | b8184337056d378eab16d26b40b26ed58cd177bb | [
"MIT"
] | null | null | null | def image_resize(image,width,height):
from PIL import Image
img = Image.open(image)
resized_img = img.resize((width,height))
resized_img.save("resized_image.jpg")
image_resize('rtg.jpeg',70,70) | 34.833333 | 44 | 0.722488 |
4e72b9e5d12ab64c93950fc99cdc01f922522a15 | 569 | py | Python | using_dict.py | lzcdev/EasyPythonDemo | 6e8af845d08a47e378c5c7d170dfa45fd7e362e3 | [
"MIT"
] | null | null | null | using_dict.py | lzcdev/EasyPythonDemo | 6e8af845d08a47e378c5c7d170dfa45fd7e362e3 | [
"MIT"
] | null | null | null | using_dict.py | lzcdev/EasyPythonDemo | 6e8af845d08a47e378c5c7d170dfa45fd7e362e3 | [
"MIT"
] | null | null | null | #!us/bin/python
# 'ab' is short for 'a'ddress'b'ook
ab = {
'Swaroop': 'swaroopch@byteofpython.info',
'Larry': 'larry@wall.org',
'Matsumoto': 'matz@ruby-lang.org',
'Spammer': 'spammer@hotmail.com'
}
print 'Swaroop\'s address is %s' % ab['Swaroop']
# Adding a key/valye pair
ab['Guido'] = 'guido@python.org'
# Deleting a key/value pair
del ab['Spammer']
print '\nThere are %d contacts in the address-book\n' % len(ab)
for name, address in ab.items():
print 'contact %s at %s' % (name, address)
if 'Guido' in ab:
print '\nGuido\'s address is %s' % ab['Guido'] | 22.76 | 63 | 0.655536 |
5fe39831a9a230892ff603cbc82ed94755815c24 | 2,099 | py | Python | operators/akka-cluster-operator/python/pulumi_pulumi_kubernetes_crds_operators_akka_cluster_operator/provider.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | operators/akka-cluster-operator/python/pulumi_pulumi_kubernetes_crds_operators_akka_cluster_operator/provider.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | 2 | 2020-09-18T17:12:23.000Z | 2020-12-30T19:40:56.000Z | operators/akka-cluster-operator/python/pulumi_pulumi_kubernetes_crds_operators_akka_cluster_operator/provider.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
__all__ = ['Provider']
class Provider(pulumi.ProviderResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Create a Crds resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
super(Provider, __self__).__init__(
'pulumi_kubernetes_crds_operators_akka_cluster_operator',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 38.163636 | 134 | 0.647451 |
33e9b41a4a961780d6c81f953b5cdf249906868c | 1,325 | py | Python | setup.py | marquicus/django-sequence-field | f6d71bac8e9fa42e5ce9822628125ff2293aed83 | [
"BSD-3-Clause"
] | 1 | 2020-01-14T08:11:57.000Z | 2020-01-14T08:11:57.000Z | setup.py | marquicus/django-sequence-field | f6d71bac8e9fa42e5ce9822628125ff2293aed83 | [
"BSD-3-Clause"
] | null | null | null | setup.py | marquicus/django-sequence-field | f6d71bac8e9fa42e5ce9822628125ff2293aed83 | [
"BSD-3-Clause"
] | 2 | 2020-01-14T10:09:52.000Z | 2020-08-18T20:56:56.000Z | from setuptools import setup, find_packages
description = 'A Django Field for creating templated sequence strings.'
try:
with open('README.md') as f:
long_description = f.read()
except IOError:
long_description = description
setup(
name='django-sequence-field',
version='0.2.4',
description=description,
packages=find_packages(),
include_package_data=True,
author='Antonio Ognio',
author_email='antonio@ognio.com',
url='https://github.com/gnrfan/django-sequence-field',
long_description=long_description,
install_requires=['django >= 1.11'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Django',
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.0",
"Framework :: Django :: 2.1",
"Framework :: Django :: 2.2",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Database",
],
)
| 32.317073 | 71 | 0.616604 |
93a35c74ff77230404227426798bd7148052e1b3 | 5,270 | py | Python | src/sdk/pynni/nni/nas/pytorch/base_mutator.py | LIIXII/nni | f1ce1648b24d2668c2eb8fa02b158a7b6da80ea4 | [
"MIT"
] | 2 | 2020-04-24T01:36:40.000Z | 2020-04-24T01:37:10.000Z | src/sdk/pynni/nni/nas/pytorch/base_mutator.py | bobo4u/nni | f1ce1648b24d2668c2eb8fa02b158a7b6da80ea4 | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/nas/pytorch/base_mutator.py | bobo4u/nni | f1ce1648b24d2668c2eb8fa02b158a7b6da80ea4 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import torch.nn as nn
from nni.nas.pytorch.mutables import Mutable, MutableScope, InputChoice
from nni.nas.pytorch.utils import StructuredMutableTreeNode
logger = logging.getLogger(__name__)
class BaseMutator(nn.Module):
"""
A mutator is responsible for mutating a graph by obtaining the search space from the network and implementing
callbacks that are called in ``forward`` in mutables.
Parameters
----------
model : nn.Module
PyTorch model to apply mutator on.
"""
def __init__(self, model):
super().__init__()
self.__dict__["model"] = model
self._structured_mutables = self._parse_search_space(self.model)
def _parse_search_space(self, module, root=None, prefix="", memo=None, nested_detection=None):
if memo is None:
memo = set()
if root is None:
root = StructuredMutableTreeNode(None)
if module not in memo:
memo.add(module)
if isinstance(module, Mutable):
if nested_detection is not None:
raise RuntimeError("Cannot have nested search space. Error at {} in {}"
.format(module, nested_detection))
module.name = prefix
module.set_mutator(self)
root = root.add_child(module)
if not isinstance(module, MutableScope):
nested_detection = module
if isinstance(module, InputChoice):
for k in module.choose_from:
if k != InputChoice.NO_KEY and k not in [m.key for m in memo if isinstance(m, Mutable)]:
raise RuntimeError("'{}' required by '{}' not found in keys that appeared before, and is not NO_KEY."
.format(k, module.key))
for name, submodule in module._modules.items():
if submodule is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
self._parse_search_space(submodule, root, submodule_prefix, memo=memo,
nested_detection=nested_detection)
return root
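# Illustrative note (added, roughly sketched): for a model whose __init__
# defines self.block as a MutableScope containing self.block.op = LayerChoice(...),
# the parsed structure is approximately
#   root -> MutableScope (name 'block') -> LayerChoice (name 'block.op')
# with mutable names taken from their attribute paths, as assigned above.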
@property
def mutables(self):
"""
A generator of all modules inheriting :class:`~nni.nas.pytorch.mutables.Mutable`.
Modules are yielded in the order that they are defined in ``__init__``.
For mutables with their keys appearing multiple times, only the first one will appear.
"""
return self._structured_mutables
@property
def undedup_mutables(self):
return self._structured_mutables.traverse(deduplicate=False)
def forward(self, *inputs):
"""
Warnings
--------
Don't call forward of a mutator.
"""
raise RuntimeError("Forward is undefined for mutators.")
def __setattr__(self, name, value):
if name == "model":
raise AttributeError("Attribute `model` can be set at most once, and you shouldn't use `self.model = model` to "
"include you network, as it will include all parameters in model into the mutator.")
return super().__setattr__(name, value)
def enter_mutable_scope(self, mutable_scope):
"""
Callback when forward of a MutableScope is entered.
Parameters
----------
mutable_scope : MutableScope
The mutable scope that is entered.
"""
pass
def exit_mutable_scope(self, mutable_scope):
"""
Callback when forward of a MutableScope is exited.
Parameters
----------
mutable_scope : MutableScope
The mutable scope that is exited.
"""
pass
def on_forward_layer_choice(self, mutable, *inputs):
"""
Callbacks of forward in LayerChoice.
Parameters
----------
mutable : LayerChoice
Module whose forward is called.
inputs : list of torch.Tensor
The arguments of its forward function.
Returns
-------
tuple of torch.Tensor and torch.Tensor
Output tensor and mask.
"""
raise NotImplementedError
def on_forward_input_choice(self, mutable, tensor_list):
"""
Callbacks of forward in InputChoice.
Parameters
----------
mutable : InputChoice
Mutable that is called.
tensor_list : list of torch.Tensor
The arguments mutable is called with.
Returns
-------
tuple of torch.Tensor and torch.Tensor
Output tensor and mask.
"""
raise NotImplementedError
def export(self):
"""
Export the data of all decisions. This should output the decisions of all the mutables, so that the whole
network can be fully determined with these decisions for further training from scratch.
Returns
-------
dict
Mappings from mutable keys to decisions.
"""
raise NotImplementedError
| 34.220779 | 129 | 0.585579 |
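# Editor's sketch (not part of the nni module above): a minimal concrete
# mutator implementing the abstract callbacks BaseMutator declares. The
# attribute names `choices` and `key`, and iterating self.mutables to reach
# each mutable, are assumptions about this nni version's API; the point is
# only to show the callback contract (return an output tensor plus a mask).
import torch
from nni.nas.pytorch.base_mutator import BaseMutator

class PickFirstMutator(BaseMutator):
    def on_forward_layer_choice(self, mutable, *inputs):
        output = mutable.choices[0](*inputs)      # always run candidate op 0
        mask = torch.zeros(len(mutable.choices))
        mask[0] = 1.
        return output, mask

    def on_forward_input_choice(self, mutable, tensor_list):
        mask = torch.zeros(len(tensor_list))      # always pick the first input
        mask[0] = 1.
        return tensor_list[0], mask

    def export(self):
        # decisions keyed by mutable key, as export()'s docstring describes
        return {mutable.key: 0 for mutable in self.mutables}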
987542eac15d945be2f4981f633f63e43a1d6be6 | 448 | py | Python | Ex_63.py | soldierloko/Curso-em-Video | d867366425f72fe15903cb17cdc222a7fe7a3831 | [
"MIT"
] | null | null | null | Ex_63.py | soldierloko/Curso-em-Video | d867366425f72fe15903cb17cdc222a7fe7a3831 | [
"MIT"
] | null | null | null | Ex_63.py | soldierloko/Curso-em-Video | d867366425f72fe15903cb17cdc222a7fe7a3831 | [
"MIT"
] | null | null | null | #Escreva um programa que leia um número n inteiro qualquer e mostre na tela os n primeiros elementos de uma sequência de Fibonacci
print('-'*30)
print('Sequencia de Fibonacci')
print('-'*30)
n = int(input('Quantos termos você quer mostrar? '))
t1 = 0
t2 = 1
print('~'*30)
print('{} > {}'.format(t1,t2), end='')
cont=3
while cont <= n:
t3 = t1+t2
    print(' > {}'.format(t3),end='')
t1=t2
t2=t3
cont += 1
print(' > END')
print('~'*30)
| 23.578947 | 130 | 0.622768 |
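# A compact function-style variant of the same exercise (editor's sketch, not
# part of the course file above): it returns the first n Fibonacci terms
# instead of printing them as it goes.
def fibonacci(n):
    terms = []
    a, b = 0, 1
    for _ in range(n):
        terms.append(a)
        a, b = b, a + b
    return terms

# fibonacci(6) -> [0, 1, 1, 2, 3, 5]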
ed2af4dbac17f1dec6b5a8d68db9e3202f8a14a0 | 931 | py | Python | tests/test_config.py | mochazi/objprint | 49bc213073ec56a4bed63fb5180240f6c01df6eb | [
"Apache-2.0"
] | 191 | 2021-03-10T09:41:02.000Z | 2022-03-30T19:15:39.000Z | tests/test_config.py | mochazi/objprint | 49bc213073ec56a4bed63fb5180240f6c01df6eb | [
"Apache-2.0"
] | 46 | 2021-03-02T02:09:39.000Z | 2022-03-26T11:22:05.000Z | tests/test_config.py | mochazi/objprint | 49bc213073ec56a4bed63fb5180240f6c01df6eb | [
"Apache-2.0"
] | 12 | 2021-02-28T21:43:58.000Z | 2022-03-23T11:44:47.000Z | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/objprint/blob/master/NOTICE.txt
import io
from contextlib import redirect_stdout
from objprint import config, objprint
from .objtest import ObjTest, ObjprintTestCase
class TestConfig(ObjprintTestCase):
def test_config_none_exist(self):
self.assertRaises(ValueError, lambda: config(height=50))
def test_config_wrong_type(self):
self.assertRaises(TypeError, lambda: config(exclude=50))
def test_config_element(self):
config(elements=2)
e = ObjTest({"first": 1, "second": 2, "third": 3})
with io.StringIO() as buf, redirect_stdout(buf):
objprint(e)
output = buf.getvalue()
self.assertIn("first", output)
self.assertIn("second", output)
self.assertNotIn("third", output)
config(elements=-1)
| 32.103448 | 80 | 0.689581 |
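# Usage sketch implied by the test above (editor's addition; assumes the same
# objprint API the test imports): config(elements=2) caps how many attributes
# objprint shows per object, and config(elements=-1) lifts the cap again, as
# the test's cleanup does.
from objprint import config, objprint

class Point:
    def __init__(self):
        self.x, self.y, self.z = 1, 2, 3

config(elements=2)
objprint(Point())    # shows x and y, omits z
config(elements=-1)  # back to printing every attribute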
9e2f6f2ae6c6c66ceb0a97611560af2eafee6d17 | 1,105 | py | Python | roast/exceptions.py | Xilinx/roast | 999594fd5d1f0c92d26bc36741ea85d57a0235e9 | [
"MIT"
] | 1 | 2021-09-01T15:29:15.000Z | 2021-09-01T15:29:15.000Z | roast/exceptions.py | Xilinx/roast | 999594fd5d1f0c92d26bc36741ea85d57a0235e9 | [
"MIT"
] | null | null | null | roast/exceptions.py | Xilinx/roast | 999594fd5d1f0c92d26bc36741ea85d57a0235e9 | [
"MIT"
] | 4 | 2020-11-19T11:42:36.000Z | 2021-02-05T04:20:52.000Z | #
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
import logging
log = logging.getLogger(__name__)
class RoastError(Exception):
"""
The base exception class for all roast exceptions.
"""
def __init__(self, message: str = None, log_stack: bool = False) -> None:
self.message = message or getattr(self.__class__, "message", None)
super().__init__(message)
if log_stack:
log.exception(message)
else:
log.error(message)
def __str__(self):
return self.message
class DirectoryNotFoundError(RoastError):
"""
Raised when directory is not found in roast utils.
"""
class GitError(RoastError):
"""
Raised when a Git error occurs in roast utils.
"""
class ExpectError(RoastError):
"""
Raised when EOF or TIMEOUT occurs in Xexpect.
"""
class PluginError(RoastError):
"""
Raised when plugin type is not supported in roast.
"""
class RandomizerError(RoastError):
"""
Raised when Randomizer failed data generation.
"""
| 19.385965 | 77 | 0.642534 |
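# Usage sketch (editor's addition, not part of the roast source above): every
# subclass inherits RoastError's behaviour of logging the message when the
# exception is constructed; passing log_stack=True swaps log.error() for
# log.exception(). The repository URL below is purely illustrative.
from roast.exceptions import GitError

def clone(repo_url):
    raise GitError("failed to clone %s" % repo_url)   # logged via log.error

try:
    clone("https://example.com/repo.git")
except GitError as err:
    print(err)   # -> failed to clone https://example.com/repo.git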
8ea9a649350beff7b0a0068d1ea7c6d8b31224ac | 1,838 | py | Python | src/m6_your_turtles.py | boewebe/01-IntroductionToPython | 14da52b34275c7e69bb89f04aa2a06ba5c3707cb | [
"MIT"
] | null | null | null | src/m6_your_turtles.py | boewebe/01-IntroductionToPython | 14da52b34275c7e69bb89f04aa2a06ba5c3707cb | [
"MIT"
] | null | null | null | src/m6_your_turtles.py | boewebe/01-IntroductionToPython | 14da52b34275c7e69bb89f04aa2a06ba5c3707cb | [
"MIT"
] | null | null | null | """
Your chance to explore Loops and Turtles!
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Brendan Boewe.
"""
########################################################################
# DONE: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
########################################################################
########################################################################
# DONE: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
#
########################################################################
import rosegraphics as rg
window = rg.TurtleWindow()
boris = rg.SimpleTurtle()
boris.pen = rg.Pen('yellow',5)
boris.speed = 5
john = rg.SimpleTurtle()
john.pen = rg.Pen('blue', 1)
john.speed = 9
for k in range(5):
boris.forward(100)
boris.right(145)
john.pen_up()
john.left(90)
john.forward(100)
john.right(90)
john.forward(100)
for k in range(14):
john.pen_down()
john.right(120)
john.forward(100)
john.right(120)
john.forward(100)
john.right(120)
john.forward(100)
john.pen_up()
john.right(25)
window.close_on_mouse_click() | 27.432836 | 73 | 0.577258 |
7647d8eba17816e1e834594514504936cefe7913 | 725 | py | Python | tests/test_svgstyle.py | tomghyselinck/sphinxcontrib-plantuml | 851d572b94f64f82603d11d01afb9c07ada89456 | [
"BSD-2-Clause"
] | null | null | null | tests/test_svgstyle.py | tomghyselinck/sphinxcontrib-plantuml | 851d572b94f64f82603d11d01afb9c07ada89456 | [
"BSD-2-Clause"
] | null | null | null | tests/test_svgstyle.py | tomghyselinck/sphinxcontrib-plantuml | 851d572b94f64f82603d11d01afb9c07ada89456 | [
"BSD-2-Clause"
] | null | null | null | import os
import shutil
import tempfile
from sphinxcontrib import plantuml
def setup():
global _tempdir
_tempdir = tempfile.mkdtemp()
def teardown():
shutil.rmtree(_tempdir)
def writefile(fname, data):
f = open(fname, 'w')
try:
f.write(data)
finally:
f.close()
def test_get_svg_style():
fname = os.path.join(_tempdir, 'a.svg')
writefile(
fname,
'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
'<svg xmlns="http://www.w3.org/2000/svg" height="147pt" '
'style="width:115px;height:147px;" version="1.1" viewBox="0 0 115 147" '
'width="115pt"><defs/>')
assert plantuml._get_svg_style(fname) == 'width:115px;height:147px;'
| 24.166667 | 80 | 0.626207 |
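# For illustration only (editor's addition): the test above expects
# _get_svg_style() to pull the style="..." attribute out of the root <svg>
# element. A rough standalone equivalent -- an assumption, not the extension's
# actual implementation -- could look like:
import re

def get_svg_style(fname):
    with open(fname) as f:
        head = f.read(1024)            # the attribute sits in the opening tag
    match = re.search(r'<svg[^>]*\bstyle="([^"]*)"', head)
    return match.group(1) if match else None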
affb1733dd4cd4bd9e5fc7503afba4ca00d1ad98 | 18,191 | py | Python | smop/parse.py | blink1073/smop | 87e25309b1fb5ed54a5bd8d5d5cee00d39452800 | [
"MIT"
] | 1 | 2015-01-26T09:23:44.000Z | 2015-01-26T09:23:44.000Z | smop/parse.py | blink1073/smop | 87e25309b1fb5ed54a5bd8d5d5cee00d39452800 | [
"MIT"
] | null | null | null | smop/parse.py | blink1073/smop | 87e25309b1fb5ed54a5bd8d5d5cee00d39452800 | [
"MIT"
] | null | null | null | # SMOP compiler -- Simple Matlab/Octave to Python compiler
# Copyright 2011-2013 Victor Leikehman
import copy
import pprint
import os
import operator
import sys
import re
import yacc
from lexer import tokens
import lexer
#import builtins
import node
#from node import *
import resolve
# ident properties (set in parse.py)
# ----------------------------------
# G global
# A function argument
# R function return value
# I for-loop iteration index
#
# ident properties (set in resolve.py)
# ------------------------------------
# U use =...a or =...a(b)
# D def a=... or [a,b,c]=...
# P update a(b)=... or [a(b) c(d)]=...
class error(Exception):
pass
class syntax_error(error):
pass
precedence = (
("nonassoc","HANDLE"),
("left", "COMMA"),
("left", "COLON"),
("left", "ANDAND", "OROR"),
("left", "EQ", "NE", "GE", "LE", "GT", "LT"),
("left", "OR", "AND"),
("left", "PLUS", "MINUS"),
("left", "MUL","DIV","DOTMUL","DOTDIV","BACKSLASH"),
("right","UMINUS","NEG"),
("right","TRANSPOSE"),
("right","EXP", "DOTEXP"),
("nonassoc","LPAREN","RPAREN","RBRACE","LBRACE"),
("left", "FIELD","DOT"),
)
def p_top(p):
"""
top :
| stmt_list
| top func_decl stmt_list_opt
| top func_decl stmt_list END_STMT semi_opt
"""
if len(p) == 1:
p[0] = node.stmt_list()
elif len(p) == 2:
p[0] = p[1]
else:
# we backpatch the func_decl node
assert p[2].__class__ is node.func_decl
p[2].use_nargin = use_nargin
try:
if p[3][-1].__class__ is not node.return_stmt:
p[3].append(node.return_stmt(ret_expr))
except:
raise syntax_error(p)
p[0] = p[1]
p[0].append(node.function(head=p[2],body=p[3]))
assert isinstance(p[0],node.stmt_list)
def p_semi_opt(p):
"""
semi_opt :
| semi_opt SEMI
| semi_opt COMMA
"""
pass
def p_stmt(p):
"""
stmt : let
| continue_stmt
| break_stmt
| expr_stmt
| global_stmt
| command
| for_stmt
| if_stmt
| null_stmt
| return_stmt
| switch_stmt
| try_catch
| while_stmt
"""
# END_STMT is intentionally left out
p[0] = p[1]
# FIXME: order statements by ABC
### command
def p_arg1(p):
"""
arg1 : STRING
| NUMBER
| IDENT
| GLOBAL
"""
# a hack to support "clear global"
p[0] = node.string(value=str(p[1]),
lineno=p.lineno(1),
lexpos=p.lexpos(1))
def p_args(p):
"""
args : arg1
| args arg1
"""
if len(p) == 2:
p[0] = node.expr_list([p[1]])
else:
p[0] = p[1]
p[0].append(p[2])
def p_command(p):
"""
command : ident args SEMI
"""
# if p[1].name == "load":
# # "load filename x" ==> "x=load(filename)"
# # "load filename x y z" ==> "(x,y,z)=load(filename)"
# ret=node.expr_list([node.ident(t.value) for t in p[2][1:]])
# p[0] = node.funcall(func_expr=p[1],
# args=node.expr_list(p[2]),
# ret=ret)
# else:
p[0] = node.funcall(p[1],p[2])
####################
def p_global_list(p):
"""global_list : ident
| global_list ident
"""
if len(p) == 2:
p[0] = node.global_list([p[1]])
elif len(p) == 3:
p[0] = p[1]
p[0].append(p[2])
def p_global_stmt(p):
"global_stmt : GLOBAL global_list SEMI"
p[0] = node.global_stmt(p[2])
for ident in p[0]:
ident.props="G" # G=global
def p_return_stmt(p):
"return_stmt : RETURN SEMI"
p[0] = node.return_stmt(ret=ret_expr)
def p_continue_stmt(p):
"continue_stmt : CONTINUE SEMI"
p[0] = node.continue_stmt(None)
def p_break_stmt(p):
"break_stmt : BREAK SEMI"
p[0] = node.break_stmt(None)
# switch-case-otherwise
def p_switch_stmt(p):
"""
switch_stmt : SWITCH expr semi_opt case_list END_STMT
"""
def backpatch(expr,stmt):
if isinstance(stmt,node.if_stmt):
stmt.cond_expr.args[1] = expr
backpatch(expr,stmt.else_stmt)
backpatch(p[2],p[4])
p[0] = p[4]
def p_case_list(p):
"""
case_list :
| CASE expr sep stmt_list_opt case_list
| OTHERWISE stmt_list
"""
if len(p) == 1:
p[0] = node.stmt_list()
elif len(p) == 3:
assert isinstance(p[2],node.stmt_list)
p[0] = p[2]
elif len(p) == 6:
p[0] = node.if_stmt(cond_expr=node.expr(op="==",
args=node.expr_list([p[2]])),
then_stmt=p[4],
else_stmt=p[5])
p[0].cond_expr.args.append(None) # None will be replaced using backpatch()
else:
assert 0
# try-catch
def p_try_catch(p):
"""
try_catch : TRY stmt_list CATCH stmt_list END_STMT
"""
assert isinstance(p[2],node.stmt_list)
assert isinstance(p[4],node.stmt_list)
p[0] = node.try_catch(try_stmt=p[2],
catch_stmt=p[4])
def p_null_stmt(p):
"""
null_stmt : SEMI
| COMMA
"""
p[0] = None
def p_func_decl(p):
"""func_decl : FUNCTION ident args_opt SEMI
| FUNCTION ret '=' ident args_opt SEMI
"""
global ret_expr,use_nargin
use_nargin = 0
if len(p) == 5:
assert isinstance(p[3],node.expr_list)
p[0] = node.func_decl(ident=p[2],
ret=node.expr_list(),
args=p[3])
ret_expr = node.expr_list()
elif len(p) == 7:
assert isinstance(p[2],node.expr_list)
assert isinstance(p[5],node.expr_list)
p[0] = node.func_decl(ident=p[4],
ret=p[2],
args=p[5])
ret_expr = p[2]
else:
assert 0
def p_args_opt(p):
"""
args_opt :
| LPAREN RPAREN
| LPAREN arg_list RPAREN
"""
if len(p) == 1:
p[0] = node.expr_list()
elif len(p) == 3:
p[0] = node.expr_list()
elif len(p) == 4:
assert isinstance(p[2],node.expr_list)
p[0] = p[2]
else:
assert 0
def p_arg_list(p):
"""
arg_list : ident
| arg_list COMMA ident
"""
if len(p) == 2:
p[1].__class__ = node.param
p[0] = node.expr_list([p[1]])
elif len(p) == 4:
p[0] = p[1]
p[3].__class__ = node.param
p[0].append(p[3])
else:
assert 0
assert isinstance(p[0],node.expr_list)
for ident in p[0]:
ident.props="A"
def p_ret(p):
"""
ret : ident
| LBRACKET RBRACKET
| LBRACKET expr_list RBRACKET
"""
if len(p) == 2:
p[0] = node.expr_list([p[1]])
elif len(p) == 3:
p[0] = node.expr_list([])
elif len(p) == 4:
assert isinstance(p[2],node.expr_list)
p[0] = p[2]
else:
assert 0
for ident in p[0]:
ident.props="R"
# end func_decl
def p_stmt_list_opt(p):
"""
stmt_list_opt :
| stmt_list
"""
if len(p) == 1:
p[0] = node.stmt_list()
else:
p[0] = p[1]
def p_stmt_list(p):
"""
stmt_list : stmt
| stmt_list stmt
"""
if len(p) == 2:
p[0] = node.stmt_list([p[1]] if p[1] else [])
elif len(p) == 3:
p[0] = p[1]
if p[2]:
p[0].append(p[2])
else:
assert 0
def p_concat_list(p):
"""
concat_list : expr_list SEMI expr_list
| concat_list SEMI expr_list
"""
if p[1].__class__ == node.expr_list:
p[0] = node.concat_list([p[1],p[3]])
else:
p[0] = p[1]
p[0].append(p[3])
def p_expr_list(p):
"""
expr_list : exprs
| exprs COMMA
"""
p[0] = p[1]
def p_exprs(p):
"""
exprs : expr
| exprs COMMA expr
"""
if len(p) == 2:
p[0] = node.expr_list([p[1]])
elif len(p) == 4:
p[0] = p[1]
p[0].append(p[3])
else:
assert(0)
assert isinstance(p[0],node.expr_list)
def p_expr_stmt(p):
"""
expr_stmt : expr_list SEMI
"""
assert isinstance(p[1],node.expr_list)
p[0] = node.expr_stmt(expr=p[1])
def p_while_stmt(p):
"""
while_stmt : WHILE expr SEMI stmt_list END_STMT
"""
assert isinstance(p[4],node.stmt_list)
p[0] = node.while_stmt(cond_expr=p[2],
stmt_list=p[4])
def p_separator(p):
"""
sep : COMMA
| SEMI
"""
p[0] = p[1]
def p_if_stmt(p):
"""
if_stmt : IF expr sep stmt_list_opt elseif_stmt END_STMT
| IF expr error stmt_list_opt elseif_stmt END_STMT
"""
p[0] = node.if_stmt(cond_expr=p[2],
then_stmt=p[4],
else_stmt=p[5])
def p_elseif_stmt(p):
"""
elseif_stmt :
| ELSE stmt_list_opt
| ELSEIF expr sep stmt_list_opt elseif_stmt
"""
if len(p) == 1:
p[0] = node.stmt_list()
elif len(p) == 3:
p[0] = p[2]
elif len(p) == 6:
p[0] = node.if_stmt(cond_expr=p[2],
then_stmt=p[4],
else_stmt=p[5])
else:
assert 0
def p_let(p):
"""
let : expr '=' expr SEMI
"""
assert (isinstance(p[1],(node.ident,node.funcall,node.cellarrayref)) or
(isinstance(p[1],node.expr) and p[1].op in (("{}","DOT","."))))
"""
try:
# a(:) = ...
# ravel(a) = ...
# a[:] =
if p[1].func_expr.name == "ravel":
p[1] = node.arrayref(p[1].args[0],node.expr(":",node.expr_list()))
except:
pass
"""
if isinstance(p[1],node.getfield):
p[0] = node.setfield(p[1].args[0],
p[1].args[1],
p[3])
else:
#assert len(p[1].args) > 0
ret = p[1].args if isinstance(p[1],node.matrix) else p[1]
p[0] = node.let(ret=ret,
args=p[3],
lineno=p.lineno(2),
lexpos=p.lexpos(2))
if isinstance(p[1],node.matrix):
p[0].nargout = len(p[1].args)
else:
p[0].nargout = 1
def p_for_stmt(p):
"""
for_stmt : FOR ident '=' expr SEMI stmt_list END_STMT
| FOR LPAREN ident '=' expr RPAREN SEMI stmt_list END_STMT
"""
if len(p) == 8:
p[2].props="I" # I= for-loop iteration variable
p[0] = node.for_stmt(ident=p[2],
expr=p[4],
stmt_list=p[6])
# # lexpos used to be unique per ident
# # this rare case breaks that assumption
# new_index = node.ident.new("I",
# lineno=p.lineno(2),
# lexpos=p.lexpos(2))
# new_data = node.ident.new("D",
# lineno=p.lineno(2),
# lexpos=p.lexpos(2))
# stmt1 = node.let(new_data, p[4])
# stmt2 = node.let(p[2],node.arrayref(new_data,new_index))
# stmt3 = node.for_stmt(ident=new_index,
# expr=node.expr_list([node.number(1),
# node.SIZE(new_data)]),
# stmt_list=p[6])
# stmt3.stmt_list.insert(0,stmt2)
# p[0] = node.stmt_list([stmt1,stmt3])
# #p[0] = stmt3
# if len(p) == 8:
# assert isinstance(p[6],stmt_list)
# p[0] = for_stmt(ident=p[2],
# expr=p[4],
# stmt_list=p[6])
# else:
# p[0] = for_stmt(ident=p[3],
# expr=p[5],
# stmt_list=p[8])
################ expr ################
def p_expr(p):
"""expr : ident
| end
| number
| string
| colon
| NEG
| matrix
| cellarray
| expr2
| expr1
| lambda_expr
"""
if p[1]=="~":
p[0] = node.ident(name="__")
else:
p[0] = p[1]
def p_lambda_args(p):
"""lambda_args : LPAREN RPAREN
| LPAREN arg_list RPAREN
"""
p[0] = p[2] if len(p) == 4 else node.expr_list()
def p_lambda_expr(p):
"""lambda_expr : HANDLE lambda_args expr
"""
p[0] = node.lambda_expr(args=p[2], ret=p[3])
def p_expr_ident(p):
"ident : IDENT"
if p[1] == "nargin":
global use_nargin
use_nargin += 1
#import pdb; pdb.set_trace()
p[0] = node.ident(name=p[1],
lineno=p.lineno(1),
lexpos=p.lexpos(1),
column=p.lexpos(1) - p.lexer.lexdata.rfind("\n",0,p.lexpos(1)))
def p_expr_number(p):
"number : NUMBER"
p[0] = node.number(p[1],lineno=p.lineno(1),lexpos=p.lexpos(1))
def p_expr_end(p):
"end : END_EXPR"
p[0] = node.expr(op="end",args=node.expr_list([node.number(0),
node.number(0)]))
def p_expr_string(p):
"string : STRING"
p[0] = node.string(p[1],lineno=p.lineno(1),lexpos=p.lexpos(1))
def p_expr_colon(p):
"colon : COLON"
p[0] = node.expr(op=":",args=node.expr_list())
def p_expr1(p):
"""expr1 : MINUS expr %prec UMINUS
| PLUS expr %prec UMINUS
| NEG expr
| HANDLE ident
"""
p[0] = node.expr(op=p[1],args=node.expr_list([p[2]]))
def p_cellarray(p):
"""
cellarray : LBRACE RBRACE
| LBRACE expr_list RBRACE
"""
if len(p) == 3:
p[0] = node.cellarray(op="{}",args=node.expr_list())
else:
p[0] = node.cellarray(op="{}",args=p[2])
def p_matrix(p):
"""matrix : LBRACKET RBRACKET
| LBRACKET concat_list RBRACKET
| LBRACKET concat_list SEMI RBRACKET
| LBRACKET expr_list RBRACKET
| LBRACKET expr_list SEMI RBRACKET
"""
if len(p) == 3:
p[0] = node.matrix()
else:
p[0] = node.matrix(p[2])
def p_paren_expr(p):
"""
expr : LPAREN expr RPAREN
"""
p[0] = node.expr(op="parens",args=node.expr_list([p[2]]))
def p_field_expr(p):
"""
expr : expr FIELD
"""
p[0] = node.expr(op=".",
args=node.expr_list([p[1],
node.ident(name=p[2],
lineno=p.lineno(2),
lexpos=p.lexpos(2))]))
def p_transpose_expr(p):
# p[2] contains the exact combination of plain and conjugate
# transpose operators, such as "'.''.''''".
"expr : expr TRANSPOSE"
p[0] = node.transpose(p[1],node.string(p[2]))
def p_cellarrayref(p):
"""expr : expr LBRACE expr_list RBRACE
| expr LBRACE RBRACE
"""
args = node.expr_list() if len(p) == 4 else p[3]
assert isinstance(args,node.expr_list)
p[0] = node.cellarrayref(func_expr=p[1],args=args)
def p_funcall_expr(p):
"""expr : expr LPAREN expr_list RPAREN
| expr LPAREN RPAREN
"""
if (0 and len(p)==5 and
len(p[3])==1 and
p[3][0].__class__ is node.expr and
p[3][0].op == ":" and not p[3][0].args):
# foo(:) => ravel(foo)
p[0] = node.funcall(func_expr=node.ident("ravel"),
args=node.expr_list([p[1]]))
else:
args = node.expr_list() if len(p) == 4 else p[3]
assert isinstance(args,node.expr_list)
p[0] = node.funcall(func_expr=p[1],args=args)
def p_expr2(p):
"""expr2 : expr AND expr
| expr ANDAND expr
| expr BACKSLASH expr
| expr COLON expr
| expr DIV expr
| expr DOT expr
| expr DOTDIV expr
| expr DOTEXP expr
| expr DOTMUL expr
| expr EQ expr
| expr EXP expr
| expr GE expr
| expr GT expr
| expr LE expr
| expr LT expr
| expr MINUS expr
| expr MUL expr
| expr NE expr
| expr OR expr
| expr OROR expr
| expr PLUS expr
"""
if p[2] == ".*":
p[0] = node.dot(p[1],p[3])
elif p[2] == "." and isinstance(p[3],node.expr) and p[3].op=="parens":
p[0] = node.getfield(p[1],p[3].args[0])
elif p[2] == ":" and isinstance(p[1],node.expr) and p[1].op==":":
# Colon expression means different things depending on the
# context. As an array subscript, it is a slice; otherwise,
# it is a call to the "range" function, and the parser can't
# tell which is which. So understanding of colon expressions
# is put off until after "resolve".
p[0] = p[1]
p[0].args.insert(1,p[3])
else:
p[0] = node.expr(op=p[2],args=node.expr_list([p[1],p[3]]))
opt_exclude = []
def p_error(p):
if p is None:
if p not in opt_exclude:
raise syntax_error(p)
elif p.lexpos not in opt_exclude:
raise syntax_error(p)
#print "Ignored:",p
return p
parser = yacc.yacc(start="top")
def parse(buf,filename=""):
try:
new_lexer = lexer.new()
p = parser.parse(buf,tracking=1,debug=0,lexer=new_lexer)
return p
except syntax_error as e:
try:
#import pdb;pdb.set_trace()
column=e[0].lexpos - new_lexer.lexdata.rfind("\n",0,e[0].lexpos)
print >> sys.stderr, '%s:%s.%s:syntax error' % (filename,e[0].lineno,column)
except:
print >> sys.stderr, "%s:syntax error" % filename
return []
# def fparse(filename):
# buf = open(filename).read()
# return parse(buf)
# vim: ts=4:sw=4:et:si:ai
| 26.595029 | 88 | 0.483591 |
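# Usage sketch for the parser above (editor's note): parse() returns the
# top-level node.stmt_list on success, or [] after reporting a syntax error.
# Note this module is Python 2 code (see the print-statement error handler)
# and expects its sibling lexer/yacc/node modules on the import path.
#
#     buf = open("my_script.m").read()
#     ast = parse(buf, filename="my_script.m")
#     # later passes (resolve, code generation) walk this node tree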
09946beeefa275d1daf6be7e81620645e2c19acb | 157,659 | py | Python | Tensile/KernelWriterSource.py | micmelesse/Tensile | 62fb9a16909ddef08010915cfefe4c0341f48daa | [
"MIT"
] | null | null | null | Tensile/KernelWriterSource.py | micmelesse/Tensile | 62fb9a16909ddef08010915cfefe4c0341f48daa | [
"MIT"
] | null | null | null | Tensile/KernelWriterSource.py | micmelesse/Tensile | 62fb9a16909ddef08010915cfefe4c0341f48daa | [
"MIT"
] | null | null | null | ################################################################################
# Copyright (C) 2016-2019 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from . import Code
from .DataType import DataType
from .SolutionStructs import isPackedIndex
from .Common import globalParameters, printExit
from .KernelWriter import KernelWriter
################################################################################
# Make OpenCL Kernel String
################################################################################
class KernelWriterSource(KernelWriter):
##############################################################################
# Make OpenCL Kernel String
##############################################################################
def __init__( self, kernelMinNaming, kernelSerialNaming ):
super(KernelWriterSource, self).__init__( \
kernelMinNaming, kernelSerialNaming)
self.language = globalParameters["RuntimeLanguage"]
if self.language == "OCL":
# everything escaped extra b/c string
self.endLine = "\\n\"\n\""
self.endLinePP = "\\\\" + self.endLine
self.quote = "\\\""
self.endLineQuote = "\\\\n\\\""
else:
self.endLine = "\n"
self.endLinePP = "\\" + self.endLine
self.quote = "\""
self.endLineQuote = "\\n\""
if self.language == "OCL":
self.getGroupIdStr = "get_group_id"
self.getNumGroupsStr = "get_num_groups"
self.getLocalIdStr = "get_local_id"
self.getGlobalIdStr = "get_global_id"
self.sharedDeclStr = "__local "
self.sharedPtrStr = "__local "
self.globalPtrStr = "__global "
self.syncStr = "barrier(CLK_LOCAL_MEM_FENCE);"
self.fenceStr = "mem_fence(CLK_LOCAL_MEM_FENCE);"
self.macFStr = "mad"
self.macDStr = "mad"
self.int64Str = "long"
self.uint64Str = "unsigned long"
self.vectorComponents = ["s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7"]
self.atomicCasStr = "atomic_cmpxchg"
self.volatileStr = "volatile "
self.deviceFunctionStr = ""
else:
self.getGroupIdStr = "hc_get_group_id"
self.getNumGroupsStr = "hc_get_num_groups"
self.getLocalIdStr = "hc_get_workitem_id"
self.getGlobalIdStr = "hc_get_workitem_absolute_id"
self.sharedDeclStr = "__shared__ "
self.sharedPtrStr = ""
self.globalPtrStr = ""
self.syncStr = "__syncthreads();"
self.fenceStr = self.syncStr
self.macFStr = "fmaf"
self.macDStr = "fma"
self.int64Str = "int64_t"
self.uint64Str = "uint64_t"
self.vectorComponents = ["x", "y", "z", "w"]
self.atomicCasStr = "atomicCAS"
self.volatileStr = ""
self.deviceFunctionStr = "__device__ "
self.commentPrefix = "/*"
self.commentSuffix = "*/"
self.commentHR = "*"*40
self.indent = " "
    self.psdUuseMagic = 1 # use magic number calc for pack summation dims
self.db={}
self.db["PrintStagger"] = 0
##############################################################################
#
# Functions to Write Kernel Segments
#
##############################################################################
##############################################################################
# Open String
##############################################################################
def openString(self, kernel):
kStr = ""
if self.language == "OCL":
kernelName = self.getKernelName(kernel)
kStr += "\n"
kStr += "std::string %s_src_%u = \"" % (kernelName, self.stringIdx)
return kStr
##############################################################################
# Close String
##############################################################################
def closeString(self, kernel):
kStr = ""
if self.language == "OCL":
kStr += "\";\n"
self.stringIdx += 1
return kStr
##############################################################################
# Init Kernel
##############################################################################
def initKernel(self, kernel, tPA, tPB ):
super(KernelWriterSource, self).initKernel( kernel, tPA, tPB )
self.definedIter=set()
pass
##############################################################################
# Function Prefix
##############################################################################
def functionPrefix(self, kernel):
kStr = ""
if kernel["ProblemType"]["DataType"].isHalf():
if self.language == "OCL":
self.vectorComponents = ["p[0]", "p[1]"]
else:
self.vectorComponents = ["p[0]", "p[1]"]
kStr += self.endLine
####################################
# kernel preprocessor definitions
kStr += self.endLine
kStr += "/* tile parameters */" + self.endLine
kStr += "#define NUM_THREADS %3d%s" \
% (kernel["NumThreads"], self.endLine )
kStr += "#define SG%s %d%s" \
% (self.tileChar0, kernel["SubGroup0"], self.endLine )
kStr += "#define SG%s %d%s" \
% (self.tileChar1, kernel["SubGroup1"], self.endLine )
kStr += "#define TT%s %d%s" \
% (self.tileChar0, kernel["ThreadTile0"], self.endLine )
kStr += "#define TT%s %d%s" \
% (self.tileChar1, kernel["ThreadTile1"], self.endLine )
kStr += "#define MT%s (SG%s*TT%s)%s" \
% (self.tileChar0, self.tileChar0, self.tileChar0, self.endLine )
kStr += "#define MT%s (SG%s*TT%s)%s" \
% (self.tileChar1, self.tileChar1, self.tileChar1, self.endLine )
kStr += "#define VECTOR_WIDTH %u%s" % (kernel["VectorWidth"], self.endLine)
kStr += "#define GLOBAL_LOAD_VECTOR_WIDTH_A %u%s" \
% (kernel["GlobalLoadVectorWidthA"], self.endLine)
kStr += "#define GLOBAL_LOAD_VECTOR_WIDTH_B %u%s" \
% (kernel["GlobalLoadVectorWidthB"], self.endLine)
kStr += "#define GLOBAL_WRITE_VECTOR_WIDTH %u%s" \
% (kernel["GlobalWriteVectorWidth"], self.endLine)
kStr += self.endLine
kStr += "/* DepthU parameters*/%s" % self.endLine
kStr += "#define CPSV (NUM_THREADS / MT%s * VECTOR_WIDTH)%s" \
% (self.tileChar0, self.endLine)
kStr += "#define LOCAL_SPLITU %d%s" \
% (kernel["LocalSplitU"], self.endLine )
kStr += "#define UNROLL %d%s" \
% (kernel["LoopUnroll"], self.endLine )
kStr += "#define LOCAL_DEPTHU (LOCAL_SPLITU*UNROLL)%s" % (self.endLine )
if kernel["GlobalSplitU"] > 1:
kStr += "#define GLOBAL_SPLITU %u%s" \
% (kernel["GlobalSplitU"], self.endLine )
kStr += self.endLine
kStr += "/* other */%s" % self.endLine
kStr += "#define PAD %u%s" % (kernel["LdsPadA"], self.endLine) # TODO - ignore LdsPadB
kStr += "#define WORK_GROUP_MAPPING %u%s" \
% (abs(kernel["WorkGroupMapping"]), self.endLine)
kStr += self.endLine
####################################
# num loads
kStr += "/* num loads parallel and perpendicular to coalesced */%s" \
% self.endLine
kStr += "#define NLCA %d%s" % (kernel["NumLoadsCoalescedA"], self.endLine )
kStr += "#define NLCB %d%s" % (kernel["NumLoadsCoalescedB"], \
self.endLine )
kStr += "#define NLPA %d%s" % (kernel["NumLoadsPerpendicularA"], \
self.endLine )
kStr += "#define NLPB %d%s" % (kernel["NumLoadsPerpendicularB"], \
self.endLine )
kStr += self.endLine
####################################
# load sizes
kStr += "/* load sizes parallel and perpendicular to coalesced */%s" % self.endLine
if kernel["ProblemType"]["TLUA"]:
kStr += "#define LSCA (MT%s/NLCA)%s" \
% (self.tileCharA, self.endLine)
kStr += "#define LSPA (LOCAL_DEPTHU/NLPA)" + self.endLine
else:
kStr += "#define LSCA (LOCAL_DEPTHU/NLCA)%s" \
% (self.endLine)
kStr += "#define LSPA (MT%s/NLPA)%s" \
% ( self.tileCharA, self.endLine)
if kernel["ProblemType"]["TLUB"]:
kStr += "#define LSCB (MT%s/NLCB)%s" \
% (self.tileCharB, self.endLine)
kStr += "#define LSPB (LOCAL_DEPTHU/NLPB)" + self.endLine
else:
kStr += "#define LSCB (LOCAL_DEPTHU/NLCB)%s" \
% (self.endLine)
kStr += "#define LSPB (MT%s/NLPB)%s" % (self.tileCharB, self.endLine)
kStr += "#define LVCA (LSCA/GLOBAL_LOAD_VECTOR_WIDTH_A)%s" % (self.endLine)
kStr += "#define LVCB (LSCB/GLOBAL_LOAD_VECTOR_WIDTH_B)%s" % (self.endLine)
kStr += "#define LVPA (LSPA/GLOBAL_LOAD_VECTOR_WIDTH_A)%s" % (self.endLine)
kStr += "#define LVPB (LSPB/GLOBAL_LOAD_VECTOR_WIDTH_B)%s" % (self.endLine)
# local buffer size
kStr += "#define LDS_OFFSET_B %u%s" % (kernel["LdsOffsetB"], self.endLine)
kStr += "#define LDS_NUM_ELEMENTS %u%s" % (kernel["LdsNumElements"], \
self.endLine)
# prefetch local buffer offsets
# layout is redA, redB, blkA, blkB
if kernel["PrefetchGlobalRead"]:
kStr += "#define LDS_OFFSET_BLK %u%s" \
% (kernel["LdsOffsetA_Blk"], self.endLine)
########################################
# z-ordering
if kernel["WorkGroupMappingType"] == "Z":
kStr += self.endLine
kStr += "#ifndef Z_ORDER_FUNCTIONS%s" % self.endLine
kStr += "#define Z_ORDER_FUNCTIONS%s" % self.endLine
kStr += "%svoid z_order(%s" % (self.deviceFunctionStr, self.endLine)
kStr += " unsigned int *z0, // 16-bits output%s" % self.endLine
kStr += " unsigned int *z1, // 16-bits output%s" % self.endLine
kStr += " unsigned int serial ) { // 32-bits input%s" % self.endLine
kStr += " *z0 = serial;%s" % (self.endLine)
kStr += " *z1 = (serial >> 1);%s" % (self.endLine)
kStr += " *z0 &= 0x55555555;%s" % (self.endLine)
kStr += " *z1 &= 0x55555555;%s" % (self.endLine)
kStr += " *z0 |= ( (*z0) >> 1 );%s" % (self.endLine)
kStr += " *z1 |= ( (*z1) >> 1 );%s" % (self.endLine)
kStr += " *z0 &= 0x33333333;%s" % (self.endLine)
kStr += " *z1 &= 0x33333333;%s" % (self.endLine)
kStr += " *z0 |= ( (*z0) >> 2 );%s" % (self.endLine)
kStr += " *z1 |= ( (*z1) >> 2 );%s" % (self.endLine)
kStr += " *z0 &= 0x0f0f0f0f; %s" % (self.endLine)
kStr += " *z1 &= 0x0f0f0f0f;%s" % (self.endLine)
kStr += " *z0 |= ( (*z0) >> 4 );%s" % (self.endLine)
kStr += " *z1 |= ( (*z1) >> 4 );%s" % (self.endLine)
kStr += " *z0 &= 0x00ff00ff;%s" % (self.endLine)
kStr += " *z1 &= 0x00ff00ff;%s" % (self.endLine)
kStr += " *z0 |= ( (*z0) >> 8 );%s" % (self.endLine)
kStr += " *z1 |= ( (*z1) >> 8 );%s" % (self.endLine)
kStr += " *z0 &= 0x0000ffff;%s" % (self.endLine)
kStr += " *z1 &= 0x0000ffff;%s" % (self.endLine)
kStr += "}%s" % self.endLine
kStr += self.endLine
kStr += "%sunsigned int round_down_power_of_2( unsigned int d0, unsigned int d1) {%s" % (self.deviceFunctionStr, self.endLine)
kStr += " unsigned int pow2 = min(d0, d1);%s" % self.endLine
kStr += " pow2 = pow2 | (pow2 >> 1);%s" % self.endLine
kStr += " pow2 = pow2 | (pow2 >> 2);%s" % self.endLine
kStr += " pow2 = pow2 | (pow2 >> 4);%s" % self.endLine
kStr += " pow2 = pow2 | (pow2 >> 8);%s" % self.endLine
kStr += " pow2 = pow2 | (pow2 >> 16);%s" % self.endLine
kStr += " pow2 = pow2 - (pow2 >> 1);%s" % self.endLine
kStr += " return pow2;%s" % self.endLine
kStr += "}%s" % self.endLine
kStr += self.endLine
kStr += "%svoid generalized_z_order(%s" % (self.deviceFunctionStr, self.endLine)
kStr += " unsigned int *z0,%s" % self.endLine
kStr += " unsigned int *z1,%s" % self.endLine
kStr += " unsigned int d0,%s" % self.endLine
kStr += " unsigned int d1,%s" % self.endLine
kStr += " unsigned int maxPow2,%s" % self.endLine
kStr += " unsigned int max0,%s" % self.endLine
kStr += " unsigned int max1 ) {%s" % self.endLine
kStr += " if (! maxPow2) maxPow2 = round_down_power_of_2( max0, max1 );%s" % self.endLine
kStr += " // determine which tile wg is in and relative coord in tile%s" % self.endLine
kStr += " unsigned int offset0 = 0; // coord of tile%s" % self.endLine
kStr += " unsigned int offset1 = 0; // coord of tile%s" % self.endLine
kStr += " unsigned int start0 = 0;%s" % self.endLine
kStr += " unsigned int start1 = 0;%s" % self.endLine
kStr += " unsigned int tile = maxPow2;%s" % self.endLine
kStr += " unsigned int tilem1 = tile - 1;%s" % self.endLine
kStr += " for ( unsigned int i = 0; i < 16; i++ ) {%s" % self.endLine
kStr += " start0 = d0 & ~tilem1; // (d0 / tile) * tile;%s" % self.endLine
kStr += " start1 = d1 & ~tilem1; // (d1 / tile) * tile;%s" % self.endLine
kStr += " offset0 |= start0; // +=%s" % self.endLine
kStr += " offset1 |= start1;%s" % self.endLine
kStr += " d0 &= ~start0; // -=%s" % self.endLine
kStr += " d1 &= ~start1;%s" % self.endLine
kStr += " unsigned int end0 = start0 + tile; // cant be | b/c evals to 0+4->4 or 4+4->8%s" % self.endLine
kStr += " unsigned int end1 = start1 + tile;%s" % self.endLine
kStr += " if ( end0 <= max0 && end1 <= max1 ) break; // both end and max can be non-pow2%s" % self.endLine
kStr += " max0 -= start0; // cant be &~ b/c max0 doesnt necessarily have multiple of start0 to turn off%s" % self.endLine
kStr += " max1 -= start1;%s" % self.endLine
kStr += " tile >>= 1;%s" % self.endLine
kStr += " tilem1 >>= 1;%s" % self.endLine
kStr += " }%s" % self.endLine
kStr += " // d0, d1 is relative coord within tile%s" % self.endLine
kStr += self.endLine
kStr += " // z-order relative coord%s" % self.endLine
kStr += " unsigned int serial = d0 + d1 * tile;%s" % self.endLine
kStr += " z_order( z0, z1, serial );%s" % self.endLine
kStr += " // add tile offset onto z-ordered index%s" % self.endLine
kStr += " *z0 |= offset0;%s" % self.endLine
kStr += " *z1 |= offset1;%s" % self.endLine
#kStr += " if (get_local_id(0)==0) printf(\\\"%%u, %%u -> %%u, %%u\\\\n\\\", d0, d1, (*z0), (*z1));%s" % self.endLine
kStr += "}%s" % self.endLine
kStr += "#endif%s" % self.endLine
####################################
# global memory indices
kStr += self.endLine
kStr += "/* global memory indices */" + self.endLine
# D
kStr += "#define GLOBAL_D(IDX%s" % self.indexChars[0]
for i in range(1, kernel["ProblemType"]["NumIndicesC"]):
kStr += ", IDX%s" % self.indexChars[i]
indexChar = self.indexChars[0]
kStr += ") (( (IDX%s)*strideD%s" % (indexChar, indexChar)
for i in range(1, kernel["ProblemType"]["NumIndicesC"]):
indexChar = self.indexChars[i]
kStr += " + (IDX%s)*strideD%s" % (indexChar, indexChar)
kStr += " ))" + self.endLine
# C
kStr += "#define GLOBAL_C(IDX%s" % self.indexChars[0]
for i in range(1, kernel["ProblemType"]["NumIndicesC"]):
kStr += ", IDX%s" % self.indexChars[i]
indexChar = self.indexChars[0]
kStr += ") (( (IDX%s)*strideC%s" % (indexChar, indexChar)
for i in range(1, kernel["ProblemType"]["NumIndicesC"]):
indexChar = self.indexChars[i]
kStr += " + (IDX%s)*strideC%s" % (indexChar, indexChar)
kStr += " ))" + self.endLine
# A non-vector
kStr += "#define GLOBAL_OFFSET_A(IDX%s" \
% self.indexChars[kernel["ProblemType"]["IndexAssignmentsA"][0]]
for i in range(1, len(kernel["ProblemType"]["IndexAssignmentsA"])):
kStr += ", IDX%s" \
% self.indexChars[kernel["ProblemType"]["IndexAssignmentsA"][i]]
indexChar = self.indexChars[kernel["ProblemType"]["IndexAssignmentsA"][0]]
kStr += ") (( (IDX%s)*strideA%s" % (indexChar, indexChar)
for i in range(1, len(kernel["ProblemType"]["IndexAssignmentsA"])):
indexChar = self.indexChars[kernel["ProblemType"]["IndexAssignmentsA"][i]]
kStr += " + (IDX%s)*strideA%s" % (indexChar, indexChar)
kStr += " ))%s" % self.endLine
# B non-vector
kStr += "#define GLOBAL_OFFSET_B(IDX%s" \
% self.indexChars[kernel["ProblemType"]["IndexAssignmentsB"][0]]
for i in range(1, len(kernel["ProblemType"]["IndexAssignmentsB"])):
kStr += ", IDX%s" \
% self.indexChars[kernel["ProblemType"]["IndexAssignmentsB"][i]]
indexChar = self.indexChars[kernel["ProblemType"]["IndexAssignmentsB"][0]]
kStr += ") (( (IDX%s)*strideB%s" % (indexChar, indexChar)
for i in range(1, len(kernel["ProblemType"]["IndexAssignmentsB"])):
indexChar = self.indexChars[kernel["ProblemType"]["IndexAssignmentsB"][i]]
kStr += " + (IDX%s)*strideB%s" % (indexChar, indexChar)
kStr += " ))" + self.endLine
kStr += self.endLine
####################################
# data types
kStr += "/* data types */" + self.endLine
kStr += "#define DATA_TYPE %s%s" \
% (kernel["ProblemType"]["DataType"].toDevice(self.language), \
self.endLine)
kStr += "#define DEST_DATA_TYPE %s%s" \
% (kernel["ProblemType"]["DestDataType"].toDevice(self.language), \
self.endLine)
kStr += "#define COMPUTE_DATA_TYPE %s%s" \
% (kernel["ProblemType"]["ComputeDataType"].toDevice(self.language), \
self.endLine)
#vecStr = kernel["ProblemType"]["DataType"].toDevice(self.language)
#if kernel["VectorWidth"] > 1:
# vecStr += str(kernel["VectorWidth"])
#kStr += "#define VECTOR_TYPE %s%s" % (vecStr, self.endLine)
if self.language == "HIP" and kernel["ProblemType"]["DataType"].isComplex():
kStr += "#define s0 x" + self.endLine
kStr += "#define s1 y" + self.endLine
####################################
# Atomic Global MAC
if kernel["GlobalSplitU"] > 1:
kStr += self.comment("atomic add float")
kStr += "#ifndef ATOMIC_FLOAT_FUNCTION%s" % (self.endLine)
kStr += "#define ATOMIC_FLOAT_FUNCTION%s" % (self.endLine)
if self.language == "OCL":
"""
kStr += self.endLine
kStr += "void atomicAddType(%s%sfloat *fPtr, float operand) {%s" \
% (self.volatileStr, self.globalPtrStr, self.endLine)
kStr += " volatile atomic_float *aPtr = (atomic_float*)(fPtr);%s" % (self.endLine)
kStr += " float oldValue, newValue;%s" % (self.endLine)
kStr += " oldValue = atomic_load_explicit(aPtr, memory_order_relaxed, memory_scope_device);%s" % (self.endLine)
#kStr += " oldValue = atomic_load(aPtr);%s" % (self.endLine)
kStr += " do {%s" % (self.endLine)
kStr += " newValue = oldValue + operand;%s" % (self.endLine)
#kStr += " prevReturn = %s(uPtr, prevVal.ui, newVal.ui);%s" \
# % (self.atomicCasStr, self.endLine)
kStr += " } while ( !atomic_compare_exchange_weak_explicit(aPtr, &oldValue, newValue, memory_order_relaxed, memory_order_relaxed) );%s" % (self.endLine)
#kStr += " } while ( !atomic_compare_exchange_weak(aPtr, &oldValue, newValue) );%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
"""
kStr += "typedef union {%s" % (self.endLine)
kStr += " unsigned int ui;%s" % (self.endLine)
kStr += " float f;%s" % (self.endLine)
kStr += "} AtomicFloat;%s" % (self.endLine)
kStr += self.endLine
kStr += "%svoid atomicAddType(%s%sfloat *fPtr, float operand) {%s" \
% ("__device__ " if self.language == "HIP" else "", \
self.volatileStr, self.globalPtrStr, self.endLine)
kStr += " AtomicFloat newVal;%s" % (self.endLine)
kStr += " AtomicFloat prevVal;%s" % (self.endLine)
kStr += " %s%sunsigned int *uPtr = (%s%sunsigned int *)fPtr;%s" \
% (self.volatileStr, self.globalPtrStr, self.volatileStr, \
self.globalPtrStr, self.endLine)
kStr += " unsigned int prevReturn = *uPtr;%s" % (self.endLine)
kStr += " do {%s" % (self.endLine)
kStr += " prevVal.ui = prevReturn;%s" % (self.endLine)
kStr += " newVal.f = prevVal.f + operand;%s" % (self.endLine)
kStr += " prevReturn = %s(uPtr, prevVal.ui, newVal.ui);%s" \
% (self.atomicCasStr, self.endLine)
kStr += " } while (prevVal.ui != prevReturn);%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
else:
"""
kStr += "%svoid atomicAddType(%s%sfloat *fPtr, float operand) {%s" \
% ("__device__ " if self.language == "HIP" else "", \
self.volatileStr, self.globalPtrStr, self.endLine)
kStr += " %s%sunsigned int *uPtr = (%s%sunsigned int *)fPtr;%s" \
% (self.volatileStr, self.globalPtrStr, self.volatileStr, \
self.globalPtrStr, self.endLine)
#kStr += " unsigned int old = *uPtr;%s" % (self.endLine)
kStr += " unsigned int old = atomicAdd(uPtr, 0); // atomic read%s" % (self.endLine)
kStr += " unsigned int assumed, newValue;%s" % (self.endLine)
kStr += " do {%s" % (self.endLine)
kStr += " assumed = old;%s" % (self.endLine)
kStr += " newValue = __float_as_uint(operand + __uint_as_float(assumed));%s" % (self.endLine)
kStr += " old = %s(uPtr, assumed, newValue);%s" \
% (self.atomicCasStr, self.endLine)
kStr += " } while (assumed != old);%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
"""
if globalParameters["CxxCompiler"] == "hipcc":
kStr += self.endLine
kStr += "__device__ inline int atomicAddType(int *fPtr, int operand)%s" % (self.endLine)
kStr += "{%s" % (self.endLine)
kStr += " return atomicAdd(fPtr,operand);%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
kStr += self.endLine
kStr += "__device__ inline unsigned int atomicAddType(unsigned int *fPtr, unsigned int operand)%s" % (self.endLine)
kStr += "{%s" % (self.endLine)
kStr += " return atomicAdd(fPtr,operand);%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
kStr += self.endLine
kStr += "__device__ inline unsigned long long int atomicAddType(unsigned long long int *fPtr, unsigned long long int operand)%s" % (self.endLine)
kStr += "{%s" % (self.endLine)
kStr += " return atomicAdd(fPtr,operand);%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
kStr += self.endLine
kStr += "__device__ inline float atomicAddType(float *fPtr, float operand)%s" % (self.endLine)
kStr += "{%s" % (self.endLine)
kStr += " return atomicAdd(fPtr,operand);%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
kStr += self.endLine
kStr += "__device__ inline double atomicAddType(double *fPtr, double operand)%s" % (self.endLine)
kStr += "{%s" % (self.endLine)
kStr += " return atomicAdd(fPtr,operand);%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
kStr += self.endLine
else:
kStr += self.endLine
kStr += "template <typename T>%s" % (self.endLine)
kStr += "__device__ inline void atomicAddType(%s%sT *fPtr, T operand) {%s" \
% (self.volatileStr, self.globalPtrStr, self.endLine)
kStr += " std::atomic<T> *aPtr = reinterpret_cast<std::atomic<T>*>(fPtr);%s" % (self.endLine)
kStr += " T oldValue, newValue;%s" % (self.endLine)
kStr += " oldValue = aPtr->load(std::memory_order_relaxed);%s" % (self.endLine)
kStr += " do {%s" % (self.endLine)
kStr += " newValue = oldValue + operand;%s" % (self.endLine)
#kStr += " prevReturn = %s(uPtr, prevVal.ui, newVal.ui);%s" \
# % (self.atomicCasStr, self.endLine)
#kStr += " } while ( !std::atomic_compare_exchange_weak_explicit(aPtr, &oldValue, newValue, std::memory_order_acq_rel, std::memory_order_release) );%s" % (self.endLine)
kStr += " } while ( !std::atomic_compare_exchange_weak_explicit(aPtr, &oldValue, newValue, std::memory_order_relaxed, std::memory_order_release) );%s" % (self.endLine)
kStr += "}%s" % (self.endLine)
kStr += "#endif%s" % self.endLine
kStr += "#define MAGIC_DIV1(dividend, magicNumber, magicShift) ((uint64_t)(dividend) * magicNumber >> magicShift)%s" % self.endLine
####################################
# MACs
kStr += self.endLine
kStr += "/* MAC's */" + self.endLine
if self.language == "OCL":
kStr += "#define MAC(A,B,DST) mad(A,B,DST)"
else:
if kernel["ProblemType"]["HighPrecisionAccumulate"] and kernel["ProblemType"]["DataType"].isHalf():
kStr += "#define MAC(A,B,DST) DST += static_cast<float>(A) * static_cast<float>(B)"
elif kernel["ProblemType"]["HighPrecisionAccumulate"] and kernel["ProblemType"]["DataType"].isInt8x4():
kStr += "#define MAC(A,B,DST) DST = GenDot4(static_cast<int>(A), static_cast<int>(B), static_cast<int>(DST))"
elif kernel["ProblemType"]["HighPrecisionAccumulate"] and kernel["ProblemType"]["DataType"].isBFloat16():
kStr += "#define MAC(A,B,DST) DST += static_cast<float>(A) * static_cast<float>(B);"
else:
kStr += "#define MAC(A,B,DST) DST += A*B"
kStr += self.endLine
if kernel["ProblemType"]["DataType"].isReal():
# real data
if ((kernel["ThreadTileA"] % 2 == 0) and (kernel["ProblemType"]["DataType"].isHalf())):
if kernel["ProblemType"]["HighPrecisionAccumulate"]:
kStr += "#define TYPE_MAC(MULA0,MULB0,DST0,MULA1,MULB1,DST1) " + self.endLinePP
kStr += " DST0 = MAC(MULA0,MULB0,DST0);" + self.endLinePP
kStr += " DST1 = MAC(MULA1,MULB1,DST1);" + self.endLinePP
kStr += self.endLine
else:
kStr += "#define TYPE_MAC(MULA0,MULB0,DST0,MULA1,MULB1,DST1) " + self.endLinePP
kStr += " a_pk_fma[0] = MULA0; %s " % (self.endLinePP)
kStr += " a_pk_fma[1] = MULA1; %s " % (self.endLinePP)
kStr += " b_pk_fma[0] = MULB0; %s " % (self.endLinePP)
kStr += " b_pk_fma[1] = MULB1; %s " % (self.endLinePP)
kStr += " c_pk_fma[0] = DST0; %s " % (self.endLinePP)
kStr += " c_pk_fma[1] = DST1; %s " % (self.endLinePP)
kStr += " c_pk_fma = tensile_fmadd_half2(a_pk_fma, b_pk_fma, c_pk_fma); %s " % (self.endLinePP)
kStr += " DST0 = c_pk_fma[0]; %s " % (self.endLinePP)
kStr += " DST1 = c_pk_fma[1]; %s " % (self.endLinePP)
kStr += self.endLine
else:
kStr += "#define TYPE_MAC(MULA,MULB,DST) " \
+ "DST = MAC(MULA,MULB,DST);" + self.endLine
# GSU
if kernel["GlobalSplitU"] > 1: # 1st kernel will have taken care of B
if kernel["ProblemType"]["UseBeta"]:
kStr += "#define TYPE_MAC_WRITE(DST,SRC,ALPHA,REG,BETA) atomicAddType(&(DST), (ALPHA)*(REG));"
else:
kStr += "#define TYPE_MAC_WRITE(DST,ALPHA,REG) atomicAddType(&(DST), (ALPHA)*(REG));"
else:
if kernel["ProblemType"]["UseBeta"]:
# dst = alpha*reg + dst*beta
if kernel["ProblemType"]["HighPrecisionAccumulate"] and kernel["ProblemType"]["DataType"].isBFloat16():
kStr += "#define TYPE_MAC_WRITE(DST,SRC,ALPHA,REG,BETA) " \
+ "DST = 0 != (BETA) ? " \
+ "static_cast<tensile_bfloat16>((ALPHA)*(REG) + (BETA) * static_cast<float>(SRC)) : " \
+ "static_cast<tensile_bfloat16>((ALPHA)*(REG));" + self.endLine
else:
kStr += "#define TYPE_MAC_WRITE(DST,SRC,ALPHA,REG,BETA) " \
+ "DST = 0 != (BETA) ? (ALPHA)*(REG) + (BETA)*(SRC) : (ALPHA)*(REG);" + self.endLine
else:
# dst = alpha*reg
kStr += "#define TYPE_MAC_WRITE(DST,ALPHA,REG) " \
+ "DST = (ALPHA)*(REG);" + self.endLine
else:
# complex data
if not kernel["ProblemType"]["ComplexConjugateA"] and not kernel["ProblemType"]["ComplexConjugateB"]:
# neither conjugate
kStr += (
"#define TYPE_MAC(MULA,MULB,DST) " + self.endLinePP +
" DST.s0 = MAC( MULA.s0, MULB.s0, DST.s0 ); " + self.endLinePP +
" DST.s0 = MAC( -MULA.s1, MULB.s1, DST.s0 ); " + self.endLinePP +
" DST.s1 = MAC( MULA.s0, MULB.s1, DST.s1 ); " + self.endLinePP +
" DST.s1 = MAC( MULA.s1, MULB.s0, DST.s1 );" + self.endLine )
elif kernel["ProblemType"]["ComplexConjugateA"] and not kernel["ProblemType"]["ComplexConjugateB"]:
# A conjugate (negate imaginary A.s1)
kStr += (
"#define TYPE_MAC(MULA,MULB,DST) " + self.endLinePP +
" DST.s0 = MAC( MULA.s0, MULB.s0, DST.s0 ); " + self.endLinePP +
" DST.s0 = MAC( MULA.s1, MULB.s1, DST.s0 ); " + self.endLinePP +
" DST.s1 = MAC( MULA.s0, MULB.s1, DST.s1 ); " + self.endLinePP +
" DST.s1 = MAC( -MULA.s1, MULB.s0, DST.s1 );" + self.endLine )
elif not kernel["ProblemType"]["ComplexConjugateA"] and kernel["ProblemType"]["ComplexConjugateB"]:
# B conjugate (negate imaginary B.s1)
kStr += (
"#define TYPE_MAC(MULA,MULB,DST) " + self.endLinePP +
" DST.s0 = MAC( MULA.s0, MULB.s0, DST.s0 ); " + self.endLinePP +
" DST.s0 = MAC( -MULA.s1, -MULB.s1, DST.s0 ); " + self.endLinePP +
" DST.s1 = MAC( MULA.s0, -MULB.s1, DST.s1 ); " + self.endLinePP +
" DST.s1 = MAC( MULA.s1, MULB.s0, DST.s1 );" + self.endLine )
else:
# A & B conjugate (negate imaginary .s1)
kStr += (
"#define TYPE_MAC(MULA,MULB,DST) " + self.endLinePP +
" DST.s0 = MAC( MULA.s0, MULB.s0, DST.s0 ); " + self.endLinePP +
" DST.s0 = MAC( MULA.s1, -MULB.s1, DST.s0 ); " + self.endLinePP +
" DST.s1 = MAC( MULA.s0, -MULB.s1, DST.s1 ); " + self.endLinePP +
" DST.s1 = MAC( -MULA.s1, MULB.s0, DST.s1 );" + self.endLine )
if kernel["GlobalSplitU"] > 1: # 1st kernel will have taken care of B
if kernel["ProblemType"]["UseBeta"]:
kStr += "#define TYPE_MAC_WRITE(DST,SRC,ALPHA,REG,BETA) atomicAddType(&(DST), (ALPHA)*(REG));"
else:
kStr += "#define TYPE_MAC_WRITE(DST,ALPHA,REG) atomicAddType(&(DST), (ALPHA)*(REG));"
else:
if kernel["ProblemType"]["UseBeta"]:
# dst = alpha*reg + beta*dst
kStr += (
"#define TYPE_MAC_WRITE( DST, SRC, ALPHA, REG, BETA ) "+self.endLinePP +
" /* (1) */ " + self.endLinePP +
" type_mac_tmp = REG.s0; " + self.endLinePP +
" REG.s0 *= ALPHA.s0; " + self.endLinePP +
" REG.s0 = MAC( -ALPHA.s1, REG.s1, REG.s0 ); " + self.endLinePP +
" REG.s1 *= ALPHA.s0; " + self.endLinePP +
" REG.s1 = MAC( ALPHA.s1, type_mac_tmp, REG.s1 ); "+self.endLinePP+
" /* (2) */ " + self.endLinePP +
" if(BETA.s0 != 0) { " + self.endLinePP +
" REG.s0 = MAC( BETA.s0, SRC.s0, REG.s0 ); " + self.endLinePP +
" REG.s1 = MAC( BETA.s0, SRC.s1, REG.s1 ); " + self.endLinePP +
" } " + self.endLinePP +
" if (BETA.s1 != 0) { " + self.endLinePP +
" REG.s0 = MAC( -BETA.s1, SRC.s1, REG.s0 ); " + self.endLinePP +
" REG.s1 = MAC( BETA.s1, SRC.s0, REG.s1 ); " + self.endLinePP +
" } " + self.endLinePP +
" /* (3) */ " + self.endLinePP +
" DST = REG;" + self.endLine )
else:
# dst = alpha*reg
kStr += (
"#define TYPE_MAC_WRITE( DST, ALPHA, REG ) "+self.endLinePP+
" /* (1) */ " + self.endLinePP +
" type_mac_tmp = REG.s0; " + self.endLinePP +
" REG.s0 *= ALPHA.s0; " + self.endLinePP +
" REG.s0 = MAC( -ALPHA.s1, REG.s1, REG.s0 ); " + self.endLinePP +
" REG.s1 *= ALPHA.s0; " + self.endLinePP +
" REG.s1 = MAC( ALPHA.s1, type_mac_tmp, REG.s1 ); "+self.endLinePP+
" /* (3) */ " + self.endLinePP +
" DST = REG;" + self.endLine )
####################################
    # summation unroll
kStr += self.endLine
kStr += "/* %dx%d micro-tile */%s" \
% (kernel["ThreadTile0"], kernel["ThreadTile1"], self.endLine)
numMacs = 2 if kernel["PrefetchLocalRead"] else 1
for m in range(0, numMacs):
kStr += "#define MAC_%ux%u" \
% (kernel["ThreadTile0"], kernel["ThreadTile1"])
if kernel["PrefetchLocalRead"]:
kStr += ("" if m==0 else "_BLK")
kStr += self.endLinePP
"""
if False:
if kernel["VectorWidth"] == 1:
kStr += " printf(\\\"MAC: T[%%02u]: %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f; %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f\\\\n\\\", serial, rA[0], rA[1], rA[2], rA[3], rA[4], rA[5], rA[6], rA[7], rB[0], rB[1], rB[2], rB[3], rB[4], rB[5], rB[6], rB[7]); %s" % (self.endLinePP)
if kernel["VectorWidth"] == 2:
kStr += " printf(\\\"MAC: T[%%02u]: %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f; %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f\\\\n\\\", serial, rA[0].%s, rA[0].%s, rA[1].%s, rA[1].%s, rA[2].%s, rA[2].%s, rA[3].%s, rA[3].%s, rB[0].%s, rB[0].%s, rB[1].%s, rB[1].%s, rB[2].%s, rB[2].%s, rB[3].%s, rB[3].%s); %s" % ( \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[0], self.vectorComponents[1], \
self.endLinePP)
if kernel["VectorWidth"] == 4:
kStr += " printf(\\\"MAC: T[%%02u]: %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f; %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f, %%.0f\\\\n\\\", serial, rA[0].%s, rA[0].%s, rA[0].%s, rA[0].%s, rA[1].%s, rA[1].%s, rA[1].%s, rA[1].%s, rB[0].%s, rB[0].%s, rB[0].%s, rB[0].%s, rB[1].%s, rB[1].%s, rB[1].%s, rB[1].%s); %s" % ( \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[2], self.vectorComponents[3], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[2], self.vectorComponents[3], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[2], self.vectorComponents[3], \
self.vectorComponents[0], self.vectorComponents[1], \
self.vectorComponents[2], self.vectorComponents[3], \
self.endLinePP)
"""
for b in range(0, kernel["ThreadTileB"]):
for a in range(0, kernel["ThreadTileA"]):
strC = "rC[%d+%d*TT%s]" % (a, b, self.tileChar0 )
strA = "rA[%d%s]" % (a, ("+TT%s"%self.tileCharA) if m>0 else "")
strB = "rB[%d%s]" % (b, ("+TT%s"%self.tileCharB) if m>0 else "")
if ((kernel["ThreadTileA"] % 2 == 0) and (kernel["ProblemType"]["DataType"].isHalf())):
if a % 2 == 0:
kStr += " TYPE_MAC(%s,%s,%s , " % (strA, strB, strC)
else:
kStr += "%s,%s,%s); %s" % (strA, strB, strC, self.endLinePP)
else:
kStr += " TYPE_MAC(%s,%s,%s); %s" % (strA, strB, strC, \
self.endLinePP)
if kernel["UnrollMemFence"]:
kStr += " " + self.fenceStr
kStr += self.endLine
"""
for b in range(0, kernel["ThreadTileB"]):
for a in range(0, kernel["ThreadTileA"]):
# a
vecA = a / kernel["VectorWidth"]
elemA = a % kernel["VectorWidth"]
strA = "rA[%d%s]" % (vecA, ("+TT%s/VECTOR_WIDTH"%self.tileCharA) \
if m>0 else "")
if kernel["VectorWidth"] > 1:
strA += ".%s" % self.vectorComponents[elemA]
# b
vecB = b / kernel["VectorWidth"]
elemB = b % kernel["VectorWidth"]
strB = "rB[%d%s]" % (vecB, ("+TT%s/VECTOR_WIDTH"%self.tileCharB) \
if m>0 else "")
if kernel["VectorWidth"] > 1:
strB += ".%s" % self.vectorComponents[elemB]
# c
strC = "rC[%d+%d*TT%s/VECTOR_WIDTH]" % (vecA, b, self.tileChar0 )
elemC = elemA
if kernel["VectorWidth"] > 1:
strC += ".%s" % self.vectorComponents[elemC]
#kStr += " printf(\\\"T[%%u,%u,%u]: %s:%%.0f += %s:%%.0f * %s:%%.0f\\\\n\\\", serial, %s, %s, %s); %s" % (a, b, strC, strA, strB, strC, strA, strB, self.endLinePP)
kStr += " TYPE_MAC(%s,%s,%s); %s" % (strA, strB, strC, \
self.endLinePP)
if kernel["UnrollMemFence"]:
kStr += " " + self.fenceStr
kStr += self.endLine
"""
####################################
# preprocessor definitions of kernel arguments
firstStride = 0
if kernel["ProblemType"]["UseInitialStridesCD"]:
# no strides #defined
lastStrideD = 0
lastStrideC = 0
else:
# #define initial stride
kStr += "/* hard-coded initial strides CD*/%s" \
% self.endLine
lastStrideD = 1
lastStrideC = 1
if kernel["ProblemType"]["UseInitialStridesAB"]:
lastStrideA = 0
lastStrideB = 0
else:
kStr += "/* hard-coded initial strides AB */%s" \
% self.endLine
lastStrideA = 1
lastStrideB = 1
for i in range(firstStride, lastStrideD):
kStr += "#define strideD" + self.indexChars[i] + " 1" + self.endLine
for i in range(firstStride, lastStrideC):
kStr += "#define strideC" + self.indexChars[i] + " 1" + self.endLine
for i in range(firstStride, lastStrideA):
kStr += "#define strideA" \
+ self.indexChars[kernel["ProblemType"]["IndexAssignmentsA"][i]] \
+ " 1" + self.endLine
for i in range(firstStride, lastStrideB):
kStr += "#define strideB" \
+ self.indexChars[kernel["ProblemType"]["IndexAssignmentsB"][i]] \
+ " 1" + self.endLine
return kStr
##############################################################################
# Function Signature Prefix
##############################################################################
def functionSignaturePrefix(self, kernel):
s = ""
if self.language == "HIP":
s += "#pragma clang diagnostic push" + self.endLine
s += "#pragma clang diagnostic ignored \"-Wunused-parameter\"" + self.endLine
return s
##############################################################################
# Function Signature
##############################################################################
def functionSignature(self, kernel ):
kernelName = self.getKernelName(kernel)
problemType = kernel["ProblemType"]
# determine chars for fast access
self.indexChars = []
for i in range(0, len(globalParameters["IndexChars"])):
self.indexChars.append(globalParameters["IndexChars"][i])
self.indexChars[kernel["ProblemType"]["Index0"]] \
= "0" + self.indexChars[kernel["ProblemType"]["Index0"]]
self.indexChars[kernel["ProblemType"]["Index1"]] \
= "1" + self.indexChars[kernel["ProblemType"]["Index1"]]
self.tileChar0 = self.indexChars[kernel["ProblemType"]["Index0"]]
self.tileChar1 = self.indexChars[kernel["ProblemType"]["Index1"]]
s = ""
# kernel name
if self.language == "OCL":
s += "__attribute__((reqd_work_group_size(NUM_THREADS,1,1)))"
s += self.endLine
s += "__kernel "
else:
s += "extern \"C\"\n"
s += "__global__ "
s += "void %s" % ( kernelName )
s += "(" + self.endLine
# pointers
globalStr = "__global "
if self.language == "HIP":
#s += " hipLaunchParm lp," + self.endLine
globalStr = ""
restrictStr = "restrict"
if self.language == "HIP":
restrictStr = "__restrict__"
ptrStr = kernel["ProblemType"]["DestDataType"].toDevice(self.language)
s += " " + globalStr + ptrStr \
+ " *D,"
s += self.endLine
ptrStr = kernel["ProblemType"]["DestDataType"].toDevice(self.language)
s += " " + globalStr + ptrStr \
+ " const * " + restrictStr + " C,"
s += self.endLine
ptrStr = kernel["ProblemType"]["DataType"].toDevice(self.language)
s += " " + globalStr + ptrStr \
+ " const * " + restrictStr + " A,"
s += self.endLine
s += " " + globalStr + ptrStr \
+ " const * " + restrictStr + " B"
# alpha & beta
s += "," + self.endLine + " " \
+ kernel["ProblemType"]["ComputeDataType"].toDevice(self.language) + " const alpha"
if kernel["ProblemType"]["UseBeta"]:
s += "," + self.endLine + " " \
+ kernel["ProblemType"]["ComputeDataType"].toDevice(self.language) + " const beta"
# strides
firstStrideAB = firstStrideCD = 1
if kernel["ProblemType"]["UseInitialStridesAB"]:
firstStrideAB = 0
if kernel["ProblemType"]["UseInitialStridesCD"]:
firstStrideCD = 0
lastStrideD = kernel["ProblemType"]["NumIndicesC"]
lastStrideC = kernel["ProblemType"]["NumIndicesC"]
lastStrideA = len(kernel["ProblemType"]["IndexAssignmentsA"])
lastStrideB = len(kernel["ProblemType"]["IndexAssignmentsB"])
for i in range(firstStrideCD, lastStrideD):
s += "," + self.endLine + " unsigned int const strideD" + self.indexChars[i]
for i in range(firstStrideCD, lastStrideC):
s += "," + self.endLine + " unsigned int const strideC" + self.indexChars[i]
for i in range(firstStrideAB, lastStrideA):
s += "," + self.endLine + " unsigned int const strideA" \
+ self.indexChars[kernel["ProblemType"]["IndexAssignmentsA"][i]]
for i in range(firstStrideAB, lastStrideB):
s += "," + self.endLine + " unsigned int const strideB" \
+ self.indexChars[kernel["ProblemType"]["IndexAssignmentsB"][i]]
# sizes
for i in range(0, kernel["ProblemType"]["TotalIndices"]):
s += "," + self.endLine + " unsigned int const size" + self.indexChars[i]
for idxChar in self.magicSumChars:
s += ",%s unsigned magicNumberNumIter%s /*PSD*/" % (self.endLine, idxChar)
s += ",%s unsigned magicShiftNumIter%s /*PSD*/" % (self.endLine, idxChar)
if kernel["GlobalSplitU"]>1 and idxChar==self.unrollChar:
s += ",%s unsigned magicNumberNumIter%s_GsuRemainder /*PSD */" % (self.endLine, idxChar)
s += ",%s unsigned magicShiftNumIter%s_GsuRemainder /*PSD */" % (self.endLine, idxChar)
for idxChar in self.magicNonSumChars:
s += ",%s unsigned magicNumberSize%s" % (self.endLine, idxChar)
s += ",%s unsigned magicShiftSize%s" % (self.endLine, idxChar)
for idx in problemType["IndicesSummation"]:
for tc in ('A','B'):
for zp in kernel["ProblemType"]["ZeroPad%s"%tc]:
(freeDim, sumDim) = zp[:2]
freeDimChar = globalParameters["IndexChars"][freeDim]
sumChar = self.indexChars[sumDim]
if sumDim == idx:
s += ",%s int padStart%s%s%s" % (self.endLine, tc, freeDimChar, sumChar)
s += ",%s int padEnd%s%s%s" % (self.endLine, tc, freeDimChar, sumChar)
s += "," + self.endLine + " unsigned int staggerUIterParm"
# kernel["PersistentKernel"]:
s += "," + self.endLine + " unsigned int problemNumGroupTiles0"
s += "," + self.endLine + " unsigned int problemNumGroupTiles1"
s += "," + self.endLine + " unsigned int magicNumberProblemNumGroupTiles0"
s += " )"
return s
##############################################################################
# Function Signature Suffix
##############################################################################
def functionSignatureSuffix(self, kernel):
s = ""
if self.language == "HIP":
s += self.endLine
s += "#pragma clang diagnostic pop" + self.endLine
return s
##############################################################################
# Function Begin
##############################################################################
def functionBegin(self, kernel):
s = ""
s += " {" + self.endLine
return s
##############################################################################
# Allocate Resources
##############################################################################
def allocateResources(self, kernel):
kStr = ""
kStr += " unsigned int serial = %s(0);%s" \
% (self.getLocalIdStr, self.endLine)
kStr += " unsigned int sgId = serial / (SG%s*SG%s);%s" \
% (self.tileChar0, self.tileChar1, self.endLine)
####################################
# zero
if kernel["ProblemType"]["DataType"].isHalf() \
and kernel["VectorWidth"] > 1 \
and (kernel["LoopTail"] or kernel["EdgeType"] == "Branch"):
kStr += "#define SCALAR_ZERO 0%s" % self.endLine
elif kernel["ProblemType"]["DestDataType"].isBFloat16():
kStr += "#define SCALAR_ZERO 0.0f%s" % self.endLine
else:
kStr += "#define SCALAR_ZERO %s%s" % ( kernel["ProblemType"][\
"DataType"].zeroString(self.language, 1), \
self.endLine )
# TODO - use a different value for OOB data
# Currently use zero since Tensile already has handy functions to create zero in different types
if kernel["ProblemType"]["HighPrecisionAccumulate"] and kernel["ProblemType"]["DataType"].isBFloat16():
kStr += "#define SCALAR_OOB_DATA static_cast<tensile_bfloat16>(0.0f)%s" % self.endLine
else:
kStr += "#define SCALAR_OOB_DATA SCALAR_ZERO%s" % self.endLine
kStr += " /* registers for MAC's */" + self.endLine
# TODO: change to kStr += " COMPUTE_DATA_TYPE rC[TT%s*TT%s];%s" \ % (self.tileChar0, self.tileChar1, self.endLine )
# with above there is no need for the if below
if kernel["ProblemType"]["HighPrecisionAccumulate"] and (kernel["ProblemType"]["DataType"].isHalf() or kernel["ProblemType"]["DataType"].isBFloat16()):
kStr += " float rC[TT%s*TT%s];%s" \
% (self.tileChar0, self.tileChar1, self.endLine )
else:
kStr += " DEST_DATA_TYPE rC[TT%s*TT%s];%s" \
% (self.tileChar0, self.tileChar1, self.endLine )
# registers for valuAB
kStr += " DATA_TYPE rA[TT%s%s];%s" \
% (self.tileChar0, ("*2" if kernel["PrefetchLocalRead"] else ""), \
self.endLine)
kStr += " DATA_TYPE rB[TT%s%s];%s" \
% (self.tileChar1, ("*2" if kernel["PrefetchLocalRead"] else ""), \
self.endLine)
####################################
# registers for global -> local load
kStr += self.endLine
kStr += " /* registers for global->local */%s" % self.endLine
for perp in range(0, kernel["NumLoadsPerpendicularA"]):
for sPerp in range(0, self.numReadsPerpVecCompA):
for para in range(0, kernel["NumLoadsCoalescedA"]):
for sPara in range(0, self.numReadsCoalVecCompA):
kStr += " DATA_TYPE a_%u_%u_%u_%u;%s" \
% (para, sPara, perp, sPerp, self.endLine)
for perp in range(0, kernel["NumLoadsPerpendicularB"]):
for sPerp in range(0, self.numReadsPerpVecCompB):
for para in range(0, kernel["NumLoadsCoalescedB"]):
for sPara in range(0, self.numReadsCoalVecCompB):
kStr += " DATA_TYPE b_%u_%u_%u_%u;%s" \
% (para, sPara, perp, sPerp, self.endLine)
"""
for perp in range(0, kernel["NumLoadsPerpendicularA"]):
for para in range(0, kernel["NumLoadsCoalescedA"]):
kStr += "a_" + str(para) + "_" + str(perp)
if para == kernel["NumLoadsCoalescedA"]-1 \
and perp == kernel["NumLoadsPerpendicularA"]-1:
kStr += ";" + self.endLine
else:
kStr += ", "
kStr += " VECTOR_TYPE "
for perp in range(0, kernel["NumLoadsPerpendicularB"]):
for para in range(0, kernel["NumLoadsCoalescedB"]):
kStr += "b_" + str(para) + "_" + str(perp)
if para == kernel["NumLoadsCoalescedB"]-1 \
and perp == kernel["NumLoadsPerpendicularB"]-1:
kStr += ";" + self.endLine
else:
kStr += ", "
"""
####################################
# allocate tensile_half2 memory
if kernel["ProblemType"]["DataType"].isHalf():
kStr += self.endLine
kStr += " /* allocate tensile_half2 memory */" + self.endLine
kStr += " tensile_half2 a_pk_fma;" + self.endLine
kStr += " tensile_half2 b_pk_fma;" + self.endLine
kStr += " tensile_half2 c_pk_fma;" + self.endLine
####################################
# allocate local memory
kStr += self.endLine
kStr += " /* allocate local memory */" + self.endLine
kStr += " %sDATA_TYPE localMemory[LDS_NUM_ELEMENTS];%s" \
% (self.sharedDeclStr, self.endLine )
if 0:
# in some cases we know the pad values at compile time and could hard-code here. Not enabled.
for tc in ('A', 'B'):
for zp in kernel["ProblemType"]["ZeroPad%s"%tc]:
(freeDim, sumDim, padStart, padEnd) = zp
freeDimChar = globalParameters["IndexChars"][freeDim]
sumChar = self.indexChars[sumDim]
kStr += self.endLine
kStr += " unsigned int padStart%s%s%s = %u;" % (tc, freeDimChar, sumChar, padStart) + self.endLine
kStr += " unsigned int padEnd%s%s%s = %u;" % (tc, freeDimChar, sumChar, padEnd) + self.endLine
self.magicSumChars = []
if kernel["PackSummationDims"]:
self.magicSumChars += [globalParameters["IndexChars"][c] for \
c in kernel["ProblemType"]["IndicesSummation"][1:]]
self.magicNonSumChars = kernel["PackedC0IdxChars"][:-1] + kernel["PackedC1IdxChars"][:-1]
if kernel["MagicDivAlg"] == 2:
kStr += "typedef struct MagicStruct {unsigned M; int a; int s;} MagicStruct;" + self.endLine
kStr += "const unsigned MAGIC_STRUCT_A = 0x80000000; // for extracting a-bit from shift kernarg" + self.endLine
kStr += "#define MAGIC_DIV2(dividend, magic) (((((uint64_t)(dividend) * magic.M) >> 32) + dividend*magic.a) >> magic.s)%s" % self.endLine
sumParms=[(idxChar, "magicStruct%s"%idxChar, "NumIter%s"%idxChar) for idxChar in self.magicSumChars]
if kernel["PackSummationDims"] and kernel["GlobalSplitU"] > 1 and sumParms:
sumParms.append([self.unrollChar, "magicStruct%s_GsuRemainder"%self.unrollChar, "NumIter%s_GsuRemainder" % self.unrollChar])
for (idxChar, magicStruct, parmName) in sumParms + [(idxChar, "magicStruct%s"%idxChar, "Size%s"%idxChar) for idxChar in self.magicNonSumChars]:
kStr += self.endLine
kStr += " MagicStruct %s;"%(magicStruct) + self.endLine
kStr += " %s.M = magicNumber%s;" % (magicStruct, parmName) + self.endLine
kStr += " %s.a = (magicShift%s & MAGIC_STRUCT_A) ? 1:0;" %(magicStruct, parmName) + self.endLine
kStr += " %s.s = magicShift%s & (~MAGIC_STRUCT_A);" %(magicStruct, parmName) + self.endLine
return kStr
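  # Illustrative sketch (kept as a comment so it never reaches the generated
  # source): the MAGIC_DIV2 macro emitted above replaces an integer divide by a
  # runtime-constant denominator with a multiply plus shifts. A Python model,
  # using example magic constants for dividing by 3 (assumed values, not ones
  # computed here):
  #
  #   def magic_div2(dividend, M, a, s):
  #       return (((dividend * M) >> 32) + dividend * a) >> s
  #
  #   magic_div2(10, 0xAAAAAAAB, 0, 1)  # == 3, i.e. 10 // 3
  #   magic_div2(9,  0xAAAAAAAB, 0, 1)  # == 3, i.e. 9 // 3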
##############################################################################
# Open Persistent Loop
# init iteration counter, define loop target
##############################################################################
def openPersistentLoop(self, kernel):
kStr = ""
if kernel["PersistentKernel"]:
wg0 = "wg%s" % self.tileChar0
wg1 = "wg%s" % self.tileChar1
kStr += " %s serialWgIter = %s(0);%s" \
% (self.uint64Str, self.getGroupIdStr, self.endLine)
kStr += " unsigned int n%s = problemNumGroupTiles0;%s" \
% ( wg0, self.endLine)
kStr += " unsigned int n%s = problemNumGroupTiles1;%s" \
% ( wg1, self.endLine)
kStr += " unsigned int %s;%s" % ( wg0, self.endLine)
kStr += " unsigned int %s;%s" % ( wg1, self.endLine)
#kStr += "if (serial==0) printf(\"WG%%u_%%u probWG:%%u_%%u %s\", hc_get_group_id(0), hc_get_group_id(1), %s, %s);" % (self.endLinePP, wg0, wg1)+ self.endLine
kStr += "%swhile (1) { // persistent loop %s" % (self.endLine, self.endLine)
kStr += " %s = serialWgIter %% problemNumGroupTiles0;%s" \
% ( wg0, self.endLine)
kStr += " %s = serialWgIter / problemNumGroupTiles0;%s" \
% ( wg1, self.endLine)
return kStr
##############################################################################
# Global Read Addresses: Work-Group
##############################################################################
def graWorkGroup(self, kernel, isPap):
kStr = ""
wg0 = "wg%s" % self.tileChar0
wg1 = "wg%s" % self.tileChar1
nwgg = kernel["WorkGroupMapping"] >= 0
n0 = 0 if nwgg else 1
n1 = 1 if nwgg else 0
if kernel["PersistentKernel"]:
kStr += " %s = serialWgIter %% problemNumGroupTiles0;%s" \
% ( wg0, self.endLine)
kStr += " %s = serialWgIter / problemNumGroupTiles0;%s" \
% ( wg1, self.endLine)
else:
# optionally transpose work-group grid
kStr += " unsigned int %s = %s(%u);%s" \
% ( wg0, self.getGroupIdStr, n0, self.endLine)
kStr += " unsigned int %s = %s(%u);%s" \
% ( wg1, self.getGroupIdStr, n1, self.endLine)
kStr += " unsigned int n%s = %s(%u);%s" \
% ( wg0, self.getNumGroupsStr, n0, self.endLine)
kStr += " unsigned int n%s = %s(%u);%s" \
% ( wg1, self.getNumGroupsStr, n1, self.endLine)
if kernel["GlobalSplitU"] > 1:
kStr += " n%s /= GLOBAL_SPLITU;%s" % (wg1, self.endLine)
# split up work-group grid
if kernel["GlobalSplitU"] > 1:
kStr += " unsigned int gsuSumIdx;%s" % self.endLine
if kernel["GlobalSplitUWorkGroupMappingRoundRobin"]:
kStr += " gsuSumIdx = %s / n%s;%s" \
% (wg1, wg1, self.endLine)
kStr += " %s = %s %% n%s;%s" \
% (wg1, wg1, wg1, self.endLine)
else:
kStr += " gsuSumIdx = %s %% GLOBAL_SPLITU;%s" \
% (wg1, self.endLine)
kStr += " %s = %s / GLOBAL_SPLITU;%s" \
% (wg1, wg1, self.endLine)
########################################
# Blocked rows or columns
if kernel["WorkGroupMappingType"] == "B" and abs(kernel["WorkGroupMapping"]) > 1:
kStr += self.endLine
kStr += " %s wgSerial = %s + (%s %% WORK_GROUP_MAPPING) * n%s;// within block%s" \
% (self.uint64Str, wg0, wg1, wg0, self.endLine)
kStr += " unsigned int block = %s / WORK_GROUP_MAPPING;%s" \
% (wg1, self.endLine );
kStr += " unsigned int blockRemainder = (%s < n%s-(n%s %% WORK_GROUP_MAPPING) ) ? 0 : n%s %% WORK_GROUP_MAPPING;%s" % \
( wg1, wg1, wg1, wg1, self.endLine )
for blockRemainder in range(0, abs(kernel["WorkGroupMapping"])):
blockWidth = abs(kernel["WorkGroupMapping"]) if blockRemainder==0 else blockRemainder
if blockRemainder > 0:
kStr += " else "
else:
kStr += " "
if blockRemainder < abs(kernel["WorkGroupMapping"])-1:
kStr += "if ( blockRemainder == %u) " % (blockRemainder)
kStr += "{%s" % self.endLine
kStr += " %s = wgSerial / %u;%s" \
% (wg0, blockWidth, self.endLine)
kStr += " %s = wgSerial %% %u + block*WORK_GROUP_MAPPING;%s" \
% (wg1, blockWidth, self.endLine)
kStr += " }"
kStr += "%s" % self.endLine
########################################
# Generalized Z-Order
elif kernel["WorkGroupMappingType"] == "Z":
kStr += " unsigned int nwg0 = (size%s + MT%s - 1) / MT%s;%s" \
% (self.tileChar0, self.tileChar0, self.tileChar0, self.endLine)
kStr += " unsigned int nwg1 = (size%s + MT%s - 1) / MT%s;%s" \
% (self.tileChar1, self.tileChar1, self.tileChar1, self.endLine)
if abs(kernel["WorkGroupMapping"]) == 1: # Generalized Z-Order
kStr += " generalized_z_order(&%s, &%s, %s, %s, 0, nwg0, nwg1);%s" \
% ( wg0, wg1, wg0, wg1, self.endLine)
elif abs(kernel["WorkGroupMapping"]) == 2: # Z-Order round up and return early
kStr += " unsigned int wgSerial = %s + %s * n%s;%s" % (wg0, wg1, wg0 if nwgg else wg1, self.endLine)
kStr += " z_order(&%s, &%s, wgSerial);%s" % (wg0, wg1, self.endLine)
kStr += " if (%s >= nwg0 || %s >= nwg1) return; // wg mapped out of bounds after z-ordering%s" \
% (wg0, wg1, self.endLine)
else:
printExit("WorkGroupMappingType=Z and WorkGroupMapping=%u not supported"%kernel["WorkGroupMapping"])
#kStr += "if (serial==0) printf(\"WG:%%u_%%u progWG:%%u_%%u \\n\", hc_get_group_id(0), hc_get_group_id(1), %s, %s);" \
# % (wg0, wg1)+ self.endLine
if kernel["PersistentKernel"]:
# could compare serialWgIter against problem nwg0*nwg1?
kStr += " if ((%s >= n%s) || (%s >= n%s)) break; // persistent loop%s" \
% (wg1, wg1, wg0, wg0, self.endLine)
#kStr += "if (serial==0) printf(\"WG%%u_%%u probWG:%%u_%%u probNumWG=%%u_%%u\\n%s\", hc_get_group_id(0), hc_get_group_id(1), %s, %s, problemNumGroupTiles0, problemNumGroupTiles1);" % (self.endLinePP, wg0, wg1)+ self.endLine
return kStr
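  # Hedged host-side model (comment only) of the blocked WorkGroupMapping math
  # emitted above; the helper name and parameters are illustrative:
  #
  #   def remap_wg(wg0, wg1, n_wg0, n_wg1, wgm):
  #       wg_serial = wg0 + (wg1 % wgm) * n_wg0   # rank within the row block
  #       block = wg1 // wgm
  #       rem = 0 if wg1 < n_wg1 - (n_wg1 % wgm) else n_wg1 % wgm
  #       width = wgm if rem == 0 else rem        # rows in this (maybe partial) block
  #       return wg_serial // width, wg_serial % width + block * wgm
  #
  # With n_wg0 = 4, n_wg1 = 4, wgm = 2, consecutive hardware work-groups remap to
  # (0,0), (0,1), (1,0), (1,1), (2,0), (2,1), ... so output tiles are visited in
  # 2-wide blocks, which tends to improve cache reuse of A/B tiles among
  # concurrently running work-groups.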
##############################################################################
# Global Read Addresses: Tile Assignment A/B
##############################################################################
def graTileAssignment(self, kernel, tP):
kStr = ""
kStr += " unsigned int globalReadOffset%s%s = (serial%s" \
% (tP["tensorChar"], tP["tileChar"], ("%" if tP["grcg"] == tP["tlu"] else "/") )
if tP["grcg"]:
kStr += (tP["lvc"] if tP["grcv"] else tP["lsc"])
else:
kStr += (tP["lsp"] if tP["grcv"] else tP["lvp"])
kStr += ")"
if tP["grcv"] == tP["tlu"]:
kStr += "*GLOBAL_LOAD_VECTOR_WIDTH_%s" % tP["tensorChar"]
kStr += " + ("
kStr += "wg%s" % (tP["tileChar"])
kStr += ")*MT%s;%s" % (tP["tileChar"], self.endLine)
return kStr
##############################################################################
# Global Read Addresses: Unroll Assignment A/B
##############################################################################
def graUnrollAssignment(self, kernel, tP):
kStr = ""
kStr += " unsigned int globalReadOffset%s%s = (serial%s" \
% (tP["tensorChar"], self.unrollChar, ("/" if tP["grcg"] == tP["tlu"] else "%") )
if tP["grcg"]:
kStr += (tP["lvc"] if tP["grcv"] else tP["lsc"])
else:
kStr += (tP["lsp"] if tP["grcv"] else tP["lvp"])
kStr += ")"
if tP["grcv"] != tP["tlu"]:
kStr += "*GLOBAL_LOAD_VECTOR_WIDTH_%s"% tP["tensorChar"]
if kernel["GlobalSplitU"] > 1:
if kernel["GlobalSplitUSummationAssignmentRoundRobin"]:
kStr += " + LOCAL_DEPTHU*"
else:
kStr += " + (size%s/GLOBAL_SPLITU)*" % self.unrollChar
kStr += "gsuSumIdx"
kStr += ";%s" % self.endLine
return kStr
##############################################################################
# Global Read Addresses: Other Free Assignments
##############################################################################
def graOtherFreeAssignments(self, kernel):
kStr = ""
# packed free dims don't use 'wg' level vars for dims
nonTileFreeIndices = list(range(0, kernel["ProblemType"]["NumIndicesC"]))
nonTileFreeIndices.remove(kernel["ProblemType"]["Index0"])
nonTileFreeIndices.remove(kernel["ProblemType"]["Index1"])
for i in range(0, len(nonTileFreeIndices)):
index = nonTileFreeIndices[i]
if isPackedIndex(kernel, index):
continue
kStr += " unsigned int wg" + self.indexChars[index] \
+ " = ( " + self.getGroupIdStr + "(2)"
for j in reversed(list(range( i+1, len(nonTileFreeIndices)))):
index2 = nonTileFreeIndices[j]
kStr += " / size" + self.indexChars[index2]
kStr += " ) % size" + self.indexChars[index] + ";" + self.endLine
return kStr
##############################################################################
# Global Read Addresses: Other Summation Assignments
##############################################################################
def graOtherSummationAssignments(self, kernel):
kStr = ""
for i in range(self.otherSummations):
index = i + kernel["ProblemType"]["NumIndicesC"]
kStr += "#define globalReadOffsetA%s 0%s" \
% (self.indexChars[index], self.endLine)
kStr += "#define globalReadOffsetB%s 0%s" \
% (self.indexChars[index], self.endLine)
return kStr
##############################################################################
# Global Read Addresses: Tile Offsets A/B
##############################################################################
def graTileOffsets(self, kernel, tP):
kStr = ""
tc = tP["tensorChar"]
for l in range(0, tP["nrt"]):
for s in range(0, 1 if tP["rc"] else tP["nrtv"]):
flattenedOffset = "flattenedOffset%s_%u_%u"%(tc,l,s)
gro = "globalReadOffset%s%s_%u_%u" % (tc, tP["tileChar"], l, s)
kStr += " unsigned int %s = globalReadOffset%s%s + %u + %d*%s;%s" \
% (flattenedOffset, tc, tP["tileChar"], s, l, \
(tP["lsc"] if tP["tlu"] else tP["lsp"]), \
self.endLine)
# clip to edge if the flattened offset is OOB:
tP["packedSizeList"] = ["size%s"%self.indexChars[idx] for idx in kernel["PackedC%dIndicesX"%tP["tensorIdx"]]]
sizeStr = " * ".join(tP["packedSizeList"])
kStr += " %s = (%s > (%s-1)) ? (%s-1):%s;%s" \
% (flattenedOffset, flattenedOffset, sizeStr, sizeStr, flattenedOffset, self.endLine)
# Create additional vector address components for any packed dimensions
lastGro = flattenedOffset
firstPrintedIdx=1
lastIdx = -1
for idx in kernel["ProblemType"]["IndexAssignments%s"%tc]:
if idx < kernel["ProblemType"]["NumIndicesC"] and isPackedIndex(kernel, idx, tP["PackBatchDims"]):
gro = "globalReadOffset%s%s_%u_%u" % (tc, self.indexChars[idx], l, s)
            # unpacked batch dims do not need to declare a GRO; they use the WG index
# packed batch dims and free dims do need a GRO defined here, and may need to 'unpack'
# process in order of index assignments for A/B.
if firstPrintedIdx:
# no unpacking from prev needed:
firstPrintedIdx = 0
kStr += " unsigned int %s = %s;%s" % (gro, flattenedOffset, self.endLine)
#kStr += "printf(\"gro: serial:%%u wg0:%%u wg1:%%u %s:%%u\\n\", serial, wg0I, wg1J, %s);%s" % (gro, gro, self.endLine)
else:
# if another free dim or a packed batch dim
if kernel["MagicDivAlg"]:
c = globalParameters["IndexChars"][lastIdx]
if kernel["MagicDivAlg"]==1:
kStr += " unsigned int %s = MAGIC_DIV1(%s, magicNumberSize%s, magicShiftSize%s);%s" \
% (gro, lastGro, c, c, self.endLine)
elif kernel["MagicDivAlg"]==2:
kStr += " unsigned int %s = MAGIC_DIV2(%s, magicStruct%s);%s" \
% (gro, lastGro, c, self.endLine)
kStr += " %s -= (%s*size%s);%s" \
% (lastGro, gro, self.indexChars[lastIdx], self.endLine)
else:
kStr += " unsigned int %s = %s / size%s; // extract packed index%s" \
% (gro, lastGro, self.indexChars[lastIdx], self.endLine)
kStr += " %s %%= size%s;%s" % (lastGro, self.indexChars[lastIdx], self.endLine)
lastGro = gro
lastIdx = idx
if 0 and tP["isA"]:
kStr += "printf(\"gro-0: serial:%%u wg0:%%u wg1:%%u globalReadOffsetA0I_0_0:%%u\\n\", serial, wg0I, wg1J, globalReadOffsetA0I_0_0);%s" \
% (self.endLine)
if 0 and tP["isB"]:
kStr += "printf(\"gro-0: serial:%%u wg0:%%u wg1:%%u globalReadOffsetA0J_0_0:%%u\\n\", serial, wg0I, wg1J, globalReadOffsetA0J_0_0);%s" \
% (self.endLine)
if 0 and tP["isB"]:
kStr += "printf(\"gro-1: serial:%%u wg0:%%u wg1:%%u globalReadOffsetA0I_0_0:%%u globalReadOffsetB1J_0_0:%%u\\n\", serial, wg0I, wg1J, globalReadOffsetA0I_0_0, globalReadOffsetB1J_0_0);%s" \
% (self.endLine)
return kStr
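  # Minimal sketch (assumed helper, comment only) of the div/mod chain the loop
  # above emits for packed tensor dimensions: a flattened offset is split into
  # per-dimension offsets, fastest-moving dimension first.
  #
  #   def unpack_offset(flat, sizes):
  #       coords = []
  #       for size in sizes[:-1]:
  #           coords.append(flat % size)
  #           flat //= size
  #       coords.append(flat)
  #       return coords
  #
  #   unpack_offset(17, [4, 3, 2])  # == [1, 1, 1] since 17 == 1 + 1*4 + 1*(4*3)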
##############################################################################
# Global Read Addresses: Unroll Offsets A/B
##############################################################################
def graUnrollOffsets(self, kernel, tP):
kStr = ""
for l in range(0, tP["nru"]):
for s in range(0, 1 if tP["rc"] else kernel["VectorWidth"]):
kStr += " unsigned int globalReadOffset%s%s_%u_%u = globalReadOffset%s%s + %u + %d*%s;%s" \
% (tP["tensorChar"], self.unrollChar, l, s, \
tP["tensorChar"], self.unrollChar, s, l, \
(tP["lsp"] if tP["tlu"] else tP["lsc"]), \
self.endLine)
#else:
# kStr += " unsigned int globalReadOffset%s%s_%u = globalReadOffset%s%s + %d*%s;%s" \
# % (tP["tensorChar"], self.unrollChar, l, tP["tensorChar"], self.unrollChar, l, \
# (tP["lsp"] if tP["tlu"] else tP["lsc"]), \
# self.endLine)
return kStr
##############################################################################
# Global Read Addresses: Branch A/B - TODO
##############################################################################
def graBranch(self, kernel, tP):
kStr = ""
for l in range(0, tP["nrt"]):
gro = "(globalReadOffset%s%s_%u_0%s)" \
% (tP["tensorChar"], tP["tileChar"], l, \
(" + (VECTOR_WIDTH-1)" if tP["rtc"] else "") )
limit = "size%s" % (tP["tileChar"])
kStr += " bool inBounds%s_%u = %s < %s;%s" \
% (tP["tensorChar"], l, gro, limit, self.endLine)
return kStr
##############################################################################
# Global Read Addresses: Shift A/B
##############################################################################
def graShift(self, kernel, tP):
kStr = ""
for l in range(0, tP["nrt"]):
for s in range(0, 1 if tP["rc"] else tP["nrtv"]):
#gro = "globalReadOffset%s%s_%u_%u" \
# % (tP["tensorChar"], tP["tileChar"], l, s )
#limit = "(size%s-GLOBAL_LOAD_VECTOR_WIDTH_%s)" % (tP["tileChar"], tP["tensorChar"] )
#kStr += " %s = (%s > %s) ? %s+%u : %s;%s" \
# % (gro, gro, limit, limit, s, gro, self.endLine)
kStr += " globalReadOffset%s%s_%u_%u" \
% (tP["tensorChar"], tP["tileChar"], l, s )
kStr += " = ("
kStr += " globalReadOffset%s%s_%u_%u" \
% (tP["tensorChar"], tP["tileChar"], l, s )
kStr += " > "
kStr += "size%s-%s" % (tP["tileChar"], "GLOBAL_LOAD_VECTOR_WIDTH_%s+%u"%(tP["tensorChar"], s) if tP["rtv"] else "1")
kStr += ") ? "
kStr += "size%s-%s" % (tP["tileChar"], "GLOBAL_LOAD_VECTOR_WIDTH_%s+%u"%(tP["tensorChar"], s) if tP["rtv"] else "1")
kStr += " : "
kStr += "globalReadOffset%s%s_%u_%u" \
% (tP["tensorChar"], tP["tileChar"], l, s )
kStr += ";%s" % self.endLine
return kStr
##############################################################################
# Global Read Addresses: Final Offsets A/B
##############################################################################
def graFinalOffsets(self, kernel, tP):
kStr = ""
tc = tP["tensorChar"]
problemType = kernel["ProblemType"]
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nrpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1 if tP["rc"] else tP["nrcv"]):
# Pass parms to GLOBAL_OFFSET_ macro:
gro = "globalReadOffset%s_%u_%u_%u_%u" \
% (tP["tensorChar"], para, sPara, perp, sPerp)
kStr += " %s %s = GLOBAL_OFFSET_%s( " \
% (self.int64Str, gro, tP["tensorChar"])
for i in range(0, len(tP["ia"])):
index = tP["ia"][i]
if index < kernel["ProblemType"]["NumIndicesC"]:
if index == tP["tileIdx"]:
kStr += "(globalReadOffset%s%s_%u_%u)" \
% (tP["tensorChar"], tP["tileChar"], \
(para if tP["tlu"] else perp), \
(sPara if tP["tlu"] else sPerp) )
else:
if isPackedIndex(kernel, index):
# pass vector per-tensor-dim offset (rather than scalar wg)
if index in problemType["IndicesBatch"] and not tP["PackBatchDims"]:
                      # pass 0, this is the non-packed batch dim and must be 0
kStr += "0"
else:
kStr += "(globalReadOffset%s%s_%u_%u)" \
% (tc, \
self.indexChars[index],
(para if tP["tlu"] else perp), \
(sPara if tP["tlu"] else sPerp) )
else:
# just a non-vector group index
kStr += "wg" + self.indexChars[index]
else: # summation index
if index == kernel["ProblemType"]["IndexUnroll"]:
kStr += "(globalReadOffset%s%s_%u_%u)" \
% (tP["tensorChar"], self.unrollChar, \
(perp if tP["tlu"] else para), \
(sPerp if tP["tlu"] else sPara) )
else:
kStr += "(globalReadOffset%s%s)" \
% (tP["tensorChar"], self.indexChars[index])
if i < len(tP["ia"])-1:
kStr += ", "
kStr += " );%s" % self.endLine
for zp in kernel["ProblemType"]["ZeroPad%s"%tc]:
            # subtract the pad - this both helps detect OOB efficiently at the
            # summation start and corrects the valid offsets for the start pad.
(freeDim,sumDim) = zp[:2]
freeDimChar = globalParameters["IndexChars"][freeDim]
freeDimChar2 = self.indexChars[freeDim]
sumChar = self.indexChars[sumDim]
kStr += self.indent + gro + " -= padStart%s%s%s;"%(tc,freeDimChar, sumChar) + self.endLine
freeOffset = "globalReadOffset%s%s_%u_%u" \
% (tc, freeDimChar2, \
(para if tP["tlu"] else perp), \
(sPara if tP["tlu"] else sPerp) )
if sumDim == kernel["ProblemType"]["IndexUnroll"]:
sumOffset = "globalReadOffset%s%s_%u_%u" \
% (tc, sumChar,
(perp if tP["tlu"] else para), \
(sPerp if tP["tlu"] else sPara) )
else:
sumOffset = "globalReadOffset%s%s" % (tc, sumChar)
kStr += self.indent + \
"unsigned" + " " +\
gro + "_ZP%s%s = %s*stride%s%s + %s*stride%s%s - padStart%s%s%s;" \
% (freeDimChar, sumChar,
freeOffset, tc,freeDimChar2, sumOffset, tc, sumChar, \
tc, freeDimChar, sumChar) + \
self.endLine
if 0 and tP["isA"]:
kStr += "printf(%sgid0=%%u %s=%%lu%s, %s(0), %s);" \
% (self.quote, gro, self.endLineQuote, \
self.getGlobalIdStr, gro) + self.endLine
return kStr
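  # For reference, the GLOBAL_OFFSET_A/B macro invoked above reduces to a dot
  # product of per-index offsets and strides; a hedged one-line Python model
  # (illustrative only, not part of the writer):
  #
  #   def global_offset(coords, strides):
  #       return sum(c * s for c, s in zip(coords, strides))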
##############################################################################
# Global Read Addresses: Addresses A/B
##############################################################################
def graAddresses(self, kernel, tP):
kStr = ""
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nrpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1 if tP["rc"] else tP["nrcv"]):
kStr += " %sDATA_TYPE const *globalRead%s_%u_%u_%u_%u = %s + globalReadOffset%s_%u_%u_%u_%u;%s" \
% (self.globalPtrStr, tP["tensorChar"], \
para, sPara, perp, sPerp, \
tP["tensorChar"], tP["tensorChar"], \
para, sPara, perp, sPerp, \
self.endLine)
#else:
# kStr += " %sVECTOR_TYPE const *globalRead%s_%u_%u = (%sVECTOR_TYPE const *)(%s + globalReadOffset%s_%u_%u);%s" \
# % (self.globalPtrStr, tP["tensorChar"], para, perp, self.globalPtrStr, \
# tP["tensorChar"], tP["tensorChar"], para, perp, self.endLine)
return kStr
##############################################################################
# Global Read Addresses: Increments A/B
##############################################################################
def graIncrements(self, kernel, loopIdx, tP):
kStr = ""
tc = tP["tensorChar"]
loopChar = self.indexChars[ \
kernel["ProblemType"]["IndicesSummation"][loopIdx]]
declStr = "%s%s globalReadInc%s%s = (%s)stride%s%s" \
% (self.indent, self.int64Str, tc, loopChar, \
self.int64Str, tc, loopChar)
if loopIdx==self.unrollIdx:
kStr += declStr
if not kernel["PackSummationDims"]:
        # PSD recomputes the load address using globalReadIncrementFromBase,
        # which already includes the LOCAL_DEPTHU multiple - don't include it here
kStr += "*LOCAL_DEPTHU"
if kernel["GlobalSplitU"] > 1 \
and kernel["GlobalSplitUSummationAssignmentRoundRobin"]:
kStr += "*GLOBAL_SPLITU"
else:
if kernel["PackSummationDims"]:
        # Skip the subtract of the previous iteration since PSD computes the load address using globalReadIncrementFromBase
kStr += declStr
else:
# For Source kernel the address moves during the unroll loop
# but not during the tail loop - so higher-order summations
# need to only subtract the increments performed in the unroll
# loop (truncate the iterations that are handled in tail loop).
tmpChar = self.indexChars[kernel["ProblemType"]["IndicesSummation"][loopIdx+1]]
if loopIdx+1 == self.unrollIdx:
          # special case: adjust (subtract) the address increments made during the unroll loop
if kernel["GlobalSplitU"] > 1:
numIter = "incNumIter%s_%s" % (self.unrollChar, tc)
kStr += self.indent + "unsigned int %s = size%s/LOCAL_DEPTHU;" \
% (numIter, tmpChar) + self.endLine
kStr += self.calculateLoopNumIterGsu(kernel, numIter, numIter, hidden=True)
numIter += "*GLOBAL_SPLITU"
else:
numIter = "size%s/LOCAL_DEPTHU" % tmpChar
kStr += declStr
kStr += " - stride%s%s*(" % (tc, tmpChar) + numIter + ")*LOCAL_DEPTHU"
else:
# other summation that does not immediately wrap the unroll inc:
kStr += declStr
kStr += " - stride%s%s*(size%s)" % (tc, tmpChar, tmpChar)
kStr += ";" + self.endLine
return kStr
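  # Worked example of the subtraction above (sizes assumed for illustration):
  # if the unroll size is 70 and LOCAL_DEPTHU == 16, the unroll loop runs
  # 70 // 16 == 4 times and advances the pointer by 4*16*strideL; the increment
  # for the next outer summation index K is therefore strideK - 64*strideL, and
  # the 6 leftover elements are handled by the tail loop.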
##############################################################################
# Local Write Addresses: Tile Assignment A/B
##############################################################################
def lwaTileAssignment(self, kernel, tP):
kStr = self.comment("local write addresses: tile assignment %s"%tP["tensorChar"])
kStr += " unsigned int lw%s%s = (serial%s" \
% (tP["tensorChar"], tP["tileChar"], ("%" if tP["grcg"] \
== tP["tlu"] else "/") )
if tP["grcg"]:
kStr += (tP["lvc"] if tP["grcv"] else tP["lsc"])
else:
kStr += (tP["lsp"] if tP["grcv"] else tP["lvp"])
kStr += ")";
if tP["grcv"] == tP["tlu"]:
kStr += "*GLOBAL_LOAD_VECTOR_WIDTH_%s" % tP["tensorChar"]
kStr += ";%s" % self.endLine
return kStr
##############################################################################
# Local Write Addresses: Unroll Assignment A/B
##############################################################################
def lwaUnrollAssignment(self, kernel, tP):
kStr = self.comment("local write addresses: unroll assignment %s"%tP["tensorChar"])
kStr += " unsigned int lw%s%s = (serial%s" \
% (tP["tensorChar"], self.unrollChar, ("/" if tP["grcg"] \
== tP["tlu"] else "%") )
if tP["grcg"]:
kStr += (tP["lvc"] if tP["grcv"] else tP["lsc"])
else:
kStr += (tP["lsp"] if tP["grcv"] else tP["lvp"])
kStr += ")";
if tP["grcv"] != tP["tlu"]:
kStr += "*GLOBAL_LOAD_VECTOR_WIDTH_%s" % tP["tensorChar"]
kStr += ";%s" % self.endLine
return kStr
##############################################################################
# Local Write Addresses: First Offset A/B
##############################################################################
def lwaFirstOffset(self, kernel, tP):
kStr = ""
kStr += " unsigned int localWriteFirstOffset%s = lw%s%s + lw%s%s*(MT%s+PAD)%s;%s" \
% (tP["tensorChar"], tP["tensorChar"], tP["tileChar"], \
tP["tensorChar"], self.unrollChar, tP["tileChar"], \
" + LDS_OFFSET_B" if tP["isB"] else "", self.endLine)
return kStr
##############################################################################
# Local Write Addresses: Final Offsets A/B
##############################################################################
def lwaFinalOffsets(self, kernel, tP):
kStr = self.comment("local write addresses: final offsets %s" % tP["tensorChar"])
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nwpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1): # tP["nwcv"]):
kStr += " unsigned int localWriteOffset%s_%u_%u_%u_%u = localWriteFirstOffset%s + (%u + %d*%s)" \
% (tP["tensorChar"], para, sPara, perp, sPerp, \
tP["tensorChar"], sPara if tP["tlu"] else sPerp, para, \
(tP["lsc"] if not tP["tlu"] else tP["lsc"]) )
if not tP["tlu"]:
kStr += "*(MT%s+PAD)" % (tP["tileChar"])
kStr += " + (%u + %d*%s)" % (
sPerp if tP["tlu"] else sPara, perp, \
(tP["lsp"] if tP["tlu"] else tP["lsp"]) )
if tP["tlu"]:
kStr += "*(MT%s+PAD)" % (tP["tileChar"])
kStr += ";%s" % self.endLine
return kStr
##############################################################################
# Local Write Addresses: Declare Addresses A/B
##############################################################################
def lwaDeclareAddresses(self, kernel, tP):
kStr = self.comment("local write addresses: declare addresses %s" % tP["tensorChar"])
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nwpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1): # tP["nwcv"]):
kStr += " %sDATA_TYPE *localWrite%s_%u_%u_%u_%u;%s"\
% (self.sharedPtrStr, tP["tensorChar"], \
para, sPara, perp, sPerp, self.endLine )
return kStr
##############################################################################
# Local Read Addresses: Tile Assignment A
##############################################################################
def lraTileAssignmentA(self, kernel, tP):
kStr = ""
kStr += " unsigned int lr%s = (serial %% SG%s);%s" \
% (tP["tileChar"], self.tileChar0, self.endLine)
return kStr
##############################################################################
# Local Read Addresses: Tile Assignment B
##############################################################################
def lraTileAssignmentB(self, kernel, tP):
kStr = ""
kStr += " unsigned int lr%s = (serial / SG%s) %% SG%s;%s" \
% (tP["tileChar"], self.tileChar0, self.tileChar1, self.endLine)
return kStr
##############################################################################
# Local Read Addresses: Final Offset A
##############################################################################
def lraFinalOffset(self, kernel, tP):
kStr = ""
kStr += " unsigned int localReadOffset%s = lr%s*VECTOR_WIDTH + sgId*(MT%s+PAD)%s;%s" \
% ( tP["tensorChar"], tP["tileChar"], tP["tileChar"], \
" + LDS_OFFSET_B" if tP["isB"] else "", self.endLine)
return kStr
##############################################################################
# Local Read Addresses: Declare Addresses A/B
##############################################################################
def lraDeclareAddresses(self, kernel, tP):
kStr = ""
kStr += " %sDATA_TYPE *localRead%s;%s" % (self.sharedPtrStr, \
tP["tensorChar"], self.endLine)
return kStr
##############################################################################
# openShadowInit
##############################################################################
def openShadowInit(self, kernel):
return ""
##############################################################################
# closeShadowInit
##############################################################################
def closeShadowInit(self, kernel):
return ""
##############################################################################
# Initialize C
##############################################################################
def initC(self, kernel):
kStr = ""
    # init rC; with prefetch (pf) this is called twice
kStr += self.endLine
for i in range(0, kernel["ThreadTile0"]*kernel["ThreadTile1"]):
kStr += " rC[%u] = SCALAR_ZERO;%s" % (i, self.endLine)
return kStr
##############################################################################
# Declare Loop Num Iterations
##############################################################################
def declareLoopNumIter(self, kernel):
kStr = ""
for loopIdx in kernel["ProblemType"]["IndicesSummation"]:
loopChar = self.indexChars[loopIdx]
kStr += "%sint numIter%s;%s" \
% (self.indent, loopChar, self.endLine)
return kStr
##############################################################################
# Declare stagger parms used for both A and B
# Input is the number of loop iterations
# Defines staggerUIter
# staggerUIter must be power-of-2 to simplify masking implementation
##############################################################################
def declareStaggerParms(self, kernel):
kStr = ""
loopChar = self.indexChars[ \
kernel["ProblemType"]["IndicesSummation"][self.unrollIdx]]
# Number of elements in U accessed by the unroll loop:
# Does not include elements accessed in tail loop
kStr += " const unsigned origNumIter = numIter%s;%s" % (loopChar, self.endLine)
if kernel["StaggerUMapping"] == 0:
staggerInput = ("wg%s" % self.tileChar0)
elif kernel["StaggerUMapping"] == 1:
staggerInput = "wg%s" % self.tileChar1
elif kernel["StaggerUMapping"] == 2:
staggerInput = "wg2"
elif kernel["StaggerUMapping"] == 3:
staggerInput = "wgSerial"
elif kernel["StaggerUMapping"] == 4:
staggerInput = "0xffffffff" # all WG compute same stagger, this is test mode
else:
assert(0) # unsupported
#kStr += "if (serial==0) printf(\"xWG:%u_%u progWG:%u_%u staggerUIterParm=%u\\n\", hc_get_group_id(0), hc_get_group_id(1), wg0I, wg1J, staggerUIterParm);" + self.endLine
kStr += " unsigned staggerUIter = (%s & staggerUIterParm);%s" % (staggerInput, self.endLine)
bpeAB = int(4*kernel["ProblemType"]["DataType"].numRegisters())
kStr += " staggerUIter = (staggerUIter << %u); // shift so each stagger has %u-byte stride%s" \
% (kernel["_staggerStrideShift"], \
(1<<kernel["_staggerStrideShift"])*kernel["DepthU"]*bpeAB, self.endLine)
#kStr += "if (serial==0) printf(\"WG:%u_%u progWG:%u_%u staggerUIter=%u\\n\", hc_get_group_id(0), hc_get_group_id(1), wg0I, wg1J, staggerUIter);" + self.endLine
#kStr += " staggerUIter = 0;\n"
if self.db["PrintStagger"]:
kStr += "if (%s(2)==0 && %s(1)==0 && %s(0) == 0)%s" % \
(self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr, self.endLine)
kStr += " printf(%sStaggerOffset loop init: numIter=%%u, staggerUIter=%%u, globalReadIncAL=%%lu globalReadIncBL=%%lu %s,\
numIter%s, staggerUIter, globalReadIncAL, globalReadIncBL);%s" \
% (self.quote, self.endLineQuote, loopChar, self.endLine)
return kStr
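  # Small worked example of the stagger math above (values assumed for
  # illustration): with staggerUIterParm == 7 (a power-of-2-minus-1 mask) and
  # _staggerStrideShift == 1, work-group 5 gets
  #
  #   staggerUIter = (5 & 7) << 1   # == 10
  #
  # so its unroll loop starts 10 iterations into the summation, spreading the
  # groups' first global reads across different K offsets.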
##############################################################################
# Calculate and apply stagger offsets
#
# To help with cache and memory parking, offset the start position in the
# summation dimension so each group starts at a different K
##############################################################################
def calculateStagger(self, kernel, tP):
kStr = ""
tc = tP["tensorChar"]
loopIdx = self.unrollIdx
loopChar = self.indexChars[kernel["ProblemType"]["IndicesSummation"][loopIdx]]
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nrpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1 if tP["rc"] else tP["nrcv"]):
gr = "globalRead%s_%u_%u_%u_%u" \
% (tc, para, sPara, perp, sPerp)
ti= "globalReadOffset%s%s_%u_%u" \
% (tc, tP["tileChar"], \
(para if tP["tlu"] else perp), \
(sPara if tP["tlu"] else sPerp) )
kStr += " %s += (staggerUIter * globalReadInc%s%s); // apply stagger offset%s" \
% (gr, tc, loopChar, self.endLine)
if self.db["PrintStagger"]:
kStr += "if (%s(2)==0 && %s(1)==0 && %s(0) <= 16)%s" % \
(self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr, self.endLine)
# typecasting to work around hcc printf bugs:
kStr += "printf(%sStaggerOffset init: gid=%%u.%%u.%%u, ti=0x%%x %s-%s=0x%%x%s, \
%s(2),%s(1),%s(0), %s, (unsigned)(size_t)(%s-%s));%s" \
% (self.quote,\
gr, tc,
self.endLineQuote,\
self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr,\
ti, gr, tc, \
self.endLine)
    # if prefetching, the wrap iteration needs to change since some tiles were
    # already prefetched before entering the unroll loop
kStr += self.endLine
if tP["isB"]:
kStr += " staggerUIter += %u; // add PrefetchGlobalRead%s" \
% (kernel["PrefetchGlobalRead"], self.endLine)
# StaggerUIter is now the loop iteration where we should wrap the offset back to 0
return kStr
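  # Comment-only sketch of (roughly) the visiting order the stagger offset
  # produces; the helper name is hypothetical:
  #
  #   def staggered_order(num_iter, stagger):
  #       return [(stagger + i) % num_iter for i in range(num_iter)]
  #
  #   staggered_order(8, 3)  # == [3, 4, 5, 6, 7, 0, 1, 2]
  #
  # The wrap back to 0 is what globalReadIncCheckStagger implements by
  # subtracting origNumIter increments when the loop counter hits staggerUIter.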
##############################################################################
# Remove the stagger offset for the kernel
# (used in tail loop)
##############################################################################
def removeStagger(self, kernel, tP):
kStr = ""
tc = tP["tensorChar"]
loopIdx = self.unrollIdx
loopChar = self.indexChars[kernel["ProblemType"]["IndicesSummation"][loopIdx]]
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nrpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1 if tP["rc"] else tP["nrcv"]):
gr = "globalRead%s_%u_%u_%u_%u" \
% (tP["tensorChar"], para, sPara, perp, sPerp)
if self.staggerU:
kStr += " %s += ((origNumIter - (staggerUIter - %u)) * globalReadInc%s%s); // remove stagger offset%s" \
% (gr, kernel["PrefetchGlobalRead"], tc, loopChar, self.endLine)
if self.db["PrintStagger"]:
kStr += "if (%s(2)==0 && %s(1)==0 && %s(0) <= 8)%s" % \
(self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr, self.endLine)
kStr += "printf(%sStaggerOffset remove: gid=%%u.%%u.%%u, origNumIter=%%u staggerUIter=%%u %s=%%p %s=%%p %s, \
%s(2),%s(1),%s(0), origNumIter, staggerUIter, %s, %s);%s" \
% (self.quote, \
tc, gr, \
self.endLineQuote, \
self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr,
tc, gr, \
self.endLine)
return kStr
##############################################################################
# Emit code to compute loop iterations for GSU.
# If the unroll summation size is not evenly divisible by GSU, then
  # some of the CUs working on the same output space may perform different
# numbers of unroll loop iterations. Account for that here.
# This is a separate function since the graInc for multiple summations
  # needs to know the number of loop iterations as well; this helper allows
  # the same code to be emitted in multiple places.
##############################################################################
def calculateLoopNumIterGsu(self, kernel, srcIterVar, dstIterVar, hidden):
kStr = ""
if hidden:
kStr += self.indent + "{" + self.endLine
indent = self.indent + " "
else:
indent = self.indent
kStr += "%sunsigned int numIterMyWg = %s / GLOBAL_SPLITU;%s" \
% (indent, srcIterVar, self.endLine)
kStr += "%sunsigned int numIterPerWgRemainder = %s %% GLOBAL_SPLITU;%s" \
% (indent, srcIterVar, self.endLine)
kStr += "%sif (gsuSumIdx < numIterPerWgRemainder) {%s" \
% (indent, self.endLine)
kStr += indent + " numIterMyWg ++;" + self.endLine
kStr += "%s}%s" % (indent, self.endLine)
kStr += "%s%s = numIterMyWg;%s" \
% (indent, dstIterVar, self.endLine)
if hidden:
kStr += self.indent + "}" + self.endLine
return kStr
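  # A minimal Python model (comment only, names assumed) of the split computed
  # above: the remainder iterations go to the first (total % GSU) partial sums.
  #
  #   def gsu_iters(total_iters, gsu, gsu_sum_idx):
  #       n = total_iters // gsu
  #       if gsu_sum_idx < total_iters % gsu:
  #           n += 1
  #       return n
  #
  #   [gsu_iters(10, 4, i) for i in range(4)]  # == [3, 3, 2, 2]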
##############################################################################
# Calculate Loop Num Iterations
##############################################################################
def calculateLoopNumIter(self, kernel, loopIdx, isPap):
tailLoop = loopIdx < 0
if tailLoop:
loopIdx = self.unrollIdx
kStr = ""
problemType = kernel["ProblemType"]
loopDim = problemType["IndicesSummation"][loopIdx]
loopChar = self.indexChars[loopDim]
if tailLoop:
kStr += self.endLine + " /* Compute tail loop num iter */" + self.endLine
if kernel["PackSummationDims"]:
totalIters = "(size%s" % self.unrollChar
for os in range(self.otherSummations):
otherSumChar = self.indexChars[problemType["IndicesSummation"][os]]
totalIters += "*size%s" % otherSumChar
totalIters += ")"
else:
totalIters = "size%s" % self.unrollChar
kStr += "%snumIter%s = (((%s %% LOCAL_DEPTHU) + LOCAL_SPLITU - 1) / LOCAL_SPLITU);%s" \
% (self.indent, self.unrollChar, totalIters, self.endLine)
if kernel["GlobalSplitU"] > 1:
kStr += "%sif (gsuSumIdx != numIterPerWgRemainder) {%s" \
% (self.indent, self.endLine)
kStr += "%s numIter%s = 0;%s" \
% (self.indent, self.unrollChar, self.endLine)
kStr += "%s}%s" % (self.indent, self.endLine)
#kStr += "if (serial==0) printf(\\\"WG%u_%u TK:%u\\\\n\\\", get_group_id(0), get_group_id(1), numIterK);" + self.endLine
else:
kStr += self.endLine + " /* Compute summation loop num iter */" + self.endLine
if loopIdx == self.unrollIdx and kernel["GlobalSplitU"] > 1:
kStr += self.calculateLoopNumIterGsu(kernel, "(size%s / LOCAL_DEPTHU)"%loopChar, \
"numIter%s"%loopChar, hidden=False)
#kStr += "if (serial==0) printf(\\\"WG%u_%u UK:%u\\\\n\\\", get_group_id(0), get_group_id(1), numIterK);" + self.endLine
if self.unrollIncIsDepthU:
kStr += self.indent + "numIter%s *= LOCAL_DEPTHU;"%loopChar + self.endLine
else:
kStr += self.indent + "numIter%s = size%s" \
% (loopChar, loopChar)
if not self.unrollIncIsDepthU and loopIdx == self.unrollIdx:
kStr += " / LOCAL_DEPTHU"
kStr += ";" + self.endLine
if self.unrollIncIsDepthU and loopIdx==self.unrollIdx:
kStr += self.indent + "unsigned int psdIter=0; // packed summation dim iterator" + self.endLine
zpA = self.zpForSumIdx(loopDim, problemType["ZeroPadA"])
zpB = self.zpForSumIdx(loopDim, problemType["ZeroPadB"])
for (zp,tc) in ((zpA,'A'), (zpB,'B')):
if zp:
(freeDim,sumDim) = zp[:2]
freeDimChar = globalParameters["IndexChars"][freeDim]
freeDimChar2 = self.indexChars[freeDim]
sumChar = self.indexChars[sumDim]
kStr += "%sunsigned int elementEdge%s%s = stride%s%s * size%s + stride%s%s*(size%s - 1) - padStart%s%s%s - padEnd%s%s%s;" \
% (self.indent, tc, loopChar, tc, freeDimChar2, freeDimChar2, tc, loopChar, loopChar, tc, freeDimChar, sumChar, tc, freeDimChar, sumChar) \
+ self.endLine
if sumChar not in self.definedIter:
kStr += self.indent + "unsigned int iter%s = 0;" % sumChar + self.endLine
self.definedIter.add(sumChar)
return kStr
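  # Worked example of the tail-loop count computed above (sizes assumed): with
  # sizeL == 130, LOCAL_DEPTHU == 16 and LOCAL_SPLITU == 1 the unroll loop
  # covers 128 elements and the tail loop runs
  #
  #   ((130 % 16) + 1 - 1) // 1   # == 2 iterations
  #
  # to finish the remaining 2 summation elements.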
##############################################################################
# Open Loop
##############################################################################
def openLoop(self, kernel, loopIdx):
problemType = kernel["ProblemType"]
tailLoop = loopIdx < 0
if tailLoop:
loopIdx = self.unrollIdx
kStr = ""
loopChar = self.indexChars[ \
kernel["ProblemType"]["IndicesSummation"][loopIdx]]
if kernel["LoopDoWhile"]:
kStr += "%sdo {%s" % (self.indent, self.endLine)
assert(not self.unrollIncIsDepthU)
else:
if self.unrollIncIsDepthU and loopIdx==self.unrollIdx and not tailLoop:
if kernel["PackSummationDims"]:
totalIters = "("
totalIters += "*".join(["numIter%s"%(self.indexChars[os]) for os in problemType["IndicesSummation"]])
totalIters += ")"
else:
totalIters = "numIter%s" % loopChar
kStr += self.indent \
+ "while (psdIter < %s) {" % (totalIters) \
+ self.endLine
else:
kStr += "%swhile (numIter%s-- > %u) {%s" \
% (self.indent, loopChar, \
(1 if (kernel["PrefetchGlobalRead"] and loopIdx == self.unrollIdx \
and not tailLoop) else 0), self.endLine)
self.indent += " "
#if tailLoop:
# kStr += "if (serial==0) printf(\\\"WG%u_%u: ti=%u\\\\n\\\", get_group_id(0), get_group_id(1), numIterK);" + self.endLine
#else:
# kStr += "if (serial==0) printf(\\\"WG%u_%u: ui=%u\\\\n\\\", get_group_id(0), get_group_id(1), numIterK);" + self.endLine
return kStr
##############################################################################
# Close Loop
##############################################################################
def closeLoop(self, kernel, loopIdx, finalLoop):
kStr = ""
problemType = kernel["ProblemType"]
loopDim = problemType["IndicesSummation"][loopIdx]
loopChar = self.indexChars[loopDim]
for tc in ('A', 'B'):
# assume A and B don't specify same summation idx
zp = next((zpi for zpi in problemType["ZeroPad%s"%tc] if zpi[1] == loopDim), None)
if zp:
if loopIdx == self.unrollIdx:
incAmount = "LOCAL_DEPTHU"
if kernel["GlobalSplitU"] > 1 \
and kernel["GlobalSplitUSummationAssignmentRoundRobin"]:
incAmount += "*GLOBAL_SPLITU"
else:
incAmount = "1"
self.indent = self.indent[2:]
if kernel["LoopDoWhile"]:
kStr += "%s} while (--numIter%s > %u);%s" \
% (self.indent, loopChar, \
(1 if kernel["PrefetchGlobalRead"] else 0), self.endLine )
else:
kStr += "%s}%s" % (self.indent, self.endLine)
#kStr += "if (serial==0) printf(\\\"WG%u_%u: rc0=%.0f\\\\n\\\", get_group_id(0), get_group_id(1), rC[0]);" + self.endLine
return kStr
##############################################################################
  # Open Loop Copy
##############################################################################
def openLoopCopy(self, kernel, lc):
return ""
##############################################################################
# End Summation
##############################################################################
def endSummation(self,kernel):
return ""
##############################################################################
# MAC Iteration
##############################################################################
def macIter(self, kernel, black, iuiCount, useMacro):
kStr = ""
for iui in range(0,iuiCount):
kStr += "%sMAC_%ux%u" % (self.indent, \
kernel["ThreadTile0"],kernel["ThreadTile1"])
if black:
kStr += "_BLK"
kStr += self.endLine
return kStr
##############################################################################
# At Least 1 Unroll
##############################################################################
def openSumAtLeastUnroll(self, kernel, prefetch, isPap, isOptNLL):
kStr = ""
if kernel["GlobalSplitU"] > 1:
kStr += "%sif (numIterMyWg >= 1) {%s" \
% (self.indent, self.endLine)
else:
kStr += "%sif (size%s >= LOCAL_DEPTHU) {%s" \
% (self.indent, self.unrollChar, self.endLine)
self.indent += " "
return kStr
def closeSumAtLeastUnroll(self, kernel, prefetch, isOptNLL):
kStr = ""
self.indent = self.indent[2:]
kStr += "%s} // end %s%s" % \
(self.indent, "PrefetchGlobalRead" if prefetch else "unroll", self.endLine)
if prefetch:
kStr += "%selse { // still need to initC even if skipped prefetch%s" % (self.indent, self.endLine)
kStr += self.initC(kernel)
kStr += "%s}%s" % (self.indent, self.endLine)
return kStr
def globalReadIncCheckStagger(self, iterVar, loopChar, tP, para, sPara, perp, sPerp):
kStr = ""
tc = tP["tensorChar"]
# Check to see if GRA wraps around edge:
gr = "globalRead%s_%u_%u_%u_%u" \
% (tP["tensorChar"], para, sPara, perp, sPerp)
kStr += "%sif ((%s) == staggerUIter) {%s" \
% (self.indent, iterVar, self.endLine)
if self.db["PrintStagger"]:
      # note the loop counter name (numIterK/numIterL) is hard-coded; adjust manually if needed
kStr += "if (%s(2)==0 && %s(1)==0 && %s(0) <= 16)%s" % \
(self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr, self.endLine)
kStr += "printf(%sStaggerOffset wrap-gro: gid=%%u.%%u.%%u, old GR-%s=0x%%x numIter=%%u staggerUIter=%%u%s,\
%s(2),%s(1),%s(0), (unsigned)(size_t)(%s-%s), numIterL, staggerUIter);%s" \
% (self.quote, \
tc, \
self.endLineQuote, \
self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr, \
gr,tc, \
self.endLine)
kStr += " %s%s -= (origNumIter * globalReadInc%s%s); // wrap staggered offset back to row start%s" \
% (self.indent, \
gr, tc, loopChar,
self.endLine)
kStr += "%s}%s" % (self.indent, self.endLine)
if self.db["PrintStagger"]:
kStr += "if (%s(2)==0 && %s(1)==0 && %s(0) <= 8)%s" % \
(self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr, self.endLine)
kStr += "printf(%sStaggerOffset check-gro: gid=%%u.%%u.%%u, GR-%s=0x%%x %s, \
%s(2),%s(1),%s(0), (unsigned)(size_t)(%s-%s));%s" \
% (self.quote, \
tc, \
self.endLineQuote, \
self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr, \
gr,tc, \
self.endLine)
return kStr
##############################################################################
# Global Read: Increment either A or B
# Called from globalReadIncrementAB below
##############################################################################
def globalReadIncrement(self, kernel, loopIdx, tP, prefetchIndex, incs=1):
kStr = ""
tc = tP["tensorChar"]
loopChar = self.indexChars[kernel["ProblemType"]["IndicesSummation"][loopIdx]]
kStr += self.comment("global read inc %s for sum%c"%(tc,loopChar))
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nrpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1 if tP["rc"] else tP["nrcv"]):
globalRead = "globalRead%s_%u_%u_%u_%u" % (tc, para, sPara, perp, sPerp)
kStr += "%s%s = (%sDATA_TYPE const *)( ((%sDATA_TYPE const *)%s) + %s*globalReadInc%s%s);%s" \
% (self.indent, globalRead,
self.globalPtrStr, self.globalPtrStr,
globalRead,
incs, tc, loopChar, \
self.endLine)
if self.staggerU and loopIdx==self.unrollIdx:
kStr += self.globalReadIncCheckStagger("numIter%s"%loopChar, loopChar, tP, para, sPara, perp, sPerp)
#else:
# kStr += "%sglobalRead%s_%u_%u%s += globalReadInc%s%s%s;%s" \
# % (self.indent, tP["tensorChar"], para, perp, \
# (("_s%u"%s) if (tP["rtc"] \
# or tP["ruc"]) else ""), \
# tP["tensorChar"], loopChar, "" if (tP["rtc"] \
# or tP["ruc"]) else "/VECTOR_WIDTH", \
# self.endLine)
return kStr
def globalReadIncrementFromBase(self, kernel, tP, sumOffset, loopChar):
""" Recompute the address, starting from base address pointer + initial offset + summation offset """
kStr = ""
tc = tP["tensorChar"]
kStr += self.comment("global read inc %s from base"%(tc))
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nrpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1 if tP["rc"] else tP["nrcv"]):
globalRead = "globalRead%s_%u_%u_%u_%u" % (tc, para, sPara, perp, sPerp)
kStr += self.indent + \
"%s = %s + globalReadOffset%s_%u_%u_%u_%u + %s;" % (
globalRead,
tc,
tc, para, sPara, perp, sPerp,
sumOffset
) + self.endLine
if self.staggerU:
kStr += self.globalReadIncCheckStagger("iter%s"%loopChar, loopChar, tP, para, sPara, perp, sPerp)
return kStr
##############################################################################
# Global Read: Increment A and B
# Called from KernelWriter
# If PackSummationDims=1, this increments all counters for A and B
##############################################################################
def globalReadIncrementAB(self, kernel, loopIdx, prefetchIndex, incs=1):
imod = Code.Module("globalReadIncrementAB%s")
problemType = kernel["ProblemType"]
unrollChar = self.indexChars[problemType["IndicesSummation"][self.unrollIdx]]
headerCode = ""
if self.unrollIncIsDepthU and loopIdx==self.unrollIdx:
headerCode += self.endLine
headerCode += self.indent + "psdIter += LOCAL_DEPTHU;" + self.endLine
if loopIdx==self.unrollIdx and kernel["PackSummationDims"] and self.actualSummationLoops==1:
kStr = headerCode
if prefetchIndex>0:
psdPackedBits = "(LOCAL_DEPTHU)"
else:
psdPackedBits = "(psdIter)"
for os in reversed(range(problemType["NumIndicesSummation"])):
# Only get here if we are packing summation dims
sumDim = problemType["IndicesSummation"][os]
sumChar = self.indexChars[sumDim]
firstIter = (os==problemType["NumIndicesSummation"]-1)
lastIter = (os==0)
kStr += self.endLine
iterType = "" if sumChar in self.definedIter else "unsigned int "
if not lastIter:
c = "//" if self.psdUuseMagic else "" # show non-magic code commented out
kStr += self.indent + c + iterType + "iter%s = %s %% numIter%s;" % \
(sumChar, psdPackedBits, sumChar) + self.endLine
kStr += self.indent + c
if firstIter:
kStr += "unsigned int "
kStr += "psdPackedBits = %s / numIter%s;" % (psdPackedBits, sumChar) + self.endLine
if self.psdUuseMagic:
assert kernel["MagicDivAlg"] == 2 # older alg not supported
kStr += self.indent
if firstIter:
kStr += "unsigned int "
if os == self.unrollIdx and kernel["GlobalSplitU"]>1:
magicStruct = "((gsuSumIdx < numIterPerWgRemainder) ? magicStruct%s_GsuRemainder : magicStruct%s)"\
% (sumChar, sumChar)
else:
magicStruct = "magicStruct%s" % sumChar
kStr += "tmpBits = MAGIC_DIV2(%s, %s);" % (psdPackedBits, magicStruct) + self.endLine
kStr += self.indent + iterType + "iter%s = %s - tmpBits*numIter%s;" % \
(sumChar, psdPackedBits, sumChar) + self.endLine
kStr += self.indent
if firstIter:
kStr += "unsigned int "
kStr += "psdPackedBits = tmpBits;" + self.endLine
# set up bits for next iteration:
psdPackedBits = "psdPackedBits"
else:
# last iteration:
kStr += self.indent + iterType + "iter%s = %s;" % (sumChar, psdPackedBits) + self.endLine
# update psdOffset:
for (tc) in ('A','B'):
kStr += self.indent
if firstIter:
kStr += self.int64Str + " psdOffset%s = " % tc
else:
kStr += "psdOffset%s += " % tc
kStr += "iter%s*globalReadInc%s%s;" % (sumChar, tc, sumChar)
kStr += self.endLine
# Add the psdOffsets for A and B:
for (tc,tP) in (('A',self.tPA),('B',self.tPB)):
        # makeSchedule is linked to the module names - update both together
incCode = Code.Module("globalReadIncrement%s"%tc)
kStr += self.indent + self.globalReadIncrementFromBase(kernel, tP, "psdOffset%s"%tc, unrollChar)
incCode.addText(kStr)
kStr = ""
imod.addCode(incCode)
else:
# Non pack-summation-dims code path:
incA = Code.Module("globalReadIncrementA")
incA.addText(headerCode)
incA.addText(self.globalReadIncrement(kernel, loopIdx, self.tPA, prefetchIndex, incs))
imod.addCode(incA)
incB = Code.Module("globalReadIncrementB")
incB.addText(self.globalReadIncrement(kernel, loopIdx, self.tPB, prefetchIndex, incs))
imod.addCode(incB)
return imod
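  # Worked example (comments only) of the PackSummationDims unpacking generated above,
  # assuming two summation dims with hypothetical chars K (handled first in the reversed
  # loop, numIterK = 4) and L (numIterL = 3), and psdIter = 10:
  #   iterK         = 10 % 4 = 2
  #   psdPackedBits = 10 / 4 = 2
  #   iterL         = 2              (the last dim keeps the remaining bits)
  # psdOffsetA then accumulates iterK*globalReadIncAK + iterL*globalReadIncAL (likewise
  # for B), and globalReadIncrementFromBase adds that onto the base pointers.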
##############################################################################
# DirectToLds M0 update: Do It A/B
##############################################################################
def directToLdsM0Update(self, kernel, mode, tP):
tc = tP["tensorChar"]
imod = Code.Module("directToLdsM0Update%s_%u"%(tc,mode))
return imod
##############################################################################
# Global Read: Do It A/B
##############################################################################
def globalReadDo(self, kernel, mode, tP):
kStr = ""
tc = tP["tensorChar"]
guardK = (mode==2)
kStr += self.comment("global read %s")%tc
#for perp in range(0, tP["nrp"]):
# for para in range(0, tP["nrc"]):
# for s in range(0, numUnrollVectorComponents):
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nrpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, tP["nrcv"]):
dest ="%s_%u_%u_%u_%u" \
% (tP["tensorChar"].lower(), \
para, sPara, perp, sPerp )
kStr += "%s%s = " % (self.indent, dest)
# guard around K
guarded = 0
if guardK:
guarded = 1
kStr += "( globalReadOffset%s%s_%u_%u + %u >= (size%s %% LOCAL_DEPTHU%s)%s )" \
% (tP["tensorChar"], self.unrollChar, \
(perp if tP["tlu"] else para), \
(sPerp if tP["tlu"] else 0), (0 if tP["tlu"] else sPara), self.unrollChar, \
(" + LOCAL_DEPTHU*gsuSumIdx" if kernel["GlobalSplitU"]>1 \
else ""), (" || !numIter%s"%self.unrollChar) \
if kernel["GlobalSplitU"] > 1 else "")
# guard around pad
for zp in kernel["ProblemType"]["ZeroPad%s"%tc]:
if guarded:
kStr += " || "
guarded = 1
(freeDim, sumDim) = zp[:2]
freeDimChar = globalParameters["IndexChars"][freeDim]
sumChar = self.indexChars[sumDim]
assert self.unrollIncIsDepthU
if kernel["PackSummationDims"]:
iterVar = "iter"+sumChar
elif sumDim == kernel["ProblemType"]["IndicesSummation"][self.unrollIdx]:
iterVar = "psdIter"
else:
raise RuntimeError("ZP not supported with multiple summations and PSD==0")
globalReadOffsetZp = "globalReadOffset%s_%u_%u_%u_%u_ZP%s%s + %u" \
% (tc, para, 0 if tP["rc"] else sPara, perp, sPerp, \
freeDimChar, sumChar,
sPara if tP["rc"] else 0);
kStr += " ( (%s * stride%s%s + %s) >= elementEdge%s%s )" \
% (iterVar, tc, sumChar, globalReadOffsetZp, tc, sumChar)
# guard around edge
if kernel["EdgeType"] == "Branch":
if guarded:
kStr += " || "
guarded = 1
kStr += "( !inBounds%s_%u )" % ( \
(tP["tensorChar"], para if tP["tlu"] else perp) )
if guarded:
kStr += " ? SCALAR_OOB_DATA : "
kStr += "*(globalRead%s_%u_%u_%u_%u + %u);%s" \
% (tP["tensorChar"], para, 0 if tP["rc"] else sPara, perp, sPerp, sPara if tP["rc"] else 0, \
self.endLine)
#if self.db["PrintStagger"] and tP["isA"]:
if 0 and self.db["PrintStagger"]:
kStr += "if (%s(2)==0 && %s(1)==0 && %s(0) <= 16)%s" % \
(self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr, self.endLine)
kStr += " printf(%sglobalRead: gid=%%u.%%u.%%u, %s loaded:%%.0f%s, \
%s(2),%s(1),%s(0), %s );%s" \
% (self.quote,\
tc,
self.endLineQuote, \
self.getGlobalIdStr, self.getGlobalIdStr, self.getGlobalIdStr,\
dest, \
self.endLine)
return kStr
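  # Rough shape of one emitted load (illustration only; symbol names vary with the tile
  # parameters, and the unroll index is shown as L purely as an example), for tensor A
  # with GlobalSplitU == 1 and a guard around the summation edge:
  #
  #   a_0_0_0_0 = ( globalReadOffsetAL_0_0 + 0 >= (sizeL % LOCAL_DEPTHU) )
  #               ? SCALAR_OOB_DATA : *(globalReadA_0_0_0_0 + 0);
  #
  # Out-of-bounds lanes read the sentinel instead of dereferencing past the tensor edge.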
##############################################################################
# Local Write: Swap Offsets A/B
##############################################################################
def localWriteSwapOffsets(self, kernel, tP):
kStr = ""
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nwpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1): # tP["nwcv"]):
kStr += "%slocalWriteOffset%s_%u_%u_%u_%u = (localWriteOffset%s_%u_%u_%u_%u + LDS_OFFSET_BLK)%%(LDS_OFFSET_BLK*2);%s" \
% (self.indent, tP["tensorChar"], \
para, sPara, perp, sPerp, tP["tensorChar"], \
para, sPara, perp, sPerp, self.endLine )
return kStr
##############################################################################
# Local Write: Reset Offsets A/B
##############################################################################
def localWriteResetOffsets(self, kernel, tP):
kStr = ""
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nwpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1): # tP["nwcv"]):
kStr += "%slocalWriteOffset%s_%u_%u_%u_%u %%= LDS_OFFSET_BLK;%s" \
% (self.indent, tP["tensorChar"], \
para, sPara, perp, sPerp, self.endLine )
return kStr
##############################################################################
# Local Write: Init Pointers A/B
##############################################################################
def localWriteInitPointers(self, kernel, tP):
kStr = self.comment("local write init pointers %s" % tP["tensorChar"])
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nwpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, 1): # tP["nwcv"]):
kStr += "%slocalWrite%s_%u_%u_%u_%u = (%sDATA_TYPE *)(localMemory + localWriteOffset%s_%u_%u_%u_%u);%s"\
% (self.indent, tP["tensorChar"], \
para, sPara, perp, sPerp, self.sharedPtrStr, tP["tensorChar"], \
para, sPara, perp, sPerp, self.endLine)
return kStr
##############################################################################
# Local Write: Do It A/B
##############################################################################
def localWriteDo(self, kernel, tP):
kStr = ""
if self.language == "HIP":
kStr += "#pragma clang diagnostic push" + self.endLine
kStr += "#pragma clang diagnostic ignored \"-Wconditional-uninitialized\"" + self.endLine
for perp in range(0, tP["nrp"]):
for sPerp in range(0, tP["nwpv"]):
for para in range(0, tP["nrc"]):
for sPara in range(0, tP["nwcv"]):
kStr += "%s*(localWrite%s_%u_%u_%u_%u + %u) = %s_%u_%u_%u_%u;%s" \
% (self.indent, tP["tensorChar"], \
para, 0, perp, sPerp, sPara, \
tP["tensorChar"].lower(), \
para, \
sPara if tP["tlu"] else sPerp, \
perp, \
sPerp if tP["tlu"] else sPara, \
self.endLine)
if self.language == "HIP":
kStr += "#pragma clang diagnostic pop" + self.endLine
if False and tP["isB"]:
kStr += "%s%s" % (self.syncStr, self.endLine)
kStr += " /* print Local state */" + self.endLine
kStr += " for (unsigned int i = serial; i < LDS_NUM_ELEMENTS; i+=NUM_THREADS) {%s" % self.endLine
kStr += " printf(\\\"lds[%%06u] = %%.0f\\\\n\\\", i, localMemory[i]);%s" % self.endLine
kStr += " }" + self.endLine
return kStr
##############################################################################
# Local Read: Swap Offsets A/B
##############################################################################
def localReadSwapOffsets(self, kernel, internalPointerSwap, tP):
kStr = ""
kStr += "%slocalReadOffset%s = (localReadOffset%s + LDS_OFFSET_BLK)%%(LDS_OFFSET_BLK*2);%s" \
% (self.indent, tP["tensorChar"], tP["tensorChar"], self.endLine)
return kStr
##############################################################################
# Local Read: Reset Offsets A/B
##############################################################################
def localReadResetOffsets(self, kernel, tP):
kStr = ""
kStr += "%slocalReadOffset%s %%= LDS_OFFSET_BLK;%s" \
% (self.indent, tP["tensorChar"], self.endLine)
return kStr
##############################################################################
# Local Read: Init Pointers A/B
##############################################################################
def localReadInitPointers(self, kernel, tP):
kStr = ""
kStr += "%slocalRead%s = (%sDATA_TYPE *)(localMemory + localReadOffset%s);%s" \
% (self.indent, tP["tensorChar"], self.sharedPtrStr, \
tP["tensorChar"], self.endLine)
return kStr
##############################################################################
# Local Read: Increment A/B
##############################################################################
def localReadInc(self, kernel, iui, tP):
kStr = ""
kStr += "%slocalRead%s += LOCAL_SPLITU*(MT%s+PAD);%s" \
% (self.indent, tP["tensorChar"], tP["tileChar"], self.endLine)
return kStr
##############################################################################
# Local Read: Do It A/B
##############################################################################
def localReadDo(self, kernel, black, iui, epsi, uIdx, tP):
kStr = ""
for r in range(0, kernel[tP["tt"]]//kernel["VectorWidth"]):
for s in range(0, kernel["VectorWidth"]):
kStr += "%sr%s[%u*VECTOR_WIDTH+%u%s] = localRead%s[%u*SG%s*VECTOR_WIDTH + %u]; %s" \
% (self.indent, tP["tensorChar"], r, s, \
(("+TT%s"%tP["tileChar"]) if black else ""), \
tP["tensorChar"], r, tP["tileChar"], s, self.endLine)
return kStr
##############################################################################
# Shift Vector Components d0,1
##############################################################################
def shiftVectorComponents(self, kernel, tP):
kStr = ""
kStr += " unsigned int wgMT%s = size%s - wg%s*MT%s;%s" \
% (tP["tileChar"], tP["tileChar"], tP["tileChar"], \
tP["tileChar"], self.endLine)
kStr += " if (wgMT%s > MT%s) wgMT%s = MT%s;%s" \
%(tP["tileChar"], tP["tileChar"], tP["tileChar"], \
tP["tileChar"], self.endLine)
kStr += " unsigned int r%s = wgMT%s %% GLOBAL_LOAD_VECTOR_WIDTH_%s;%s" \
% (tP["tileChar"], tP["tileChar"], tP["tensorChar"], self.endLine)
kStr += " if (r%s > 0 && ((wgMT%s/VECTOR_WIDTH) %% SG%s) == (serial %s SG%s)%s ) {%s" \
% (tP["tileChar"], tP["tileChar"], tP["tileChar"], "%" if tP["isA"] else "/", \
self.tileChar0, (" %% SG%s"%self.tileChar1) if tP["isB"] else "", self.endLine)
# old
#kStr += " unsigned int s%s = (wgMT%s/VECTOR_WIDTH)/SG%s;%s" \
# % (tP["tileChar"], tP["tileChar"], tP["tileChar"], self.endLine)
# new
# (wgMT/(SG0*VW))*(VW/glvw) + (wgMT%VW) / glvw
kStr += " unsigned int s%s = (wgMT%s%%VECTOR_WIDTH)/GLOBAL_LOAD_VECTOR_WIDTH_%s + (wgMT%s/(SG%s*VECTOR_WIDTH))*(VECTOR_WIDTH/GLOBAL_LOAD_VECTOR_WIDTH_%s);%s" \
% (tP["tileChar"], tP["tileChar"], tP["tensorChar"], \
tP["tileChar"], tP["tileChar"], tP["tensorChar"], self.endLine)
for r in range(1, tP["glvw"]):
kStr += " if (r%s == %u) {%s" % (tP["tileChar"], r, self.endLine)
numVectors = kernel["ThreadTile%s"%tP["tileIdx"]]//tP["glvw"]
for vIdx in range(0, numVectors):
if vIdx == 0:
kStr += " "
else:
kStr += " else "
if vIdx < numVectors-1:
kStr += "if (s%s == %u) " % (tP["tileChar"], vIdx)
kStr += "{%s" % self.endLine
for tt in range(0, kernel["ThreadTile%u"%((tP["tileIdx"]+1)%2)]):
for s in range(0, r):
if tP["isA"]:
kStr += " rC[%u + %u*GLOBAL_LOAD_VECTOR_WIDTH_A + %u*TT%s] = rC[%u + %u*GLOBAL_LOAD_VECTOR_WIDTH_A + %u*TT%s];%s" \
% (s, vIdx, tt, self.tileChar0, \
s+tP["glvw"]-r, vIdx, tt, self.tileChar0, \
self.endLine)
else:
kStr += " rC[%u + %u*TT%s*GLOBAL_LOAD_VECTOR_WIDTH_B + %u*TT%s] = rC[%u + %u*TT%s*GLOBAL_LOAD_VECTOR_WIDTH_B + %u*TT%s];%s" \
% (tt, vIdx, self.tileChar0, s, self.tileChar0, \
tt, vIdx, self.tileChar0, \
s+tP["glvw"]-r, self.tileChar0, self.endLine)
#kStr += "printf(\\\"sv %u %u\\\");%s" % (r, vIdx, self.endLine)
kStr += " }"
if vIdx == numVectors-1:
kStr += self.endLine
kStr += " }%s" % self.endLine
kStr += " }%s" % self.endLine
return kStr
def shiftVectorComponentsForMatrixInst(self, kernel, tP):
return self.shiftVectorComponents(kernel, tP)
##############################################################################
# Shift Vectors Components d1
##############################################################################
def shiftVectorComponents1(self, kernel, tP):
kStr = ""
kStr += " unsigned int wgMT%s = size%s - %s*MT%s;%s" \
% (self.tileChar1, self.tileChar1, "wg%s"%self.tileChar1, \
self.tileChar1, self.endLine)
kStr += " if (wgMT%s > MT%s) wgMT%s = MT%s;%s" \
%(self.tileChar1, self.tileChar1, self.tileChar1, \
self.tileChar1, self.endLine)
kStr += " unsigned int r%s = wgMT%s %% VECTOR_WIDTH;%s" \
% (self.tileChar1, self.tileChar1, self.endLine)
kStr += " if (r%s > 0 && ((wgMT%s/VECTOR_WIDTH) %% SG%s) == ((serial / SG%s) %% SG%s) ) {%s" \
% (self.tileChar1, self.tileChar1, self.tileChar1, \
self.tileChar0, self.tileChar1, \
self.endLine)
kStr += " unsigned int s%s = (wgMT%s/VECTOR_WIDTH)/SG%s;%s" \
% (self.tileChar1, self.tileChar1, self.tileChar1, self.endLine)
for r1 in range(1, tP["glvw"]):
kStr += " if (r%s == %u) {%s" % (self.tileChar1, r1, self.endLine)
numVectors = kernel["ThreadTile1"]/tP["glvw"]
for vIdx in range(0, numVectors):
if vIdx == 0:
kStr += " "
else:
kStr += " else "
if vIdx < numVectors - 1:
kStr += "if (s%s == %u) " % (self.tileChar1, vIdx)
kStr += "{%s" % self.endLine
for s in range(0, r1):
for tt0 in range(0, kernel["ThreadTile0"]):
kStr += " rC[%u+%u*TT%s*VECTOR_WIDTH + %u*TT%s] = rC[%u+%u*TT%s*VECTOR_WIDTH + %u*TT%s];%s" \
% (tt0, vIdx, self.tileChar0, s, self.tileChar0, \
tt0, vIdx, self.tileChar0, \
s+tP["glvw"]-r1, self.tileChar0, self.endLine)
kStr += " }"
if vIdx == numVectors - 1:
kStr += self.endLine
kStr += " }%s" % self.endLine
kStr += " }%s" % self.endLine
return kStr
##############################################################################
# Complex Declare Tmp Registers
##############################################################################
def complexDeclareTmpRegisters(self, kernel):
kStr = ""
if kernel["ProblemType"]["DataType"].value == DataType.complexSingle:
kStr += " float type_mac_tmp;" + self.endLine
if kernel["ProblemType"]["DataType"].value == DataType.complexDouble:
kStr += " double type_mac_tmp;" + self.endLine
return kStr
##############################################################################
# LocalSplitU: Local Write
##############################################################################
def localSplitULocalWrite(self, kernel):
kStr = ""
kStr += " %sDATA_TYPE *localLocalSplitU = (%sDATA_TYPE *)(localMemory);%s" \
% (self.sharedPtrStr, self.sharedPtrStr, self.endLine)
for j in range(0, kernel["ThreadTile1"] // kernel["VectorWidth"]):
for i in range(0, kernel["ThreadTile0"] // kernel["VectorWidth"]):
for s in range(0, kernel["VectorWidth"]):
for vc in range(0, kernel["VectorWidth"]):
kStr += "%slocalLocalSplitU[%u + (lr%s + %u*SG%s + (MT%s/VECTOR_WIDTH)*(lr%s*VECTOR_WIDTH + %u + SG%s*VECTOR_WIDTH*%u) + (MT%s*MT%s/VECTOR_WIDTH)*sgId)*VECTOR_WIDTH] = rC[%u + (%u+%u*(TT%s/VECTOR_WIDTH)+%u*TT%s)*VECTOR_WIDTH];%s" \
% (self.indent, vc, self.tileChar0, i, self.tileChar0, \
self.tileChar0, self.tileChar1, \
s, self.tileChar1, j, self.tileChar0, self.tileChar1, vc, i, s, \
self.tileChar0, j, self.tileChar0, self.endLine)
kStr += self.indent + self.syncStr + self.endLine
"""
kStr += " /* print Local state */" + self.endLine
kStr += " for (unsigned int i = serial; i < MT0I*MT1J*LOCAL_SPLITU; i+=NUM_THREADS) {%s" % self.endLine
kStr += " printf(\\\"localLocalSplitU[%%06u] = %%10.0f, %%10.0f\\\\n\\\", i, localLocalSplitU[i], localLocalSplitU[i]);%s" \
% self.endLine
kStr += " }" + self.endLine
"""
return kStr
##############################################################################
# LocalSplitU: Local Read
##############################################################################
def localSplitULocalRead(self, kernel):
kStr = ""
for i in range(0, kernel["NumGlobalWriteVectorsPerThread"]):
for s in range(0, kernel["VectorWidth"]):
kStr += " rC[%u + %3u*GLOBAL_WRITE_VECTOR_WIDTH] = localLocalSplitU[%u + (serial+%u*NUM_THREADS)*GLOBAL_WRITE_VECTOR_WIDTH];%s" \
% (s, i, s, i, self.endLine)
kStr += self.endLine
return kStr
##############################################################################
# LocalSplitU: Reduction
##############################################################################
def localSplitUReduction(self, kernel):
kStr = ""
for r in range(1, kernel["LocalSplitU"]):
for i in range(0, kernel["NumGlobalWriteVectorsPerThread"]):
for s in range(0, kernel["GlobalWriteVectorWidth"]):
kStr += " rC[%u + %3u*GLOBAL_WRITE_VECTOR_WIDTH] += localLocalSplitU[(%u + serial*GLOBAL_WRITE_VECTOR_WIDTH+%u*NUM_THREADS*GLOBAL_WRITE_VECTOR_WIDTH + %u*MT%s*MT%s)];%s" \
% (s, i, s, i, r, self.tileChar0, self.tileChar1, self.endLine)
kStr += self.endLine
return kStr
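  # Illustration (not emitted): with LOCAL_SPLITU = 2 the loop above adds, for element
  # (s, i) = (0, 0), a line of roughly this shape -- MT0*MT1 stands for the macro-tile
  # product, whose actual macro names depend on the tile index chars:
  #
  #   rC[0] += localLocalSplitU[(0 + serial*GLOBAL_WRITE_VECTOR_WIDTH
  #                                + 0*NUM_THREADS*GLOBAL_WRITE_VECTOR_WIDTH + 1*MT0*MT1)];
  #
  # The r*MT0*MT1 stride selects the slab of local memory written by split-U group r = 1.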
##############################################################################
# extractGlobalCDims: Extract the packed dims from mask(s)
#
  # tensorIdx: 0 -> extract the packed C0 (tile 0) indices, 1 -> extract the packed C1 (tile 1) indices
##############################################################################
def extractGlobalCDims(self, kernel, base, tensorIdx):
kStr = ""
lastIndex = None
if tensorIdx == 0:
flattenedGlobalC = "flattenedGlobalC0"
elif tensorIdx == 1:
flattenedGlobalC = "flattenedGlobalC1"
first = 1
for idx in kernel["PackedC%sIndicesX"%tensorIdx]:
kStr += " globalC%s = " % (self.indexChars[idx])
if first:
        # first iteration just copies the flattened coordinate (flattenedGlobalC0/1) - no div / mod
first = 0
kStr += " %s" % (flattenedGlobalC)
if base:
kStr += " + %s" % base
kStr += ";" + self.endLine
else:
# later iterations extract dimension from previous using mod,
# then div to remove the extracted bits for next iteration
#kStr += "printf(\"pre: serial:%%u wg0:%%u wg1:%%u globalC0I:%%u globalC1J:%%u\\n\", serial, wg0I, wg1J, globalC0I, globalC1J);%s" % (self.endLine)
if kernel["MagicDivAlg"]:
c = globalParameters["IndexChars"][lastIndex]
if kernel["MagicDivAlg"]==1:
kStr += "MAGIC_DIV1(globalC%s, magicNumberSize%s, magicShiftSize%s);%s" \
% (self.indexChars[lastIndex], c, c, self.endLine)
elif kernel["MagicDivAlg"]==2:
kStr += "MAGIC_DIV2(globalC%s, magicStruct%s);%s" \
% (self.indexChars[lastIndex], c, self.endLine)
kStr += " globalC%s -= (globalC%s*size%s);%s" \
% (self.indexChars[lastIndex], self.indexChars[idx], \
self.indexChars[lastIndex], self.endLine)
else:
kStr += "(globalC%s) / size%s;%s" % (self.indexChars[lastIndex], self.indexChars[lastIndex], self.endLine)
kStr += " globalC%s %%= size%s;%s" % (self.indexChars[lastIndex], self.indexChars[lastIndex], self.endLine)
lastIndex = idx
#kStr += "printf(\"post: serial:%%u wg0:%%u wg1:%%u globalC0I:%%u globalCK=%%u\\n\", serial, wg0I, wg1J, globalC0I, globalCK);%s" % (self.endLine)
return kStr
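  # Worked example (comments only) of the div/mod extraction above, assuming two packed
  # C0 indices with hypothetical chars I and K, sizeI = 5, and flattenedGlobalC0 = 17:
  #   globalCI  = 17           (first packed index copies flattenedGlobalC0 + base)
  #   globalCK  = 17 / 5 = 3   (next index: divide the previous one by its size)
  #   globalCI %= 5       -> 2 (the previous index keeps only its remainder)
  # so the flattened coordinate 17 decomposes into (I, K) = (2, 3), with 2 + 3*5 = 17.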
##############################################################################
# globalWriteWorkGroupInit:
##############################################################################
def globalWriteWorkGroupInit(self, kernel):
return ""
##############################################################################
# LocalSplitU: Global Write Indices
##############################################################################
def localSplitUGlobalWriteIndices(self, kernel):
kStr = ""
# Add Index0
index0 = kernel["ProblemType"]["Index0"]
kStr += " unsigned int localC%s = (serial %% (MT%s/GLOBAL_WRITE_VECTOR_WIDTH))*GLOBAL_WRITE_VECTOR_WIDTH;%s" \
% (self.tileChar0, self.tileChar0, self.endLine)
kStr += " unsigned int globalC%s = (wg%s)" % (self.indexChars[index0], self.indexChars[index0])
kStr += "*MT%s + localC%s;%s" % (self.tileChar0, self.tileChar0, self.endLine)
# Save original flattened C0 before extracting batch components:
kStr += " unsigned int flattenedGlobalC0 = globalC%s;%s" \
% (self.indexChars[index0], self.endLine)
# Add Index1
index1 = kernel["ProblemType"]["Index1"]
kStr += " unsigned int localC%s = serial / (MT%s/GLOBAL_WRITE_VECTOR_WIDTH);%s" \
% (self.tileChar1, self.tileChar0, self.endLine)
kStr += " unsigned int globalC%s = (wg%s)" % (self.indexChars[index1], self.indexChars[index1])
kStr += "*MT%s + localC%s;%s" % (self.tileChar1, self.tileChar1, self.endLine)
kStr += " unsigned int flattenedGlobalC1 = globalC%s;%s" \
% (self.indexChars[index1], self.endLine)
for i in range(0, kernel["ProblemType"]["NumIndicesC"]):
if i != index0 and i != index1:
kStr += " unsigned int globalC%s = " \
% (self.indexChars[i])
if isPackedIndex(kernel,i):
kStr += "0; // define, will be set below%s" % (self.endLine)
else:
kStr += "(wg%s);%s" % (self.indexChars[i], self.endLine)
return kStr
##############################################################################
# LocalSplitU: Global Write
##############################################################################
def localSplitUGlobalWrite(self, kernel):
kStr = ""
packGranularity = kernel["PackGranularity"]
addTensorDimCheck0 = addTensorDimCheck1 = 0
for b in range(0, kernel["NumGlobalWriteVectorsPerThread"]):
loadOffset1 = " %u*CPSV" %b
if packGranularity==2:
addTensorDimCheck1 = 1
base1 = loadOffset1
loadOffset1 = "0"
for s in range(0, kernel["GlobalWriteVectorWidth"]):
loadOffset0 = "%u" % (s)
if packGranularity==2:
addTensorDimCheck0 = 1
base0 = loadOffset0
loadOffset0 = "0"
if kernel["EdgeType"] != "None":
if addTensorDimCheck0 or addTensorDimCheck1:
kStr += self.endLine
if addTensorDimCheck0:
kStr += " /* new 0 offset - inc and extract tensor dims */%s" % (self.endLine)
kStr += self.extractGlobalCDims(kernel, base0, 0)
addTensorDimCheck0 = 0
if addTensorDimCheck1:
kStr += " /* new 1 offset - inc and extract tensor dims */%s" % (self.endLine)
kStr += self.extractGlobalCDims(kernel, base1, 1)
addTensorDimCheck1 = 0
### Bounds checks:
# if packed, check flattened against product of all packed sizes
# The flattened base never changes so add all address offsets before comparison
globalC0ForCheck = "flattenedGlobalC0"
size0ForCheck = " * ".join(self.tPA["packedSizeList"])
kStr += " if (%s%s < %s) {" \
% (globalC0ForCheck, \
((" + %u" %s) if kernel["GlobalWriteVectorWidth"]>1 else ""), \
size0ForCheck)
globalC1ForCheck = "flattenedGlobalC1"
size1ForCheck = " * ".join(self.tPB["packedSizeList"])
kStr += " if (%s + %u*CPSV < %s) {" \
% (globalC1ForCheck, b, size1ForCheck)
kStr += " TYPE_MAC_WRITE( D[ GLOBAL_D( (%s)" % self.uint64Str
for i in range(0, kernel["ProblemType"]["NumIndicesC"]):
kStr += " globalC%s" % self.indexChars[i]
if i == kernel["ProblemType"]["Index0"] and kernel["GlobalWriteVectorWidth"]>1:
kStr += " + %s" % (loadOffset0)
if i == kernel["ProblemType"]["Index1"]:
kStr += " + %s" % (loadOffset1)
if i < kernel["ProblemType"]["NumIndicesC"]-1:
kStr += ", (%s)" % self.uint64Str
kStr += ") ]"
if kernel["ProblemType"]["UseBeta"]:
kStr += ", C[ GLOBAL_C( (%s)" % self.uint64Str
for i in range(0, kernel["ProblemType"]["NumIndicesC"]):
kStr += " globalC%s" % self.indexChars[i]
if i == kernel["ProblemType"]["Index0"] and kernel["GlobalWriteVectorWidth"]>1:
kStr += " + %s" % (loadOffset0)
if i == kernel["ProblemType"]["Index1"]:
kStr += " + %s" % (loadOffset1)
if i < kernel["ProblemType"]["NumIndicesC"]-1:
kStr += ", (%s)" % self.uint64Str
kStr += ") ]"
kStr += ", alpha"
kStr += ", rC[%u + %u*GLOBAL_WRITE_VECTOR_WIDTH]" % (s, b )
if kernel["ProblemType"]["UseBeta"]:
kStr += ", beta"
kStr += ")"
if kernel["EdgeType"] != "None":
kStr += "} }"
kStr += self.endLine
return kStr
##############################################################################
# Not LocalSplitU: Global Write Indices
##############################################################################
def notLocalSplitUGlobalWriteIndices(self, kernel):
kStr = ""
# Add Index0 and Index1:
index0 = kernel["ProblemType"]["Index0"]
kStr += " unsigned int flattenedGlobalC0 = "
kStr += "(wg%s)*MT%s + (serial %% SG%s)*VECTOR_WIDTH;%s" \
% (self.indexChars[index0], self.tileChar0, self.tileChar0, self.endLine)
index1 = kernel["ProblemType"]["Index1"]
kStr += " unsigned int flattenedGlobalC1 = "
kStr += "(wg%s)*MT%s + (serial / SG%s)*VECTOR_WIDTH;%s" \
% (self.indexChars[index1], self.tileChar1, self.tileChar0, self.endLine)
for i in range(0, kernel["ProblemType"]["NumIndicesC"]):
kStr += " unsigned int globalC%s = " % self.indexChars[i]
if i == index0 and len(kernel["PackedC0IndicesX"])==1:
kStr += "flattenedGlobalC0;"
elif i == index1 and len(kernel["PackedC1IndicesX"])==1:
kStr += "flattenedGlobalC1;"
elif isPackedIndex(kernel,i):
kStr += "0; // will be set below"
else:
#kStr += "printf(\"pre: serial:%%u wg0:%%u wg1:%%u globalC0I:%%u globalC1J:%%u\\n\", serial, wg0I, wg1J, globalC0I, globalC1J);%s" % (self.endLine)
kStr += "(wg%s);" % (self.indexChars[i])
kStr += "%s" % self.endLine
return kStr
##############################################################################
# Not LocalSplitU: Global Write
##############################################################################
def notLocalSplitUGlobalWrite(self, kernel):
kStr = ""
packGranularity = kernel["PackGranularity"]
addTensorDimCheck0 = addTensorDimCheck1 = 0
for b in range(0, kernel["ThreadTile1"]//kernel["VectorWidth"]):
for a in range(0, kernel["ThreadTile0"]//kernel["VectorWidth"]):
if packGranularity==2:
addTensorDimCheck0 = 1
base0 = " %u*SG%s*VECTOR_WIDTH" % (a,self.tileChar0)
for s1 in range(0, kernel["VectorWidth"]):
if packGranularity==2:
addTensorDimCheck1 = 1
base1 = "%u + %u*SG%s*VECTOR_WIDTH" % (s1, b,self.tileChar1)
offsetS1 = ""
else:
offsetS1 = ((" + %u"%s1) if kernel["VectorWidth"]>1 else "")
for s0 in range(0, kernel["VectorWidth"]):
# set default offsets, may be overridden in packed mode:
offsetS0 = ((" + %u"%s0) if kernel["VectorWidth"]>1 else "")
offset0 = "%s + %u*SG%s*VECTOR_WIDTH" \
% (offsetS0, a, self.tileChar0)
offset1 = "%s + %u*SG%s*VECTOR_WIDTH" % (\
((" + %u"%s1) if kernel["VectorWidth"]>1 else ""), \
b, self.tileChar1)
if kernel["EdgeType"] == "Branch":
kStr += " if (globalC%s + (VECTOR_WIDTH-1) + %u*SG%s*VECTOR_WIDTH < size%s) {" \
% (self.tileChar0, a, self.tileChar0, self.tileChar0)
kStr += " if (globalC%s + (VECTOR_WIDTH-1) + %u*SG%s*VECTOR_WIDTH < size%s) {" \
% (self.tileChar1, b, self.tileChar1, self.tileChar1)
elif kernel["EdgeType"] == "ShiftPtr":
if addTensorDimCheck0 or addTensorDimCheck1:
kStr += self.endLine
if addTensorDimCheck0:
kStr += " /* new vw0 offset - inc and extract tensor dims */%s" % (self.endLine)
kStr += self.extractGlobalCDims(kernel, base0, 0)
addTensorDimCheck0 = 0
if addTensorDimCheck1:
kStr += " /* new vw1 offset - inc and extract tensor dims */%s" % (self.endLine)
kStr += self.extractGlobalCDims(kernel, base1, 1)
addTensorDimCheck1 = 0
### Bounds checks:
# if packed, check flattened against product of all packed sizes
# The flattened base never changes so add all address offsets before comparison
if packGranularity == 2:
# base contains some addressing components, so just offset here:
offset0 = offsetS0
globalC0ForCheck = "flattenedGlobalC0"
size0ForCheck = " * ".join(self.tPA["packedSizeList"])
# Check 0 dimension against appropriate size limit
kStr += " if (%s%s + %u*SG%s*VECTOR_WIDTH < %s) {" \
% (globalC0ForCheck,
((" + %u"%s0) if kernel["VectorWidth"]>1 else ""), \
a, self.tileChar0, size0ForCheck)
if packGranularity == 2:
offset1 = offsetS1
globalC1ForCheck = "flattenedGlobalC1"
size1ForCheck = " * ".join(self.tPB["packedSizeList"])
kStr += " if (%s%s + %u*SG%s*VECTOR_WIDTH < %s) {" \
% (globalC1ForCheck,
((" + %u"%s1) if kernel["VectorWidth"]>1 else ""), \
b, self.tileChar1, size1ForCheck)
# Write the result
kStr += " TYPE_MAC_WRITE( D[ GLOBAL_D( (%s)" % self.uint64Str
for i in range(0, kernel["ProblemType"]["NumIndicesC"]):
kStr += " globalC%s" % self.indexChars[i]
if i == kernel["ProblemType"]["Index0"]:
kStr += offset0
if i == kernel["ProblemType"]["Index1"]:
kStr += offset1
if i < kernel["ProblemType"]["NumIndicesC"]-1:
kStr += ", (%s)" % self.uint64Str
kStr += ") ]"
if kernel["ProblemType"]["UseBeta"]:
kStr += ", C[ GLOBAL_C( (%s)" % self.uint64Str
for i in range(0, kernel["ProblemType"]["NumIndicesC"]):
kStr += " globalC%s" % self.indexChars[i]
if i == kernel["ProblemType"]["Index0"]:
kStr += offset0
if i == kernel["ProblemType"]["Index1"]:
kStr += offset1
if i < kernel["ProblemType"]["NumIndicesC"]-1:
kStr += ", (%s)" % self.uint64Str
kStr += ") ]"
kStr += ", alpha"
#kStr += ", rC[%d+%d*(TT%s/VECTOR_WIDTH)+%d*TT%s]%s" \
# % (a, s1, self.tileChar0, b, self.tileChar0, \
# ((".%s"%self.vectorComponents[s0]) if kernel["VectorWidth"]>1\
# else "") )
kStr += ", rC[%u*VECTOR_WIDTH+%u + (%u*VECTOR_WIDTH+%u)*TT%s]" \
% (a, s0, b, s1, self.tileChar0 )
if kernel["ProblemType"]["UseBeta"]:
kStr += ", beta"
kStr += ")"
if kernel["EdgeType"] != "None":
kStr += " } }"
kStr += self.endLine
return kStr
def openPrefetchAcrossPersistent(self, kernel):
return ""
def closePrefetchAcrossPersistent(self, kernel):
return ""
##############################################################################
# Function End
##############################################################################
def functionEnd(self, kernel, addLabel):
kStr = ""
if kernel["PersistentKernel"]:
kStr += " serialWgIter += %s(0);%s" \
% (self.getNumGroupsStr, self.endLine)
kStr += "} // End Persistent Loop" + self.endLine
kStr += self.endLine
kStr += "}" + self.endLine
return kStr
##############################################################################
# Function Suffix
##############################################################################
def functionSuffix(self, kernel):
kStr = ""
if globalParameters["MergeFiles"] and self.language == "HIP":
kStr += "#undef UNROLL%s" % self.endLine
kStr += "#undef LOCAL_SPLITU%s" % self.endLine
kStr += "#undef LOCAL_DEPTHU%s" % self.endLine
kStr += "#undef SG%s%s" % (self.tileChar0, self.endLine)
kStr += "#undef SG%s%s" % (self.tileChar1, self.endLine)
kStr += "#undef TT%s%s" % (self.tileChar0, self.endLine)
kStr += "#undef TT%s%s" % (self.tileChar1, self.endLine)
kStr += "#undef MT%s%s" % (self.tileChar0, self.endLine)
kStr += "#undef MT%s%s" % (self.tileChar1, self.endLine)
kStr += "#undef NLCA%s" % (self.endLine )
kStr += "#undef NLCB%s" % (self.endLine )
kStr += "#undef NLPA%s" % (self.endLine )
kStr += "#undef NLPB%s" % (self.endLine )
kStr += "#undef LSCA%s" % (self.endLine)
kStr += "#undef LSPA%s" % (self.endLine)
kStr += "#undef LSCB%s" % (self.endLine)
kStr += "#undef LSPB%s" % (self.endLine)
kStr += "#undef GLOBAL_C%s" % (self.endLine)
kStr += "#undef GLOBAL_OFFSET_A%s" % (self.endLine)
kStr += "#undef GLOBAL_OFFSET_B%s" % (self.endLine)
kStr += "#undef DATA_TYPE%s" % (self.endLine)
kStr += "#undef DEST_DATA_TYPE%s" % (self.endLine)
kStr += "#undef COMPUTE_DATA_TYPE%s" % (self.endLine)
#kStr += "#undef VECTOR_TYPE%s" % (self.endLine)
kStr += "#undef LDS_OFFSET_B%s" % (self.endLine)
kStr += "#undef LDS_OFFSET_BLK%s" % (self.endLine)
kStr += "#undef LDS_NUM_ELEMENTS%s" % (self.endLine)
kStr += "#undef NUM_THREADS%s" % (self.endLine)
kStr += "#undef WORK_GROUP_MAPPING%s" % (self.endLine)
kStr += "#undef VECTOR_WIDTH%s" % (self.endLine)
kStr += "#undef GLOBAL_LOAD_VECTOR_WIDTH_A%s" % (self.endLine)
kStr += "#undef GLOBAL_LOAD_VECTOR_WIDTH_B%s" % (self.endLine)
kStr += "#undef GLOBAL_WRITE_VECTOR_WIDTH%s" % (self.endLine)
kStr += "#undef MAC%s" % (self.endLine)
kStr += "#undef TYPE_MAC%s" % (self.endLine)
kStr += "#undef TYPE_MAC_WRITE%s" % (self.endLine)
kStr += "#undef GLOBAL_SPLITU%s" % (self.endLine)
# zero
kStr += "#undef SCALAR_ZERO%s" % (self.endLine )
kStr += "#undef SCALAR_OOB_DATA%s" % (self.endLine )
numMacs = 2 if kernel["PrefetchLocalRead"] else 1
for m in range(0, numMacs):
kStr += "#undef MAC_%ux%u" \
% (kernel["ThreadTile0"], kernel["ThreadTile1"])
if kernel["PrefetchLocalRead"]:
kStr += ("" if m==0 else "_BLK")
kStr += self.endLine
# initial strides
firstStride = 0
if kernel["ProblemType"]["UseInitialStridesCD"]:
lastStrideD = 0
lastStrideC = 0
else:
lastStrideD = 1
lastStrideC = 1
if kernel["ProblemType"]["UseInitialStridesAB"]:
lastStrideA = 0
lastStrideB = 0
else:
lastStrideA = 1
lastStrideB = 1
for i in range(firstStride, lastStrideD):
kStr += "#undef strideD" + self.indexChars[i] + self.endLine
for i in range(firstStride, lastStrideC):
kStr += "#undef strideC" + self.indexChars[i] + self.endLine
for i in range(firstStride, lastStrideA):
kStr += "#undef strideA" \
+ self.indexChars[kernel["ProblemType"]["IndexAssignmentsA"][i]] \
+ self.endLine
for i in range(firstStride, lastStrideB):
kStr += "#undef strideB" \
+ self.indexChars[kernel["ProblemType"]["IndexAssignmentsB"][i]] \
+ self.endLine
# other summation indices
for i in range(0,kernel["ProblemType"]["NumIndicesSummation"]-1):
index = i + kernel["ProblemType"]["NumIndicesC"]
kStr += "#undef globalReadOffsetA%s%s" \
% (self.indexChars[index], self.endLine)
kStr += "#undef globalReadOffsetB%s%s" \
% (self.indexChars[index], self.endLine)
kStr += self.endLine + self.endLine
return kStr
##############################################################################
# Kernel Body Prefix
##############################################################################
def kernelBodyPrefix(self, kernel, tPA, tPB ):
kStr = ""
kernelName = self.getKernelFileBase(kernel)
if not globalParameters["MergeFiles"]:
kStr += "\n"
kStr += "#include \"%s.h\"\n" % kernelName
kStr += "\n"
return kStr
##############################################################################
# Kernel Body Suffix
##############################################################################
def kernelBodySuffix(self, kernel, tPA, tPB ):
kStr = ""
kernelName = self.getKernelName(kernel)
if self.language == "OCL":
kStr += "std::string %s_src_concatenated = \n %s_src_0" \
% (kernelName, kernelName)
for i in range(1, self.stringIdx):
kStr += "\n + %s_src_%u" % (kernelName, i)
kStr += ";\n"
kStr += "const char * const %s_src = %s_src_concatenated.c_str();" \
% (kernelName, kernelName)
kStr += "\n"
return kStr
##############################################################################
# WaitCnt
##############################################################################
def wait(self, kernel, tPA, tPB, globalRead, localWrite, localRead, comment):
return ""
##############################################################################
# SyncThreads
##############################################################################
def syncThreads(self, kernel, comment=""):
return self.indent + self.syncStr + " //" + comment + self.endLine
##############################################################################
# MapAcctoArch
##############################################################################
def MapAcctoArchRegs(self, kernel, option):
return ""
##############################################################################
#
# Beta-Only Kernel
#
##############################################################################
##############################################################################
# Function Signature
##############################################################################
def functionSignatureBetaOnly(self, kernel):
kernelName = self.getKernelNameBetaOnly(kernel)
# determine chars for fast access
self.indexChars = []
for i in range(0, len(globalParameters["IndexChars"])):
self.indexChars.append(globalParameters["IndexChars"][i])
self.indexChars[kernel["ProblemType"]["Index0"]] \
= "0" + self.indexChars[kernel["ProblemType"]["Index0"]]
self.indexChars[kernel["ProblemType"]["Index1"]] \
= "1" + self.indexChars[kernel["ProblemType"]["Index1"]]
self.tileChar0 = self.indexChars[kernel["ProblemType"]["Index0"]]
self.tileChar1 = self.indexChars[kernel["ProblemType"]["Index1"]]
kStr = ""
# kernel name
if self.language == "OCL":
kStr += "__attribute__((reqd_work_group_size(8,8,1)))"
kStr += self.endLine
kStr += "__kernel "
else:
kStr += self.endLine
kStr += "extern \"C\"\n"
kStr += "__global__ "
kStr += "void %s" % ( kernelName )
kStr += "(" + self.endLine
# pointers
globalStr = "__global "
if self.language == "HIP":
#kStr += " hipLaunchParm lp," + self.endLine
globalStr = ""
restrictStr = "restrict"
if self.language == "HIP":
restrictStr = "__restrict__"
ptrStr = kernel["ProblemType"]["DestDataType"].toDevice(self.language)
kStr += " " + globalStr + ptrStr \
+ " *D,"
kStr += self.endLine
kStr += " " + globalStr + ptrStr \
+ " const * " + restrictStr + " C,"
kStr += self.endLine
# strides
firstStrideCD = 1
if kernel["ProblemType"]["UseInitialStridesCD"]:
firstStrideCD = 0
lastStrideC = kernel["ProblemType"]["NumIndicesC"]
for i in range(firstStrideCD, lastStrideC):
kStr += " unsigned int const strideD%s,%s" \
% (self.indexChars[i], self.endLine)
for i in range(firstStrideCD, lastStrideC):
kStr += " unsigned int const strideC%s,%s" \
% (self.indexChars[i], self.endLine)
# sizes
for i in range(0, kernel["ProblemType"]["NumIndicesC"]):
kStr += " unsigned int const size%s" % self.indexChars[i]
if i < kernel["ProblemType"]["NumIndicesC"]-1 \
or kernel["ProblemType"]["UseBeta"]:
kStr += ","
else:
kStr += ")"
kStr += self.endLine
# beta
if kernel["ProblemType"]["UseBeta"]:
kStr += " %s const beta)%s" \
% (kernel["ProblemType"]["ComputeDataType"].toDevice(self.language), \
self.endLine )
return kStr
##############################################################################
##############################################################################
def extractIndices(self, extractFrom, varPrefix, indices):
kStr = ""
for (i,index) in enumerate(indices):
kStr += " unsigned int " + varPrefix + self.indexChars[index] \
+ " = ( " + extractFrom
for j in reversed(list(range(i+1, len(indices)))):
index2 = indices[j]
kStr += " / size" + self.indexChars[index2]
kStr += ")"
#if i!=0:
if len(indices) > 1:
kStr += " % size" + self.indexChars[index]
kStr += ";" + self.endLine
return kStr
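  # Worked example (comments only) of the unflattening emitted above: for indices (a, b)
  # with sizes sizea = 4 and sizeb = 3 (hypothetical chars) and an extractFrom value of 10,
  # the generated statements compute
  #   wga = ( 10 / sizeb ) % sizea  ->  3
  #   wgb = ( 10 )         % sizeb  ->  1
  # which inverts the flattening 10 = wga*sizeb + wgb = 3*3 + 1.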
##############################################################################
# Kernel Body Beta-Only
##############################################################################
def kernelBodyBetaOnly(self, kernel):
kStr = ""
kStr += "{%s" % self.endLine
problemType = kernel["ProblemType"]
########################################
# defined initial strides
firstStride = 0
if problemType["UseInitialStridesCD"]:
# no strides #defined
lastStrideC = 0
      assert 0 # need to fix beta-clear routine to pass initial stride params
else:
# #define initial stride
kStr += "/* hard-coded initial strides */%s" \
% self.endLine
lastStrideC = 1
for i in range(firstStride, lastStrideC):
kStr += "#define strideD" + self.indexChars[i] + " 1" + self.endLine
for i in range(firstStride, lastStrideC):
kStr += "#define strideC" + self.indexChars[i] + " 1" + self.endLine
########################################
# GLOBAL_D()
kStr += "#define GLOBAL_D(IDX%s" % self.indexChars[0]
for i in range(1, problemType["NumIndicesC"]):
kStr += ", IDX%s" % self.indexChars[i]
indexChar = self.indexChars[0]
kStr += ") (( (IDX%s)*strideD%s" % (indexChar, indexChar)
for i in range(1, problemType["NumIndicesC"]):
indexChar = self.indexChars[i]
kStr += " + (IDX%s)*strideD%s" % (indexChar, indexChar)
kStr += " ))" + self.endLine
# GLOBAL_C()
kStr += "#define GLOBAL_C(IDX%s" % self.indexChars[0]
for i in range(1, problemType["NumIndicesC"]):
kStr += ", IDX%s" % self.indexChars[i]
indexChar = self.indexChars[0]
kStr += ") (( (IDX%s)*strideC%s" % (indexChar, indexChar)
for i in range(1, problemType["NumIndicesC"]):
indexChar = self.indexChars[i]
kStr += " + (IDX%s)*strideC%s" % (indexChar, indexChar)
kStr += " ))" + self.endLine
########################################
# wg d0, d1
#kStr += " unsigned int wg" + self.tileChar0 + " = " \
# + self.getGroupIdStr + "(0);" + self.endLine
#kStr += " unsigned int wg" + self.tileChar1 + " = " \
# + self.getGroupIdStr + "(1);" + self.endLine
########################################
# wg other : batch dims
freeIdxC0 = [idx for idx in range(problemType["NumIndicesC"]) \
if idx in problemType["IndexAssignmentsA"] and idx in problemType["IndicesFree"]]
freeIdxC1 = [idx for idx in range(problemType["NumIndicesC"]) \
if idx in problemType["IndexAssignmentsB"] and idx in problemType["IndicesFree"]]
batchSizes = "*".join(["size%s"%self.indexChars[idx] for idx in problemType["IndicesBatch"]])
freeSizesC0 = "*".join(["size%s"%self.indexChars[idx] for idx in freeIdxC0])
freeSizesC1 = "*".join(["size%s"%self.indexChars[idx] for idx in freeIdxC1])
t = []
if freeSizesC0:
t.append("(%s(0) >= %s)" % (self.getGlobalIdStr, freeSizesC0))
if freeSizesC1:
t.append("(%s(1) >= %s)" % (self.getGlobalIdStr, freeSizesC1))
if batchSizes:
t.append("(%s(2) >= %s)" % (self.getGlobalIdStr, batchSizes))
kStr += " if ("
kStr += "\n || ".join(t) + ")\n"
kStr += " return;\n"
kStr += self.extractIndices(self.getGroupIdStr+"(2)", "wg", problemType["IndicesBatch"])
kStr += self.extractIndices(self.getGlobalIdStr+"(0)", "globalC", freeIdxC0)
kStr += self.extractIndices(self.getGlobalIdStr+"(1)", "globalC", freeIdxC1)
########################################
# D index
kStr += " %s idxD = GLOBAL_D( (%s)" % (self.uint64Str, self.uint64Str)
kStr += ', '.join(["wg%s" % self.indexChars[i] if i in problemType["IndicesBatch"] else "globalC%s" % self.indexChars[i] \
for i in range(problemType["NumIndicesC"])])
kStr += ");%s" % (self.endLine)
# C index
kStr += " %s idxC = GLOBAL_C( (%s)" % (self.uint64Str, self.uint64Str)
kStr += ', '.join(["wg%s" % self.indexChars[i] if i in problemType["IndicesBatch"] else "globalC%s" % self.indexChars[i] \
for i in range(problemType["NumIndicesC"])])
kStr += ");%s" % (self.endLine)
#kStr += "printf(\\\"%%09llu\\\\n\\\", idx);%s" % (self.endLine)
########################################
# zero
kStr += "#define SCALAR_ZERO %s%s" % ( problemType[\
"DataType"].zeroString(self.language, 1), \
self.endLine )
########################################
    # write the output: D = beta*C (or zero when beta is zero / absent)
if problemType["UseBeta"]:
if problemType["DataType"].isComplex():
kStr += " if((beta.s0 == 0) && (beta.s1 == 0)) {%s" % self.endLine
else:
kStr += " if(beta == SCALAR_ZERO) {%s" % self.endLine
kStr += " D[idxD] = SCALAR_ZERO;%s" % self.endLine
kStr += " } else {%s" % self.endLine
kStr += " D[idxD] = C[idxC]*beta;%s" % self.endLine
kStr += " }%s" % self.endLine
else:
kStr += " D[idxD] = SCALAR_ZERO;%s" % (self.endLine)
########################################
# end
kStr += "}%s" % self.endLine
kStr += "#undef GLOBAL_D%s" % (self.endLine)
kStr += "#undef GLOBAL_C%s" % (self.endLine)
kStr += "#undef SCALAR_ZERO%s" % ( self.endLine)
kStr += "#undef SCALAR_OOB_DATA%s" % (self.endLine )
return kStr
| 47.118649 | 344 | 0.509403 |
2b0636d7d62aa88ec4ca851c3f566379fa1975f7 | 140,428 | py | Python | synapseclient/client.py | lingyunsong/synapse_scripts | a6fd694abbd970b2b78ef89130ef2e88b10a102e | [
"Apache-2.0"
] | null | null | null | synapseclient/client.py | lingyunsong/synapse_scripts | a6fd694abbd970b2b78ef89130ef2e88b10a102e | [
"Apache-2.0"
] | null | null | null | synapseclient/client.py | lingyunsong/synapse_scripts | a6fd694abbd970b2b78ef89130ef2e88b10a102e | [
"Apache-2.0"
] | null | null | null | """
**************
Synapse Client
**************
The `Synapse` object encapsulates a connection to the Synapse service and is
used for building projects, uploading and retrieving data, and recording
provenance of data analysis.
~~~~~
Login
~~~~~
.. automethod:: synapseclient.client.login
~~~~~~~
Synapse
~~~~~~~
.. autoclass:: synapseclient.Synapse
:members:
~~~~~~~~~~~~~~~~
More information
~~~~~~~~~~~~~~~~
See also the `Synapse API documentation <http://rest.synapse.org>`_.
"""
import ConfigParser
import collections
import os, sys, stat, re, json, time
import os.path
import base64, hashlib, hmac
import urllib, urlparse, requests, webbrowser
import zipfile
import mimetypes
import tempfile
import warnings
import getpass
import synapseclient
import synapseclient.utils as utils
import synapseclient.cache
import synapseclient.exceptions as exceptions
from synapseclient.exceptions import *
from synapseclient.version_check import version_check
from synapseclient.utils import id_of, get_properties, KB, MB, _is_json, _extract_synapse_id_from_query, nchunks, get_chunk, find_data_file_handle
from synapseclient.annotations import from_synapse_annotations, to_synapse_annotations
from synapseclient.annotations import to_submission_status_annotations, from_submission_status_annotations
from synapseclient.activity import Activity
from synapseclient.entity import Entity, File, Project, Folder, split_entity_namespaces, is_versionable, is_container
from synapseclient.table import Schema, Column, RowSet, Row, TableQueryResult, CsvFileTable
from synapseclient.dict_object import DictObject
from synapseclient.evaluation import Evaluation, Submission, SubmissionStatus
from synapseclient.wiki import Wiki, WikiAttachment
from synapseclient.retry import _with_retry
PRODUCTION_ENDPOINTS = {'repoEndpoint':'https://repo-prod.prod.sagebase.org/repo/v1',
'authEndpoint':'https://auth-prod.prod.sagebase.org/auth/v1',
'fileHandleEndpoint':'https://file-prod.prod.sagebase.org/file/v1',
'portalEndpoint':'https://www.synapse.org/'}
STAGING_ENDPOINTS = {'repoEndpoint':'https://repo-staging.prod.sagebase.org/repo/v1',
'authEndpoint':'https://auth-staging.prod.sagebase.org/auth/v1',
'fileHandleEndpoint':'https://file-staging.prod.sagebase.org/file/v1',
'portalEndpoint':'https://staging.synapse.org/'}
CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.synapseConfig')
SESSION_FILENAME = '.session'
FILE_BUFFER_SIZE = 4*KB
CHUNK_SIZE = 5*MB
QUERY_LIMIT = 1000
CHUNK_UPLOAD_POLL_INTERVAL = 1 # second
ROOT_ENTITY = 'syn4489'
PUBLIC = 273949 #PrincipalId of public "user"
AUTHENTICATED_USERS = 273948
DEBUG_DEFAULT = False
# Defines the standard retry policy applied to the rest methods
## The retry period needs to span a minute because sending
## messages is limited to 10 per 60 seconds.
STANDARD_RETRY_PARAMS = {"retry_status_codes": [502,503,504],
"retry_errors" : ["proxy error", "slow down", "timeout", "timed out",
"connection reset by peer", "unknown ssl protocol error",
"couldn't connect to host", "slowdown", "try again"],
"retry_exceptions" : ["ConnectionError", "Timeout", "timeout"],
"retries" : 8,
"wait" : 1,
"back_off" : 2}
# Add additional mimetypes
mimetypes.add_type('text/x-r', '.R', strict=False)
mimetypes.add_type('text/x-r', '.r', strict=False)
mimetypes.add_type('text/tab-separated-values', '.maf', strict=False)
mimetypes.add_type('text/tab-separated-values', '.bed5', strict=False)
mimetypes.add_type('text/tab-separated-values', '.bed', strict=False)
mimetypes.add_type('text/tab-separated-values', '.vcf', strict=False)
mimetypes.add_type('text/tab-separated-values', '.sam', strict=False)
mimetypes.add_type('text/yaml', '.yaml', strict=False)
mimetypes.add_type('text/x-markdown', '.md', strict=False)
mimetypes.add_type('text/x-markdown', '.markdown', strict=False)
def login(*args, **kwargs):
"""
    Convenience method to create a Synapse object and log in.
See :py:func:`synapseclient.Synapse.login` for arguments and usage.
Example::
import synapseclient
syn = synapseclient.login()
"""
syn = Synapse()
syn.login(*args, **kwargs)
return syn
def _test_import_sftp():
"""
Check if pysftp is installed and give instructions if not.
"""
try:
import pysftp
    except ImportError:
sys.stderr.write(
("\n\nLibraries required for SFTP are not installed!\n"
"The Synapse client uses pysftp in order to access SFTP storage "
"locations. This library in turn depends on pycrypto.\n"
"To install these libraries on Unix variants including OS X, make "
"sure the python devel libraries are installed, then:\n"
" (sudo) pip install pysftp\n\n"
"For Windows systems without a C/C++ compiler, install the appropriate "
"binary distribution of pycrypto from:\n"
" http://www.voidspace.org.uk/python/modules.shtml#pycrypto\n\n"
"For more information, see: http://python-docs.synapse.org/sftp.html"
"\n\n\n"))
raise
class Synapse:
"""
Constructs a Python client object for the Synapse repository service
:param repoEndpoint: Location of Synapse repository
:param authEndpoint: Location of authentication service
:param fileHandleEndpoint: Location of file service
:param portalEndpoint: Location of the website
:param debug: Print debugging messages if True
:param skip_checks: Skip version and endpoint checks
:param configPath: Path to config File with setting for Synapse
defaults to ~/.synapseConfig
Typically, no parameters are needed::
import synapseclient
syn = synapseclient.Synapse()
See:
- :py:func:`synapseclient.Synapse.login`
- :py:func:`synapseclient.Synapse.setEndpoints`
"""
def __init__(self, repoEndpoint=None, authEndpoint=None, fileHandleEndpoint=None, portalEndpoint=None,
debug=DEBUG_DEFAULT, skip_checks=False, configPath=CONFIG_FILE):
# Check for a config file
self.configPath=configPath
        if os.path.isfile(configPath):
            config = self.getConfigFile(configPath)
            if config.has_option('cache', 'location'):
                self.cache = synapseclient.cache.Cache(cache_root_dir=config.get('cache', 'location'))
            else:
                self.cache = synapseclient.cache.Cache()
            if config.has_section('debug'):
                debug = True
        else:
            # No config file: fall back to the default cache location and,
            # in debug mode, alert the user that defaults are being used
            self.cache = synapseclient.cache.Cache()
            if debug:
                sys.stderr.write("Could not find a config file (%s). Using defaults.\n" % os.path.abspath(configPath))
self.setEndpoints(repoEndpoint, authEndpoint, fileHandleEndpoint, portalEndpoint, skip_checks)
self.default_headers = {'content-type': 'application/json; charset=UTF-8', 'Accept': 'application/json; charset=UTF-8'}
self.username = None
self.apiKey = None
self.debug = debug
self.skip_checks = skip_checks
self.table_query_sleep = 2
self.table_query_backoff = 1.1
self.table_query_max_sleep = 20
self.table_query_timeout = 300
def getConfigFile(self, configPath):
"""Returns a ConfigParser populated with properties from the user's configuration file."""
try:
config = ConfigParser.ConfigParser()
config.read(configPath) # Does not fail if the file does not exist
return config
except ConfigParser.Error:
sys.stderr.write('Error parsing Synapse config file: %s' % configPath)
raise
def setEndpoints(self, repoEndpoint=None, authEndpoint=None, fileHandleEndpoint=None, portalEndpoint=None, skip_checks=False):
"""
Sets the locations for each of the Synapse services (mostly useful for testing).
:param repoEndpoint: Location of synapse repository
:param authEndpoint: Location of authentication service
:param fileHandleEndpoint: Location of file service
:param portalEndpoint: Location of the website
:param skip_checks: Skip version and endpoint checks
To switch between staging and production endpoints::
syn.setEndpoints(**synapseclient.client.STAGING_ENDPOINTS)
syn.setEndpoints(**synapseclient.client.PRODUCTION_ENDPOINTS)
"""
endpoints = {'repoEndpoint' : repoEndpoint,
'authEndpoint' : authEndpoint,
'fileHandleEndpoint' : fileHandleEndpoint,
'portalEndpoint' : portalEndpoint}
# For unspecified endpoints, first look in the config file
config = self.getConfigFile(self.configPath)
for point in endpoints.keys():
if endpoints[point] is None and config.has_option('endpoints', point):
endpoints[point] = config.get('endpoints', point)
# Endpoints default to production
for point in endpoints.keys():
if endpoints[point] is None:
endpoints[point] = PRODUCTION_ENDPOINTS[point]
# Update endpoints if we get redirected
if not skip_checks:
response = requests.get(endpoints[point], allow_redirects=False, headers=synapseclient.USER_AGENT)
if response.status_code == 301:
endpoints[point] = response.headers['location']
self.repoEndpoint = endpoints['repoEndpoint']
self.authEndpoint = endpoints['authEndpoint']
self.fileHandleEndpoint = endpoints['fileHandleEndpoint']
self.portalEndpoint = endpoints['portalEndpoint']
def login(self, email=None, password=None, apiKey=None, sessionToken=None, rememberMe=False, silent=False):
"""
Authenticates the user using the given credentials (in order of preference):
- supplied email and password
- supplied email and API key (base 64 encoded)
- supplied session token
- supplied email and cached API key
- most recent cached email and API key
- email in the configuration file and cached API key
- email and API key in the configuration file
        - email and password in the configuration file
- session token in the configuration file
:param apiKey: Base64 encoded
:param rememberMe: Whether the authentication information should be cached locally
for usage across sessions and clients.
:param silent: Defaults to False. Suppresses the "Welcome ...!" message.
Example::
syn.login('me@somewhere.com', 'secret-password', rememberMe=True)
#> Welcome, Me!
After logging in with the *rememberMe* flag set, an API key will be cached and
used to authenticate for future logins::
syn.login()
#> Welcome, Me!
"""
# Note: the order of the logic below reflects the ordering in the docstring above.
# Check version before logging in
if not self.skip_checks: version_check(synapseclient.__version__)
# Make sure to invalidate the existing session
self.logout()
if email is not None and password is not None:
self.username = email
sessionToken = self._getSessionToken(email=self.username, password=password)
self.apiKey = self._getAPIKey(sessionToken)
elif email is not None and apiKey is not None:
self.username = email
self.apiKey = base64.b64decode(apiKey)
elif sessionToken is not None:
try:
self._getSessionToken(sessionToken=sessionToken)
self.username = self.getUserProfile(sessionToken=sessionToken)['userName']
self.apiKey = self._getAPIKey(sessionToken)
except SynapseAuthenticationError:
# Session token is invalid
pass
# If supplied arguments are not enough
# Try fetching the information from the API key cache
if self.apiKey is None:
cachedSessions = self._readSessionCache()
if email is None and "<mostRecent>" in cachedSessions:
email = cachedSessions["<mostRecent>"]
if email is not None and email in cachedSessions:
self.username = email
self.apiKey = base64.b64decode(cachedSessions[email])
# Resort to reading the configuration file
if self.apiKey is None:
# Resort to checking the config file
config = ConfigParser.ConfigParser()
try:
config.read(self.configPath)
except ConfigParser.Error:
sys.stderr.write('Error parsing Synapse config file: %s' % self.configPath)
raise
if config.has_option('authentication', 'username'):
                self.username = config.get('authentication', 'username')
if self.username in cachedSessions:
self.apiKey = base64.b64decode(cachedSessions[self.username])
# Just use the configuration file
if self.apiKey is None:
if config.has_option('authentication', 'username') and config.has_option('authentication', 'apikey'):
self.username = config.get('authentication', 'username')
self.apiKey = base64.b64decode(config.get('authentication', 'apikey'))
elif config.has_option('authentication', 'username') and config.has_option('authentication', 'password'):
self.username = config.get('authentication', 'username')
password = config.get('authentication', 'password')
token = self._getSessionToken(email=self.username, password=password)
self.apiKey = self._getAPIKey(token)
elif config.has_option('authentication', 'sessiontoken'):
sessionToken = config.get('authentication', 'sessiontoken')
try:
self._getSessionToken(sessionToken=sessionToken)
self.username = self.getUserProfile(sessionToken=sessionToken)['userName']
self.apiKey = self._getAPIKey(sessionToken)
except SynapseAuthenticationError:
raise SynapseAuthenticationError("No credentials provided. Note: the session token within your configuration file has expired.")
# Final check on login success
if self.username is not None and self.apiKey is None:
raise SynapseAuthenticationError("No credentials provided.")
# Save the API key in the cache
if rememberMe:
cachedSessions = self._readSessionCache()
cachedSessions[self.username] = base64.b64encode(self.apiKey)
# Note: make sure this key cannot conflict with usernames by using invalid username characters
cachedSessions["<mostRecent>"] = self.username
self._writeSessionCache(cachedSessions)
if not silent:
profile = self.getUserProfile(refresh=True)
sys.stdout.write(("Welcome, %s!\n" % (profile['displayName'] if 'displayName' in profile else self.username)).encode('utf-8'))
def _getSessionToken(self, email=None, password=None, sessionToken=None):
"""Returns a validated session token."""
if email is not None and password is not None:
# Login normally
try:
req = {'email' : email, 'password' : password}
session = self.restPOST('/session', body=json.dumps(req), endpoint=self.authEndpoint, headers=self.default_headers)
return session['sessionToken']
except SynapseHTTPError as err:
if err.response.status_code == 403 or err.response.status_code == 404:
raise SynapseAuthenticationError("Invalid username or password.")
raise
elif sessionToken is not None:
# Validate the session token
try:
token = {'sessionToken' : sessionToken}
response = self.restPUT('/session', body=json.dumps(token), endpoint=self.authEndpoint, headers=self.default_headers)
# Success!
return sessionToken
except SynapseHTTPError as err:
if err.response.status_code == 401:
raise SynapseAuthenticationError("Supplied session token (%s) is invalid." % sessionToken)
raise
else:
raise SynapseAuthenticationError("No credentials provided.")
def _getAPIKey(self, sessionToken):
"""Uses a session token to fetch an API key."""
headers = {'sessionToken' : sessionToken, 'Accept': 'application/json'}
secret = self.restGET('/secretKey', endpoint=self.authEndpoint, headers=headers)
return base64.b64decode(secret['secretKey'])
def _readSessionCache(self):
"""Returns the JSON contents of CACHE_DIR/SESSION_FILENAME."""
sessionFile = os.path.join(self.cache.cache_root_dir, SESSION_FILENAME)
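        # The session file is a small JSON object mapping usernames to base64-encoded
        # API keys, plus a "<mostRecent>" entry naming the last user to log in, e.g.
        # (illustrative values only):
        #   {"me@somewhere.com": "c29tZS1hcGkta2V5", "<mostRecent>": "me@somewhere.com"}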
if os.path.isfile(sessionFile):
try:
                with open(sessionFile, 'r') as file:
                    return json.load(file)
            except (IOError, ValueError):
                pass
return {}
def _writeSessionCache(self, data):
"""Dumps the JSON data into CACHE_DIR/SESSION_FILENAME."""
sessionFile = os.path.join(self.cache.cache_root_dir, SESSION_FILENAME)
with open(sessionFile, 'w') as file:
json.dump(data, file)
file.write('\n') # For compatibility with R's JSON parser
def _loggedIn(self):
"""Test whether the user is logged in to Synapse."""
if self.apiKey is None or self.username is None:
return False
try:
user = self.restGET('/userProfile')
if 'displayName' in user:
if user['displayName'] == 'Anonymous':
# No session token, not logged in
return False
return user['displayName']
except SynapseHTTPError as err:
if err.response.status_code == 401:
return False
raise
def logout(self, forgetMe=False):
"""
Removes authentication information from the Synapse client.
:param forgetMe: Set as True to clear any local storage of authentication information.
See the flag "rememberMe" in :py:func:`synapseclient.Synapse.login`.
"""
# Since this client does not store the session token,
# it cannot REST DELETE /session
# Delete the user's API key from the cache
if forgetMe:
cachedSessions = self._readSessionCache()
if self.username in cachedSessions:
del cachedSessions[self.username]
self._writeSessionCache(cachedSessions)
# Remove the authentication information from memory
self.username = None
self.apiKey = None
def invalidateAPIKey(self):
"""Invalidates authentication across all clients."""
# Logout globally
if self._loggedIn():
self.restDELETE('/secretKey', endpoint=self.authEndpoint)
@utils.memoize
def getUserProfile(self, id=None, sessionToken=None, refresh=False):
"""
Get the details about a Synapse user.
Retrieves information on the current user if 'id' is omitted.
:param id: The 'userId' (aka 'ownerId') of a user or the userName
:param sessionToken: The session token to use to find the user profile
        :param refresh: If set to True, always fetch the data from Synapse;
                        otherwise cached information may be used
:returns: JSON-object
Example::
my_profile = syn.getUserProfile()
freds_profile = syn.getUserProfile('fredcommo')
"""
try:
## if id is unset or a userID, this will succeed
id = '' if id is None else int(id)
except ValueError:
principals = self._findPrincipals(id)
for principal in principals:
                if principal.get('userName', '').lower() == id.lower():
id = principal['ownerId']
break
else: # no break
                raise ValueError('Can\'t find user "%s"' % id)
uri = '/userProfile/%s' % id
return DictObject(**self.restGET(uri, headers={'sessionToken' : sessionToken} if sessionToken else None))
def _findPrincipals(self, query_string):
"""
Find users or groups by name or email.
:returns: A list of userGroupHeader objects with fields displayName, email, firstName, lastName, isIndividual, ownerId
Example::
syn._findPrincipals('test')
[{u'displayName': u'Synapse Test',
u'email': u'syn...t@sagebase.org',
u'firstName': u'Synapse',
u'isIndividual': True,
u'lastName': u'Test',
u'ownerId': u'1560002'},
{u'displayName': ... }]
"""
uri = '/userGroupHeaders?prefix=%s' % query_string
return [DictObject(**result) for result in self._GET_paginated(uri)]
def onweb(self, entity, subpageId=None):
"""
Opens up a browser window to the entity page or wiki-subpage.
:param entity: Either an Entity or a Synapse ID
:param subpageId: (Optional) ID of one of the wiki's sub-pages
"""
if subpageId is None:
webbrowser.open("%s#!Synapse:%s" % (self.portalEndpoint, id_of(entity)))
else:
webbrowser.open("%s#!Wiki:%s/ENTITY/%s" % (self.portalEndpoint, id_of(entity), subpageId))
def printEntity(self, entity):
"""Pretty prints an Entity."""
if utils.is_synapse_id(entity):
entity = self._getEntity(entity)
try:
print json.dumps(entity, sort_keys=True, indent=2)
except TypeError:
print str(entity)
############################################################
## Get / Store methods ##
############################################################
def get(self, entity, **kwargs):
"""
Gets a Synapse entity from the repository service.
:param entity: A Synapse ID, a Synapse Entity object,
a plain dictionary in which 'id' maps to a Synapse ID or
a local file that is stored in Synapse (found by hash of file)
:param version: The specific version to get.
Defaults to the most recent version.
:param downloadFile: Whether associated files(s) should be downloaded.
Defaults to True
:param downloadLocation: Directory where to download the Synapse File Entity.
Defaults to the local cache.
:param ifcollision: Determines how to handle file collisions.
May be "overwrite.local", "keep.local", or "keep.both".
Defaults to "keep.both".
        :param limitSearch: a Synapse ID used to limit the search in Synapse if entity is
specified as a local file. That is, if the file is stored in multiple
locations in Synapse only the ones in the specified folder/project will be
returned.
:returns: A new Synapse Entity object of the appropriate type
Example::
## download file into cache
entity = syn.get('syn1906479')
print entity.name
print entity.path
## download file into current working directory
entity = syn.get('syn1906479', downloadLocation='.')
print entity.name
print entity.path
            ## Determine the provenance of a locally stored file as indicated in Synapse
entity = syn.get('/path/to/file.txt', limitSearch='syn12312')
print syn.getProvenance(entity)
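
            ## Fetch a specific version and keep any modified local copy on collision
            ## (the version number shown here is illustrative)
            entity = syn.get('syn1906479', version=1, ifcollision="keep.local")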
"""
#If entity is a local file determine the corresponding synapse entity
if isinstance(entity, basestring) and os.path.isfile(entity):
bundle = self.__getFromFile(entity, kwargs.get('limitSearch', None))
kwargs['downloadFile'] = False
kwargs['path'] = entity
elif isinstance(entity, basestring) and not utils.is_synapse_id(entity):
            raise SynapseFileNotFoundError(('The parameter %s is neither a local file path '
                                            'nor a valid entity id' % entity))
else:
version = kwargs.get('version', None)
bundle = self._getEntityBundle(entity, version)
# Check and warn for unmet access requirements
if len(bundle['unmetAccessRequirements']) > 0:
warning_message = ("\nWARNING: This entity has access restrictions. Please visit the "
"web page for this entity (syn.onweb(\"%s\")). Click the downward "
"pointing arrow next to the file's name to review and fulfill its "
"download requirement(s).\n" % id_of(entity))
if kwargs.get('downloadFile', True):
raise SynapseUnmetAccessRestrictions(warning_message)
warnings.warn(warning_message)
return self._getWithEntityBundle(entityBundle=bundle, entity=entity, **kwargs)
def __getFromFile(self, filepath, limitSearch=None):
"""
Gets a Synapse entityBundle based on the md5 of a local file
See :py:func:`synapseclient.Synapse.get`.
:param filepath: path to local file
:param limitSearch: Limits the places in Synapse where the file is searched for.
"""
results = self.restGET('/entity/md5/%s' %utils.md5_for_file(filepath).hexdigest())['results']
if limitSearch is not None:
#Go through and find the path of every entity found
paths = [self.restGET('/entity/%s/path' %ent['id']) for ent in results]
#Filter out all entities whose path does not contain limitSearch
results = [ent for ent, path in zip(results, paths) if
utils.is_in_path(limitSearch, path)]
if len(results)==0: #None found
raise SynapseFileNotFoundError('File %s not found in Synapse' % (filepath,))
elif len(results)>1:
sys.stderr.write('\nWARNING: The file %s is associated with many entities in Synapse. '
'You can limit to a specific project or folder by setting the '
'limitSearch to a synapse Id. Will use the first one returned: '
'%s version %i\n' %(filepath, results[0]['id'], results[0]['versionNumber']))
entity = results[0]
bundle = self._getEntityBundle(entity)
self.cache.add(file_handle_id=bundle['entity']['dataFileHandleId'], path=filepath)
return bundle
def _getWithEntityBundle(self, entityBundle, entity=None, **kwargs):
"""
Creates a :py:mod:`synapseclient.Entity` from an entity bundle returned by Synapse.
An existing Entity can be supplied in case we want to refresh a stale Entity.
:param entityBundle: Uses the given dictionary as the meta information of the Entity to get
:param entity: Optional, entity whose local state will be copied into the returned entity
:param submission: Optional, access associated files through a submission rather than
through an entity.
See :py:func:`synapseclient.Synapse.get`.
See :py:func:`synapseclient.Synapse._getEntityBundle`.
See :py:mod:`synapseclient.Entity`.
"""
# Note: This version overrides the version of 'entity' (if the object is Mappable)
version = kwargs.get('version', None)
downloadFile = kwargs.get('downloadFile', True)
downloadLocation = kwargs.get('downloadLocation', None)
ifcollision = kwargs.get('ifcollision', 'keep.both')
submission = kwargs.get('submission', None)
# Make a fresh copy of the Entity
local_state = entity.local_state() if entity and isinstance(entity, Entity) else {}
if 'path' in kwargs:
local_state['path'] = kwargs['path']
properties = entityBundle['entity']
annotations = from_synapse_annotations(entityBundle['annotations'])
entity = Entity.create(properties, annotations, local_state)
if isinstance(entity, File):
fileName = entity['name']
# Fill in information about the file, even if we don't download it
# Note: fileHandles will be an empty list if there are unmet access requirements
for handle in entityBundle['fileHandles']:
if handle['id'] == entityBundle['entity']['dataFileHandleId']:
entity.md5 = handle.get('contentMd5', '')
entity.fileSize = handle.get('contentSize', None)
entity.contentType = handle.get('contentType', None)
fileName = handle['fileName']
if handle['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle':
entity['externalURL'] = handle['externalURL']
                        # Determine whether the storage location for this entity matches the URL of the
                        # project, to decide whether it should be synapseStore'd in the future.
                        # This can fail with a 404 for submissions whose original entity has been deleted.
try:
storageLocation = self.__getStorageLocation(entity)
entity['synapseStore'] = utils.is_same_base_url(storageLocation.get('url', 'S3'), entity['externalURL'])
except SynapseHTTPError:
warnings.warn("Can't get storage location for entity %s" % entity['id'])
if not downloadFile:
return entity
# Make sure the download location is a fully resolved directory
if downloadLocation is not None:
downloadLocation = os.path.expanduser(downloadLocation)
if os.path.isfile(downloadLocation):
raise ValueError("Parameter 'downloadLocation' should be a directory, not a file.")
# Determine if the file should be downloaded
# downloadPath = None if downloadLocation is None else os.path.join(downloadLocation, fileName)
# if downloadFile:
# downloadFile = cache.local_file_has_changed(entityBundle, True, downloadPath)
# # Determine where the file should be downloaded to
# if downloadFile:
# _, localPath, _ = cache.determine_local_file_location(entityBundle)
cached_file_path = self.cache.get(file_handle_id=entityBundle['entity']['dataFileHandleId'], path=downloadLocation)
# if we found a cached copy, return it
# if downloadFile
# download it
# add it to the cache
if cached_file_path is not None:
entity.path = cached_file_path
entity.files = [None if cached_file_path is None else os.path.basename(cached_file_path)]
entity.cacheDir = None if cached_file_path is None else os.path.dirname(cached_file_path)
elif downloadFile:
# By default, download to the local cache
if downloadLocation is None:
downloadLocation = self.cache.get_cache_dir(entityBundle['entity']['dataFileHandleId'])
downloadPath = os.path.join(downloadLocation, fileName)
# If the file already exists but has been modified since caching
if os.path.exists(downloadPath):
if ifcollision == "overwrite.local":
pass
elif ifcollision == "keep.local":
downloadFile = False
elif ifcollision == "keep.both":
downloadPath = utils.unique_filename(downloadPath)
else:
raise ValueError('Invalid parameter: "%s" is not a valid value '
'for "ifcollision"' % ifcollision)
entity.update(self._downloadFileEntity(entity, downloadPath, submission))
self.cache.add(file_handle_id=entityBundle['entity']['dataFileHandleId'], path=downloadPath)
if 'path' in entity and (entity['path'] is None or not os.path.exists(entity['path'])):
entity['synapseStore'] = False
return entity
def store(self, obj, **kwargs):
"""
Creates a new Entity or updates an existing Entity,
uploading any files in the process.
:param obj: A Synapse Entity, Evaluation, or Wiki
:param used: The Entity, Synapse ID, or URL
used to create the object
:param executed: The Entity, Synapse ID, or URL
representing code executed to create the object
:param activity: Activity object specifying the user's provenance
:param activityName: Activity name to be used in conjunction with *used* and *executed*.
:param activityDescription: Activity description to be used in conjunction with *used* and *executed*.
:param createOrUpdate: Indicates whether the method should automatically perform an update if the 'obj'
conflicts with an existing Synapse object. Defaults to True.
:param forceVersion: Indicates whether the method should increment the version of the object even if
nothing has changed. Defaults to True.
:param versionLabel: Arbitrary string used to label the version.
:param isRestricted: If set to true, an email will be sent to the Synapse access control team
to start the process of adding terms-of-use
or review board approval for this entity.
You will be contacted with regards to the specific data being restricted
and the requirements of access.
:returns: A Synapse Entity, Evaluation, or Wiki
Example::
from synapseclient import Project
project = Project('My uniquely named project')
project = syn.store(project)
Adding files with `provenance <Activity.html>`_::
from synapseclient import File, Activity
## A synapse entity *syn1906480* contains data
## entity *syn1917825* contains code
activity = Activity(
'Fancy Processing',
description='No seriously, really fancy processing',
used=['syn1906480', 'http://data_r_us.com/fancy/data.txt'],
executed='syn1917825')
test_entity = File('/path/to/data/file.xyz', description='Fancy new data', parent=project)
test_entity = syn.store(test_entity, activity=activity)
"""
createOrUpdate = kwargs.get('createOrUpdate', True)
forceVersion = kwargs.get('forceVersion', True)
versionLabel = kwargs.get('versionLabel', None)
isRestricted = kwargs.get('isRestricted', False)
## _before_store hook
## give objects a chance to do something before being stored
if hasattr(obj, '_before_synapse_store'):
obj._before_synapse_store(self)
## _synapse_store hook
## for objects that know how to store themselves
if hasattr(obj, '_synapse_store'):
return obj._synapse_store(self)
# Handle all non-Entity objects
if not (isinstance(obj, Entity) or type(obj) == dict):
if isinstance(obj, Wiki):
return self._storeWiki(obj)
if 'id' in obj: # If ID is present, update
obj.update(self.restPUT(obj.putURI(), obj.json()))
return obj
try: # If no ID is present, attempt to POST the object
obj.update(self.restPOST(obj.postURI(), obj.json()))
return obj
except SynapseHTTPError as err:
# If already present and we want to update attempt to get the object content
if createOrUpdate and err.response.status_code == 409:
newObj = self.restGET(obj.getByNameURI(obj.name))
newObj.update(obj)
obj = obj.__class__(**newObj)
obj.update(self.restPUT(obj.putURI(), obj.json()))
return obj
raise
# If the input object is an Entity or a dictionary
entity = obj
properties, annotations, local_state = split_entity_namespaces(entity)
bundle = None
# Anything with a path is treated as a cache-able item
if entity.get('path', False):
if 'concreteType' not in properties:
properties['concreteType'] = File._synapse_entity_type
# Make sure the path is fully resolved
entity['path'] = os.path.expanduser(entity['path'])
# Check if the File already exists in Synapse by fetching metadata on it
bundle = self._getEntityBundle(entity)
if bundle:
# Check if the file should be uploaded
fileHandle = find_data_file_handle(bundle)
if fileHandle and fileHandle['concreteType'] == "org.sagebionetworks.repo.model.file.ExternalFileHandle":
needs_upload = False
else:
cached_path = self.cache.get(bundle['entity']['dataFileHandleId'], entity['path']) if bundle else None
needs_upload = cached_path is None
else:
needs_upload = True
if needs_upload:
fileLocation, local_state = self.__uploadExternallyStoringProjects(entity, local_state)
fileHandle = self._uploadToFileHandleService(fileLocation, \
synapseStore=entity.get('synapseStore', True),
mimetype=local_state.get('contentType', None))
properties['dataFileHandleId'] = fileHandle['id']
## Add file to cache, unless it's an external URL
if fileHandle['concreteType'] != "org.sagebionetworks.repo.model.file.ExternalFileHandle":
self.cache.add(fileHandle['id'], path=entity['path'])
elif 'dataFileHandleId' not in properties:
# Handle the case where the Entity lacks an ID
# But becomes an update() due to conflict
properties['dataFileHandleId'] = bundle['entity']['dataFileHandleId']
# Create or update Entity in Synapse
if 'id' in properties:
properties = self._updateEntity(properties, forceVersion, versionLabel)
else:
try:
properties = self._createEntity(properties)
except SynapseHTTPError as ex:
if createOrUpdate and ex.response.status_code == 409:
# Get the existing Entity's ID via the name and parent
existing_entity_id = self._findEntityIdByNameAndParent(properties['name'], properties.get('parentId', ROOT_ENTITY))
if existing_entity_id is None: raise
# get existing properties and annotations
if not bundle:
bundle = self._getEntityBundle(existing_entity_id, bitFlags=0x1|0x2)
# Need some fields from the existing entity: id, etag, and version info.
existing_entity = bundle['entity']
# Update the conflicting Entity
existing_entity.update(properties)
properties = self._updateEntity(existing_entity, forceVersion, versionLabel)
# Merge new annotations with existing annotations
existing_annos = from_synapse_annotations(bundle['annotations'])
existing_annos.update(annotations)
annotations = existing_annos
else:
raise
# Deal with access restrictions
if isRestricted:
self._createAccessRequirementIfNone(properties)
# Update annotations
annotations['etag'] = properties['etag']
annotations = self.setAnnotations(properties, annotations)
properties['etag'] = annotations['etag']
# If the parameters 'used' or 'executed' are given, create an Activity object
activity = kwargs.get('activity', None)
used = kwargs.get('used', None)
executed = kwargs.get('executed', None)
if used or executed:
if activity is not None:
raise SynapseProvenanceError('Provenance can be specified as an Activity object or as used/executed item(s), but not both.')
activityName = kwargs.get('activityName', None)
activityDescription = kwargs.get('activityDescription', None)
activity = Activity(name=activityName, description=activityDescription, used=used, executed=executed)
# If we have an Activity, set it as the Entity's provenance record
if activity:
activity = self.setProvenance(properties, activity)
# 'etag' has changed, so get the new Entity
properties = self._getEntity(properties)
# Return the updated Entity object
return Entity.create(properties, annotations, local_state)
def _createAccessRequirementIfNone(self, entity):
"""
Checks to see if the given entity has access requirements.
If not, then one is added
"""
existingRestrictions = self.restGET('/entity/%s/accessRequirement' % id_of(entity))
if existingRestrictions['totalNumberOfResults'] <= 0:
self.restPOST('/entity/%s/lockAccessRequirement' % id_of(entity), body="")
def _getEntityBundle(self, entity, version=None, bitFlags=0x800 | 0x400 | 0x2 | 0x1):
"""
Gets some information about the Entity.
:parameter entity: a Synapse Entity or Synapse ID
:parameter version: the entity's version (defaults to None meaning most recent version)
:parameter bitFlags: Bit flags representing which entity components to return
EntityBundle bit-flags (see the Java class org.sagebionetworks.repo.model.EntityBundle)::
ENTITY = 0x1
ANNOTATIONS = 0x2
PERMISSIONS = 0x4
ENTITY_PATH = 0x8
ENTITY_REFERENCEDBY = 0x10
HAS_CHILDREN = 0x20
ACL = 0x40
ACCESS_REQUIREMENTS = 0x200
UNMET_ACCESS_REQUIREMENTS = 0x400
FILE_HANDLES = 0x800
For example, we might ask for an entity bundle containing file handles, annotations, and properties::
bundle = syn._getEntityBundle('syn111111', bitFlags=0x800|0x2|0x1)
:returns: An EntityBundle with the requested fields or by default Entity header, annotations, unmet access requirements, and file handles
"""
# If 'entity' is given without an ID, try to find it by 'parentId' and 'name'.
# Use case:
# If the user forgets to catch the return value of a syn.store(e)
# this allows them to recover by doing: e = syn.get(e)
if isinstance(entity, collections.Mapping) and 'id' not in entity and 'name' in entity:
entity = self._findEntityIdByNameAndParent(entity['name'], entity.get('parentId',ROOT_ENTITY))
# Avoid an exception from finding an ID from a NoneType
try: id_of(entity)
except ValueError:
return None
if version is not None:
uri = '/entity/%s/version/%d/bundle?mask=%d' %(id_of(entity), version, bitFlags)
else:
uri = '/entity/%s/bundle?mask=%d' %(id_of(entity), bitFlags)
bundle = self.restGET(uri)
return bundle
def delete(self, obj):
"""
Removes an object from Synapse.
:param obj: An existing object stored on Synapse
such as Evaluation, File, Project, WikiPage etc
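
        Example::

            ## Delete an entity by its Synapse ID (illustrative ID)
            syn.delete('syn1906479')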
"""
# Handle all strings as the Entity ID for backward compatibility
if isinstance(obj, basestring):
self.restDELETE(uri='/entity/%s' % id_of(obj))
elif hasattr(obj, "_synapse_delete"):
return obj._synapse_delete(self)
else:
try:
self.restDELETE(obj.deleteURI())
except AttributeError as ex1:
                raise SynapseError("Can't delete a %s" % type(obj))
_user_name_cache = {}
def _get_user_name(self, user_id):
if user_id not in self._user_name_cache:
self._user_name_cache[user_id] = utils.extract_user_name(self.getUserProfile(user_id))
return self._user_name_cache[user_id]
def _list(self, parent, recursive=False, long_format=False, show_modified=False, indent=0, out=sys.stdout):
"""
List child objects of the given parent, recursively if requested.
"""
fields = ['id', 'name', 'nodeType']
if long_format:
fields.extend(['createdByPrincipalId','createdOn','versionNumber'])
if show_modified:
fields.extend(['modifiedByPrincipalId', 'modifiedOn'])
query = 'select ' + ','.join(fields) + \
' from entity where %s=="%s"' % ('id' if indent==0 else 'parentId', id_of(parent))
results = self.chunkedQuery(query)
results_found = False
for result in results:
results_found = True
fmt_fields = {'name' : result['entity.name'],
'id' : result['entity.id'],
'padding' : ' ' * indent,
'slash_or_not' : '/' if is_container(result) else ''}
fmt_string = "{id}"
if long_format:
fmt_fields['createdOn'] = utils.from_unix_epoch_time(result['entity.createdOn']).strftime("%Y-%m-%d %H:%M")
fmt_fields['createdBy'] = self._get_user_name(result['entity.createdByPrincipalId'])[:18]
fmt_fields['version'] = result['entity.versionNumber']
fmt_string += " {version:3} {createdBy:>18} {createdOn}"
if show_modified:
fmt_fields['modifiedOn'] = utils.from_unix_epoch_time(result['entity.modifiedOn']).strftime("%Y-%m-%d %H:%M")
fmt_fields['modifiedBy'] = self._get_user_name(result['entity.modifiedByPrincipalId'])[:18]
fmt_string += " {modifiedBy:>18} {modifiedOn}"
fmt_string += " {padding}{name}{slash_or_not}\n"
out.write(fmt_string.format(**fmt_fields))
if (indent==0 or recursive) and is_container(result):
self._list(result['entity.id'], recursive=recursive, long_format=long_format, show_modified=show_modified, indent=indent+2, out=out)
if indent==0 and not results_found:
out.write('No results visible to {username} found for id {id}\n'.format(username=self.username, id=id_of(parent)))
############################################################
## Deprecated methods ##
############################################################
def getEntity(self, entity, version=None):
"""
**Deprecated**
Use :py:func:`synapseclient.Synapse.get`
"""
return self.get(entity, version=version, downloadFile=False)
def loadEntity(self, entity):
"""
**Deprecated**
Use :py:func:`synapseclient.Synapse.get`
"""
sys.stderr.write('WARNING!: THIS ONLY DOWNLOADS ENTITIES!')
return self.downloadEntity(entity)
def createEntity(self, entity, used=None, executed=None, **kwargs):
"""
**Deprecated**
Use :py:func:`synapseclient.Synapse.store`
"""
return self.store(entity, used=used, executed=executed, **kwargs)
def updateEntity(self, entity, used=None, executed=None, incrementVersion=False, versionLabel=None, **kwargs):
"""
**Deprecated**
Use :py:func:`synapseclient.Synapse.store`
"""
return self.store(entity, used=used, executed=executed, forceVersion=incrementVersion, versionLabel=versionLabel, **kwargs)
def deleteEntity(self, entity):
"""
**Deprecated**
Use :py:func:`synapseclient.Synapse.delete`
"""
self.delete(entity)
def uploadFile(self, entity, filename=None, used=None, executed=None):
"""
**Deprecated**
Use :py:func:`synapseclient.Synapse.store`
"""
if filename is not None:
entity['path'] = filename
if 'name' not in entity or entity['name'] is None:
entity['name'] = utils.guess_file_name(filename)
return self.store(entity, used=used, executed=executed)
def downloadEntity(self, entity, version=None):
"""
**Deprecated**
Use :py:func:`synapseclient.Synapse.get`
"""
return self.get(entity, version=version, downloadFile=True)
############################################################
## Get / Set Annotations ##
############################################################
def _getRawAnnotations(self, entity, version=None):
"""
Retrieve annotations for an Entity returning them in the native Synapse format.
"""
# Note: Specifying the version results in a zero-ed out etag,
# even if the version is the most recent.
# See `PLFM-1874 <https://sagebionetworks.jira.com/browse/PLFM-1874>`_ for more details.
if version:
uri = '/entity/%s/version/%s/annotations' % (id_of(entity), str(version))
else:
uri = '/entity/%s/annotations' % id_of(entity)
return self.restGET(uri)
def getAnnotations(self, entity, version=None):
"""
Retrieve annotations for an Entity from the Synapse Repository as a Python dict.
Note that collapsing annotations from the native Synapse format to a Python dict
may involve some loss of information. See :py:func:`_getRawAnnotations` to get
annotations in the native format.
:param entity: An Entity or Synapse ID to lookup
:param version: The version of the Entity to retrieve.
:returns: A dictionary
"""
return from_synapse_annotations(self._getRawAnnotations(entity,version))
def setAnnotations(self, entity, annotations={}, **kwargs):
"""
Store annotations for an Entity in the Synapse Repository.
:param entity: An Entity or Synapse ID to update annotations of
:param annotations: A dictionary in Synapse format or a Python format
:param kwargs: Any additional entries to be added to the annotations dictionary
:returns: A dictionary
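
        Example::

            ## Set annotations on an entity; keyword arguments are merged in
            ## (the annotation values below are illustrative)
            syn.setAnnotations('syn1906479', {'foo': 'bar'}, species='human')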
"""
uri = '/entity/%s/annotations' % id_of(entity)
annotations.update(kwargs)
synapseAnnos = to_synapse_annotations(annotations)
synapseAnnos['id'] = id_of(entity)
if 'etag' in entity and 'etag' not in synapseAnnos:
synapseAnnos['etag'] = entity['etag']
return from_synapse_annotations(self.restPUT(uri, body=json.dumps(synapseAnnos)))
############################################################
## Querying ##
############################################################
def query(self, queryStr):
"""
Query for Synapse entities.
**To be replaced** with :py:func:`synapseclient.Synapse.chunkedQuery` in the future.
See the `query language documentation <https://sagebionetworks.jira.com/wiki/display/PLFM/Repository+Service+API#RepositoryServiceAPI-QueryAPI>`_.
:returns: A JSON object containing an array of query results
Example::
syn.query("select id, name from entity where entity.parentId=='syn449742'")
See also: :py:func:`synapseclient.Synapse.chunkedQuery`
"""
return self.restGET('/query?query=' + urllib.quote(queryStr))
def chunkedQuery(self, queryStr):
"""
Query for Synapse Entities.
More robust than :py:func:`synapseclient.Synapse.query`.
See the `query language documentation <https://sagebionetworks.jira.com/wiki/display/PLFM/Repository+Service+API#RepositoryServiceAPI-QueryAPI>`_.
        :returns: An iterator that will break up large queries into manageable pieces.
Example::
results = syn.chunkedQuery("select id, name from entity where entity.parentId=='syn449742'")
for res in results:
print res['entity.id']
"""
# The query terms LIMIT and OFFSET are managed by this method
# So any user specified limits and offsets must be removed first
# Note: The limit and offset terms are always placed at the end of a query
# Note: The server does not parse the limit and offset terms if the offset occurs first.
# This parsing enforces the correct order so the user does not have to consider it.
# Regex a lower-case string to simplify matching
tempQueryStr = queryStr.lower()
regex = '\A(.*\s)(offset|limit)\s*(\d*\s*)\Z'
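        # Illustrative example: for "select id from entity limit 100 offset 5",
        # group(1) is the query text up to the keyword, group(2) is "offset" or
        # "limit", and group(3) is the trailing number. The loop below repeatedly
        # strips the last such clause off the end of the (lower-cased) query.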
# Continue to strip off and save the last limit/offset
match = re.search(regex, tempQueryStr)
options = {'limit':None, 'offset':None}
while match is not None:
options[match.group(2)] = match.group(3)
            tempQueryStr = match.group(1)
match = re.search(regex, tempQueryStr)
# Parse the stripped off values or default them to no limit and no offset
options['limit'] = int(options['limit']) if options['limit'] is not None else float('inf')
options['offset'] = int(options['offset']) if options['offset'] is not None else 1
# Get a truncated version of the original query string (not in lower-case)
queryStr = queryStr[:len(tempQueryStr)]
# Continue querying until the entire query has been fetched (or crash out)
limit = options['limit'] if options['limit'] < QUERY_LIMIT else QUERY_LIMIT
offset = options['offset']
while True:
remaining = options['limit'] + options['offset'] - offset
# Handle the case where a query was skipped due to size and now no items remain
if remaining <= 0:
raise StopIteration
# Build the sub-query
subqueryStr = "%s limit %d offset %d" % (queryStr, limit if limit < remaining else remaining, offset)
try:
response = self.restGET('/query?query=' + urllib.quote(subqueryStr))
for res in response['results']:
yield res
# Increase the size of the limit slowly
if limit < QUERY_LIMIT / 2:
limit = int(limit * 1.5 + 1)
# Exit when no more results can be pulled
if len(response['results']) > 0:
offset += len(response['results'])
else:
break
                # Exit when all requested results have been pulled
if offset > options['offset'] + options['limit'] - 1:
break
except SynapseHTTPError as err:
# Shrink the query size when appropriate
## TODO: Change the error check when PLFM-1990 is resolved
if err.response.status_code == 400 and ('The results of this query exceeded the max' in err.response.json()['reason']):
if (limit == 1):
sys.stderr.write("A single row (offset %s) of this query "
"exceeds the maximum size. Consider "
"limiting the columns returned "
"in the select clause. Skipping...\n" % offset)
offset += 1
# Since these large rows are anomalous, reset the limit
limit = QUERY_LIMIT
else:
limit /= 2
else:
raise
def md5Query(self, md5):
"""
Find the Entities with attached file(s) with the given MD5 hash.
:param md5: The MD5 to query for (hexadecimal string)
:returns: A list of Entity headers
"""
return self.restGET('/entity/md5/%s' % md5)['results']
############################################################
## ACL manipulation ##
############################################################
def _getBenefactor(self, entity):
"""An Entity gets its ACL from its benefactor."""
if utils.is_synapse_id(entity) or synapseclient.entity.is_synapse_entity(entity):
return self.restGET('/entity/%s/benefactor' % id_of(entity))
return entity
def _getACL(self, entity):
"""Get the effective ACL for a Synapse Entity."""
if hasattr(entity, 'getACLURI'):
uri = entity.getACLURI()
else:
# Get the ACL from the benefactor (which may be the entity itself)
benefactor = self._getBenefactor(entity)
uri = '/entity/%s/acl' % (benefactor['id'])
return self.restGET(uri)
def _storeACL(self, entity, acl):
"""
Create or update the ACL for a Synapse Entity.
:param entity: An entity or Synapse ID
:param acl: An ACl as a dict
:returns: the new or updated ACL
.. code-block:: python
{'resourceAccess': [
{'accessType': ['READ'],
'principalId': 222222}
]}
"""
if hasattr(entity, 'putACLURI'):
return self.restPUT(entity.putACLURI(), json.dumps(acl))
else:
# Get benefactor. (An entity gets its ACL from its benefactor.)
entity_id = id_of(entity)
uri = '/entity/%s/benefactor' % entity_id
benefactor = self.restGET(uri)
# Update or create new ACL
uri = '/entity/%s/acl' % entity_id
if benefactor['id']==entity_id:
return self.restPUT(uri, json.dumps(acl))
else:
return self.restPOST(uri,json.dumps(acl))
def _getUserbyPrincipalIdOrName(self, principalId=None):
"""
        Given either a string, an integer, or None, finds the corresponding user,
        where None implies PUBLIC.
:param principalId: Identifier of a user or group
:returns: The integer ID of the user
"""
if principalId is None or principalId=='PUBLIC':
return PUBLIC
try:
return int(principalId)
# If principalId is not a number assume it is a name or email
except ValueError:
userProfiles = self.restGET('/userGroupHeaders?prefix=%s' % principalId)
totalResults = userProfiles['totalNumberOfResults']
if totalResults == 1:
return int(userProfiles['children'][0]['ownerId'])
elif totalResults > 0:
for profile in userProfiles['children']:
if profile['userName'] == principalId:
return int(profile['ownerId'])
supplementalMessage = 'Please be more specific' if totalResults > 1 else 'No matches'
raise SynapseError('Unknown Synapse user (%s). %s.' % (principalId, supplementalMessage))
def getPermissions(self, entity, principalId=None):
"""Get the permissions that a user or group has on an Entity.
:param entity: An Entity or Synapse ID to lookup
:param principalId: Identifier of a user or group (defaults to PUBLIC users)
:returns: An array containing some combination of
['READ', 'CREATE', 'UPDATE', 'DELETE', 'CHANGE_PERMISSIONS', 'DOWNLOAD', 'PARTICIPATE']
or an empty array
"""
## TODO: what if user has permissions by membership in a group?
principalId = self._getUserbyPrincipalIdOrName(principalId)
acl = self._getACL(entity)
for permissions in acl['resourceAccess']:
if 'principalId' in permissions and permissions['principalId'] == int(principalId):
return permissions['accessType']
return []
def setPermissions(self, entity, principalId=None, accessType=['READ'], modify_benefactor=False, warn_if_inherits=True, overwrite=True):
"""
Sets permission that a user or group has on an Entity.
An Entity may have its own ACL or inherit its ACL from a benefactor.
:param entity: An Entity or Synapse ID to modify
:param principalId: Identifier of a user or group
:param accessType: Type of permission to be granted
:param modify_benefactor: Set as True when modifying a benefactor's ACL
:param warn_if_inherits: Set as False, when creating a new ACL.
Trying to modify the ACL of an Entity that
inherits its ACL will result in a warning
:param overwrite: By default this function overwrites existing
permissions for the specified user. Set this
flag to False to add new permissions nondestructively.
:returns: an Access Control List object
Valid access types are: CREATE, READ, UPDATE, DELETE, CHANGE_PERMISSIONS, DOWNLOAD, PARTICIPATE, SUBMIT
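
        Example::

            ## Grant read and download access to a user
            ## (the principal ID below is illustrative, not a real user)
            syn.setPermissions('syn1906479', principalId=1421212, accessType=['READ', 'DOWNLOAD'])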
"""
benefactor = self._getBenefactor(entity)
if benefactor['id'] != id_of(entity):
if modify_benefactor:
entity = benefactor
elif warn_if_inherits:
sys.stderr.write('Warning: Creating an ACL for entity %s, '
'which formerly inherited access control '
'from a benefactor entity, "%s" (%s).\n'
% (id_of(entity), benefactor['name'], benefactor['id']))
acl = self._getACL(entity)
principalId = self._getUserbyPrincipalIdOrName(principalId)
# Find existing permissions
permissions_to_update = None
for permissions in acl['resourceAccess']:
if 'principalId' in permissions and permissions['principalId'] == principalId:
permissions_to_update = permissions
break
if accessType is None or accessType==[]:
## remove permissions
if permissions_to_update and overwrite:
acl['resourceAccess'].remove(permissions_to_update)
else:
## add a 'resourceAccess' entry, if necessary
if not permissions_to_update:
permissions_to_update = {u'accessType': [], u'principalId': principalId}
acl['resourceAccess'].append(permissions_to_update)
if overwrite:
permissions_to_update['accessType'] = accessType
else:
permissions_to_update['accessType'] = list(set(permissions_to_update['accessType']) | set(accessType))
return self._storeACL(entity, acl)
############################################################
## Provenance ##
############################################################
## TODO: rename these to Activity
def getProvenance(self, entity, version=None):
"""
Retrieve provenance information for a Synapse Entity.
:param entity: An Entity or Synapse ID to lookup
:param version: The version of the Entity to retrieve.
Gets the most recent version if omitted
:returns: An Activity object or
raises exception if no provenance record exists
"""
# Get versionNumber from Entity
if version is None and 'versionNumber' in entity:
version = entity['versionNumber']
if version:
uri = '/entity/%s/version/%d/generatedBy' % (id_of(entity), version)
else:
uri = '/entity/%s/generatedBy' % id_of(entity)
return Activity(data=self.restGET(uri))
def setProvenance(self, entity, activity):
"""
Stores a record of the code and data used to derive a Synapse entity.
:param entity: An Entity or Synapse ID to modify
:param activity: a :py:class:`synapseclient.activity.Activity`
:returns: An updated :py:class:`synapseclient.activity.Activity` object
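
        Example::

            from synapseclient import Activity
            ## Record that an entity was generated from input data and code
            ## (the Synapse IDs below are illustrative)
            activity = Activity(name='Step 1', used=['syn1906480'], executed=['syn1917825'])
            activity = syn.setProvenance('syn1906479', activity)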
"""
# Assert that the entity was generated by a given Activity.
if 'id' in activity:
# We're updating provenance
uri = '/activity/%s' % activity['id']
activity = Activity(data=self.restPUT(uri, json.dumps(activity)))
else:
activity = self.restPOST('/activity', body=json.dumps(activity))
# assert that an entity is generated by an activity
uri = '/entity/%s/generatedBy?generatedBy=%s' % (id_of(entity), activity['id'])
activity = Activity(data=self.restPUT(uri))
return activity
def deleteProvenance(self, entity):
"""
Removes provenance information from an Entity
and deletes the associated Activity.
:param entity: An Entity or Synapse ID to modify
"""
activity = self.getProvenance(entity)
if not activity: return
uri = '/entity/%s/generatedBy' % id_of(entity)
self.restDELETE(uri)
## TODO: what happens if the activity is shared by more than one entity?
uri = '/activity/%s' % activity['id']
self.restDELETE(uri)
def updateActivity(self, activity):
"""
Modifies an existing Activity.
:returns: An updated Activity object
"""
uri = '/activity/%s' % activity['id']
return Activity(data=self.restPUT(uri, json.dumps(activity)))
############################################################
## File handle service calls ##
############################################################
def _downloadFileEntity(self, entity, destination, submission=None):
"""
Downloads the file associated with a FileEntity to the given file path.
:returns: A file info dictionary with keys path, cacheDir, files
"""
if submission is not None:
url = '%s/evaluation/submission/%s/file/%s' % (self.repoEndpoint, id_of(submission),
entity['dataFileHandleId'])
elif 'versionNumber' in entity:
url = '%s/entity/%s/version/%s/file' % (self.repoEndpoint, id_of(entity), entity['versionNumber'])
else:
url = '%s/entity/%s/file' % (self.repoEndpoint, id_of(entity))
# Create the necessary directories
try:
os.makedirs(os.path.dirname(destination))
except OSError as exception:
if exception.errno != os.errno.EEXIST:
raise
return self._downloadFile(url, destination)
def _downloadFile(self, url, destination):
"""
        Download a file from a URL to the given file path.
:returns: A file info dictionary with keys path, cacheDir, files
"""
        def returnDict(destination):
            """Internal helper to cut down on code clutter by building the return value."""
return {'path': destination,
'files': [None] if destination is None else [os.path.basename(destination)],
'cacheDir': None if destination is None else os.path.dirname(destination) }
# We expect to be redirected to a signed S3 URL or externalURL
        # The assumption is wrong: we always try to read either the outer or inner requests.get,
        # but sometimes there is nothing to read (e.g. when the scheme is ftp), at which point
        # we still set the cache and filepath based on destination, which is wrong because nothing was fetched.
response = requests.get(url, headers=self._generateSignedHeaders(url), allow_redirects=False)
if response.status_code in [301,302,303,307,308]:
url = response.headers['location']
scheme = urlparse.urlparse(url).scheme
# If it's a file URL, turn it into a path and return it
if scheme == 'file':
pathinfo = utils.file_url_to_path(url, verify_exists=True)
if 'path' not in pathinfo:
raise IOError("Could not download non-existent file (%s)." % url)
else:
                # TODO: does this make sense? Why not just ignore the download and return?
raise NotImplementedError('File can already be accessed. '
'Consider setting downloadFile to False')
elif scheme == 'sftp':
destination = self._sftpDownloadFile(url, destination)
return returnDict(destination)
elif scheme == 'http' or scheme == 'https':
#TODO add support for username/password
response = requests.get(url, headers=self._generateSignedHeaders(url, {}), stream=True)
## get filename from content-disposition, if we don't have it already
if os.path.isdir(destination):
filename = utils.extract_filename(
content_disposition_header=response.headers.get('content-disposition', None),
default_filename=utils.guess_file_name(url))
destination = os.path.join(destination, filename)
            # TODO (LARSSON): add support for FTP download
else:
sys.stderr.write('Unable to download this type of URL. ')
return returnDict(None)
try:
exceptions._raise_for_status(response, verbose=self.debug)
except SynapseHTTPError as err:
if err.response.status_code == 404:
raise SynapseError("Could not download the file at %s" % url)
raise
# Stream the file to disk
toBeTransferred = float(response.headers['content-length'])
with open(destination, 'wb') as fd:
for nChunks, chunk in enumerate(response.iter_content(FILE_BUFFER_SIZE)):
fd.write(chunk)
utils.printTransferProgress(nChunks*FILE_BUFFER_SIZE ,toBeTransferred, 'Downloading ', os.path.basename(destination))
utils.printTransferProgress(toBeTransferred ,toBeTransferred, 'Downloaded ', os.path.basename(destination))
destination = os.path.abspath(destination)
return returnDict(destination)
def _uploadToFileHandleService(self, filename, synapseStore=True, mimetype=None):
"""
Create and return a fileHandle, by either uploading a local file or
linking to an external URL.
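        The decision is a sketch of the logic below: external URLs become
        ExternalFileHandles (uploading the content of a URL is not supported);
        local files are chunk-uploaded to Synapse storage when synapseStore is
        True, otherwise they are linked by reference rather than uploaded.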
:param synapseStore: Indicates whether the file should be stored or just the URL.
Defaults to True.
"""
if filename is None:
raise ValueError('No filename given')
elif utils.is_url(filename):
if synapseStore:
raise NotImplementedError('Automatic downloading and storing of external files is not supported. Please try downloading the file locally first before storing it or set synapseStore=False')
return self._addURLtoFileHandleService(filename, mimetype=mimetype)
# For local files, we default to uploading the file unless explicitly instructed otherwise
else:
if synapseStore:
return self._chunkedUploadFile(filename, mimetype=mimetype)
else:
return self._addURLtoFileHandleService(filename, mimetype=mimetype)
def _addURLtoFileHandleService(self, externalURL, mimetype=None):
"""Create a new FileHandle representing an external URL."""
fileName = externalURL.split('/')[-1]
externalURL = utils.as_url(externalURL)
fileHandle = {'concreteType': 'org.sagebionetworks.repo.model.file.ExternalFileHandle',
'fileName' : fileName,
'externalURL' : externalURL}
if mimetype is None:
(mimetype, enc) = mimetypes.guess_type(externalURL, strict=False)
if mimetype is not None:
fileHandle['contentType'] = mimetype
return self.restPOST('/externalFileHandle', json.dumps(fileHandle), self.fileHandleEndpoint)
def _getFileHandle(self, fileHandle):
"""Retrieve a fileHandle from the fileHandle service (experimental)."""
uri = "/fileHandle/%s" % (id_of(fileHandle),)
return self.restGET(uri, endpoint=self.fileHandleEndpoint)
def _deleteFileHandle(self, fileHandle):
"""
Delete the given file handle.
Note: Only the user that created the FileHandle can delete it. Also, a
FileHandle cannot be deleted if it is associated with a FileEntity or WikiPage
"""
uri = "/fileHandle/%s" % (id_of(fileHandle),)
self.restDELETE(uri, endpoint=self.fileHandleEndpoint)
return fileHandle
def _createChunkedFileUploadToken(self, filepath, mimetype):
"""
This is the first step in uploading a large file. The resulting
ChunkedFileToken will be required for all remaining chunk file requests.
:returns: a `ChunkedFileToken <http://rest.synapse.org/org/sagebionetworks/repo/model/file/ChunkedFileToken.html>`_
"""
md5 = utils.md5_for_file(filepath).hexdigest()
fileName = utils.guess_file_name(filepath)
return self._createChunkedUploadToken(md5, fileName, mimetype)
def _createChunkedUploadToken(self, md5, fileName, mimetype):
"""
This is the first step in uploading a large file. The resulting
ChunkedFileToken will be required for all remaining chunk file requests.
:returns: a `ChunkedFileToken <http://rest.synapse.org/org/sagebionetworks/repo/model/file/ChunkedFileToken.html>`_
"""
chunkedFileTokenRequest = \
{'fileName' : fileName, \
'contentType' : mimetype, \
'contentMD5' : md5}
return self.restPOST('/createChunkedFileUploadToken', json.dumps(chunkedFileTokenRequest), endpoint=self.fileHandleEndpoint)
def _createChunkedFileUploadChunkURL(self, chunkNumber, chunkedFileToken):
"""Create a pre-signed URL that will be used to upload a single chunk of a large file."""
chunkRequest = {'chunkNumber':chunkNumber, 'chunkedFileToken':chunkedFileToken}
return self.restPOST('/createChunkedFileUploadChunkURL', json.dumps(chunkRequest), endpoint=self.fileHandleEndpoint)
def _startCompleteUploadDaemon(self, chunkedFileToken, chunkNumbers):
"""
After all of the chunks are added, start a Daemon that will copy all of the parts and complete the request.
:returns: an `UploadDaemonStatus <http://rest.synapse.org/org/sagebionetworks/repo/model/file/UploadDaemonStatus.html>`_
"""
completeAllChunksRequest = {'chunkNumbers': chunkNumbers,
'chunkedFileToken': chunkedFileToken}
return self.restPOST('/startCompleteUploadDaemon', json.dumps(completeAllChunksRequest), endpoint=self.fileHandleEndpoint)
def _completeUploadDaemonStatus(self, status):
"""
Get the status of a daemon.
:returns: an `UploadDaemonStatus <http://rest.synapse.org/org/sagebionetworks/repo/model/file/UploadDaemonStatus.html>`_
"""
return self.restGET('/completeUploadDaemonStatus/%s' % status['daemonId'], endpoint=self.fileHandleEndpoint)
def __put_chunk_to_S3(self, i, chunk, token, headers):
"""Stores a single chunk to S3. Used from chunkedUploadFile."""
# Get the signed S3 URL
url = self._createChunkedFileUploadChunkURL(i, token)
response = requests.put(url, data=chunk, headers=headers)
# Make sure requests closes response stream?:
# see: http://docs.python-requests.org/en/latest/user/advanced/#keep-alive
try:
if response is not None:
throw_away = response.content
except Exception as ex:
warnings.warn('error reading response: '+str(ex))
return response
def _chunkedUploadFile(self, filepath, chunksize=CHUNK_SIZE, progress=True, mimetype=None, threadCount=6):
"""
Upload a file to be stored in Synapse, dividing large files into chunks.
:param filepath: The file to be uploaded
:param chunksize: Chop the file into chunks of this many bytes.
The default value is 5MB, which is also the minimum value.
:returns: An `S3 FileHandle <http://rest.synapse.org/org/sagebionetworks/repo/model/file/S3FileHandle.html>`_
"""
from functools import partial
import multiprocessing.dummy as mp
from multiprocessing import Value
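        # Upload protocol implemented below (sketch): (1) request a ChunkedFileToken,
        # (2) PUT each chunk to a pre-signed S3 URL from a small thread pool, retrying
        # transient failures, (3) start the complete-upload daemon to concatenate the
        # chunks, and (4) poll the daemon until it reports COMPLETED (or FAILED).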
if chunksize < 5*MB:
raise ValueError('Minimum chunksize is 5 MB.')
if filepath is None or not os.path.exists(filepath):
raise ValueError('File not found: ' + str(filepath))
# Start timing
diagnostics = {'start-time': time.time()}
# Guess mime-type - important for confirmation of MD5 sum by receiver
if not mimetype:
(mimetype, enc) = mimetypes.guess_type(filepath, strict=False)
if not mimetype:
mimetype = "application/octet-stream"
diagnostics['mimetype'] = mimetype
# S3 wants 'content-type' and 'content-length' headers. S3 doesn't like
# 'transfer-encoding': 'chunked', which requests will add for you, if it
# can't figure out content length. The errors given by S3 are not very
# informative:
# If a request mistakenly contains both 'content-length' and
# 'transfer-encoding':'chunked', you get [Errno 32] Broken pipe.
# If you give S3 'transfer-encoding' and no 'content-length', you get:
# 501 Server Error: Not Implemented
# A header you provided implies functionality that is not implemented
headers = { 'Content-Type' : mimetype }
headers.update(synapseclient.USER_AGENT)
diagnostics['User-Agent'] = synapseclient.USER_AGENT
retry_policy=self._build_retry_policy({
"retry_status_codes": [429,502,503,504],
"retry_errors" : ['Proxy Error', 'Please slow down', 'Slowdown',
'We encountered an internal error. Please try again.',
'Max retries exceeded with url',
'RequestTimeout'], ## RequestTimeout comes from S3 during put operations
"retries" : 6})
p = mp.Pool(threadCount)
try:
# Get token
token = self._createChunkedFileUploadToken(filepath, mimetype)
diagnostics['token'] = token
diagnostics['chunks'] = []
fileSize = os.stat(filepath).st_size
completedChunks = Value('d', -1)
chunkNumbers = range(1, nchunks(filepath, chunksize=chunksize)+1)
def upload_one_chunk_with_retry(i):
chunk = get_chunk(filepath, i, chunksize=chunksize)
response = _with_retry(partial(self.__put_chunk_to_S3, i, chunk, token, headers),
verbose=False, **retry_policy)
completedChunks.value +=1
utils.printTransferProgress(completedChunks.value*chunksize,
fileSize, prefix = 'Uploading', postfix=filepath)
exceptions._raise_for_status(response, verbose=True)
p.map(upload_one_chunk_with_retry,chunkNumbers)
## complete the upload
utils.printTransferProgress(fileSize, fileSize, prefix = 'Uploaded Chunks', postfix=filepath)
sleep_on_failed_time = 1
backoff_multiplier = 2
attempt_to_complete = 0
max_attempts_to_complete = 7
while attempt_to_complete < max_attempts_to_complete:
attempt_to_complete += 1
status = self._startCompleteUploadDaemon(chunkedFileToken=token, chunkNumbers=chunkNumbers)
diagnostics['status'] = [status]
# Poll until concatenating chunks is complete
loop = 0
while (status['state']=='PROCESSING'):
loop +=1
time.sleep(CHUNK_UPLOAD_POLL_INTERVAL)
sys.stdout.write('\rWaiting for Confirmation ' + '|/-\\'[loop%4])
sys.stdout.flush()
status = self._completeUploadDaemonStatus(status)
diagnostics['status'].append(status)
if status['state'] == 'COMPLETED':
break
else:
warnings.warn("Attempt to complete upload failed: " + status['errorMessage'])
time.sleep(sleep_on_failed_time)
sleep_on_failed_time *= backoff_multiplier
if status['state'] == 'FAILED':
raise SynapseError(status['errorMessage'])
# Return a fileHandle
fileHandle = self._getFileHandle(status['fileHandleId'])
diagnostics['fileHandle'] = fileHandle
except Exception as ex:
ex.diagnostics = diagnostics
raise sys.exc_info()[0], ex, sys.exc_info()[2]
# Print timing information
if progress:
sys.stdout.write("\rUpload completed in %s.\n" % utils.format_time_interval(time.time()-diagnostics['start-time']))
return fileHandle
def _uploadStringToFile(self, content, contentType="text/plain"):
"""
Upload a string to be stored in Synapse, as a single upload chunk
:param content: The content to be uploaded
:param contentType: The content type to be stored with the file
:returns: An `S3 FileHandle <http://rest.synapse.org/org/sagebionetworks/repo/model/file/S3FileHandle.html>`_
"""
if len(content)>5*MB:
raise ValueError('Maximum string length is 5 MB.')
headers = { 'Content-Type' : contentType }
headers.update(synapseclient.USER_AGENT)
try:
# Get token
md5 = hashlib.md5(content.encode("utf-8")).hexdigest()
token = self._createChunkedUploadToken(md5, "message", contentType)
retry_policy=self._build_retry_policy(
{"retry_errors":['We encountered an internal error. Please try again.']})
i = 1
chunk_record = {'chunk-number':i}
# Get the signed S3 URL
url = self._createChunkedFileUploadChunkURL(i, token)
# PUT the chunk to S3
response = _with_retry(
lambda: requests.put(url, data=content.encode("utf-8"), headers=headers),
**retry_policy)
chunk_record['response-status-code'] = response.status_code
chunk_record['response-headers'] = response.headers
if response.text:
chunk_record['response-body'] = response.text
# Is requests closing response stream? Let's make sure:
# "Note that connections are only released back to
# the pool for reuse once all body data has been
# read; be sure to either set stream to False or
# read the content property of the Response object."
# see: http://docs.python-requests.org/en/latest/user/advanced/#keep-alive
try:
if response:
throw_away = response.content
except Exception as ex:
sys.stderr.write('error reading response: '+str(ex))
exceptions._raise_for_status(response, verbose=self.debug)
status = self._startCompleteUploadDaemon(chunkedFileToken=token, chunkNumbers=[a+1 for a in range(i)])
# Poll until concatenating chunks is complete
while (status['state']=='PROCESSING'):
time.sleep(CHUNK_UPLOAD_POLL_INTERVAL)
status = self._completeUploadDaemonStatus(status)
if status['state'] == 'FAILED':
raise SynapseError(status['errorMessage'])
# Return a fileHandle
fileHandle = self._getFileHandle(status['fileHandleId'])
except Exception as ex:
raise sys.exc_info()[0], ex, sys.exc_info()[2]
return fileHandle
############################################################
## SFTP ##
############################################################
def __getStorageLocation(self, entity):
storageLocations = self.restGET('/entity/%s/uploadDestinations'% entity['parentId'],
endpoint=self.fileHandleEndpoint)['list']
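        # Note: only the first (default) upload destination is used; the commented-out
        # code below sketches matching a destination against a user-specified uploadHost.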
return storageLocations[0]
# if uploadHost is None:
# return storageLocations[0]
# locations = [l.get('url', 'S3') for l in storageLocations]
# uploadHost = entity.get('uploadHost', None)
# for location in storageLocations:
# #location can either be of uploadType S3 or SFTP where the latter has a URL
# if location['uploadType'] == 'S3' and uploadHost == 'S3':
# return location
# elif (location['uploadType'] == 'SFTP' and uploadHost != 'S3' and
# utils.is_same_base_url(uploadHost, location['url'])):
# return location
# raise SynapseError('You are uploading to a project that supports multiple storage '
# 'locations but have specified the location of %s which is not '
# 'supported by this project. Please choose one of:\n %s'
# %(uploadHost, '\n\t'.join(locations)))
    def __uploadExternallyStoringProjects(self, entity, local_state):
        """Determines the upload location of the file based on project settings. If it is
        an external (SFTP) location, performs the upload, sets synapseStore=False and returns the new URL.
        If it is not an external storage location, returns the original path.
:param entity: An entity with path.
        :returns: A URL or local file path to add to Synapse, along with an updated local_state
                  containing externalURL and content-type
"""
        #If it is already an external URL just return
if utils.is_url(entity['path']):
local_state['externalURL'] = entity['path']
return entity['path'], local_state
location = self.__getStorageLocation(entity)
if location['uploadType'] == 'S3':
if entity.get('synapseStore', True):
sys.stdout.write('\n' + '#'*50+'\n Uploading file to Synapse storage \n'+'#'*50+'\n')
return entity['path'], local_state
        elif location['uploadType'] == 'SFTP':
            if entity.get('synapseStore', True):
                sys.stdout.write('\n%s\n%s\nUploading to: %s\n%s\n' % ('#'*50,
                                  location.get('banner', ''),
                                  urlparse.urlparse(location['url']).netloc,
                                  '#'*50))
            entity['synapseStore'] = False
#Fill out local_state with fileSize, externalURL etc...
uploadLocation = self._sftpUploadFile(entity['path'], urllib.unquote(location['url']))
local_state['externalURL'] = uploadLocation
local_state['fileSize'] = os.stat(entity['path']).st_size
if local_state.get('contentType') is None:
mimetype, enc = mimetypes.guess_type(entity['path'], strict=False)
local_state['contentType'] = mimetype
return uploadLocation, local_state
else:
raise NotImplementedError('Can only handle S3 and SFTP upload locations.')
#@utils.memoize #To memoize we need to be able to back out faulty credentials
def __getUserCredentials(self, baseURL, username=None, password=None):
"""Get user credentials for a specified URL by either looking in the configFile
or querying the user.
:param username: username on server (optionally specified)
:param password: password for authentication on the server (optionally specified)
:returns: tuple of username, password
"""
#Get authentication information from configFile
config = self.getConfigFile(self.configPath)
if username is None and config.has_option(baseURL, 'username'):
username = config.get(baseURL, 'username')
if password is None and config.has_option(baseURL, 'password'):
password = config.get(baseURL, 'password')
#If I still don't have a username and password prompt for it
if username is None:
username = getpass.getuser() #Default to login name
user = raw_input('Username for %s (%s):' %(baseURL, username))
username = username if user=='' else user
if password is None:
password = getpass.getpass('Password for %s:' %baseURL)
return username, password
def _sftpUploadFile(self, filepath, url, username=None, password=None):
"""
Performs upload of a local file to an sftp server.
:param filepath: The file to be uploaded
:param url: URL where file will be deposited. Should include path and protocol. e.g.
sftp://sftp.example.com/path/to/file/store
:param username: username on sftp server
:param password: password for authentication on the sftp server
:returns: A URL where file is stored
"""
_test_import_sftp()
import pysftp
parsedURL = urlparse.urlparse(url)
if parsedURL.scheme!='sftp':
raise(NotImplementedError("sftpUpload only supports uploads to URLs of type sftp of the "
" form sftp://..."))
username, password = self.__getUserCredentials(parsedURL.scheme+'://'+parsedURL.hostname, username, password)
with pysftp.Connection(parsedURL.hostname, username=username, password=password) as sftp:
sftp.makedirs(parsedURL.path)
with sftp.cd(parsedURL.path):
sftp.put(filepath, preserve_mtime=True, callback=utils.printTransferProgress)
path = urllib.quote(parsedURL.path+'/'+os.path.split(filepath)[-1])
parsedURL = parsedURL._replace(path=path)
return urlparse.urlunparse(parsedURL)
def _sftpDownloadFile(self, url, localFilepath=None, username=None, password=None):
"""
Performs download of a file from an sftp server.
:param url: URL where file will be deposited. Path will be chopped out.
:param localFilepath: location where to store file
:param username: username on server
:param password: password for authentication on server
:returns: localFilePath
"""
_test_import_sftp()
import pysftp
parsedURL = urlparse.urlparse(url)
if parsedURL.scheme!='sftp':
raise(NotImplementedError("sftpUpload only supports uploads to URLs of type sftp of the "
" form sftp://..."))
#Create the local file path if it doesn't exist
username, password = self.__getUserCredentials(parsedURL.scheme+'://'+parsedURL.hostname, username, password)
path = urllib.unquote(parsedURL.path)
if localFilepath is None:
localFilepath = os.getcwd()
if os.path.isdir(localFilepath):
localFilepath = os.path.join(localFilepath, path.split('/')[-1])
#Check and create the directory
dir = os.path.dirname(localFilepath)
if not os.path.exists(dir):
os.makedirs(dir)
#Download file
with pysftp.Connection(parsedURL.hostname, username=username, password=password) as sftp:
sftp.get(path, localFilepath, preserve_mtime=True, callback=utils.printTransferProgress)
return localFilepath
############################################################
## CRUD for Evaluations ##
############################################################
def getEvaluation(self, id):
"""
Gets an Evaluation object from Synapse.
See: :py:mod:`synapseclient.evaluation`
Example::
            evaluation = syn.getEvaluation(2005090)
"""
evaluation_id = id_of(id)
uri = Evaluation.getURI(evaluation_id)
return Evaluation(**self.restGET(uri))
## TODO: Should this be combined with getEvaluation?
def getEvaluationByName(self, name):
"""
Gets an Evaluation object from Synapse.
See: :py:mod:`synapseclient.evaluation`
"""
uri = Evaluation.getByNameURI(urllib.quote(name))
return Evaluation(**self.restGET(uri))
def getEvaluationByContentSource(self, entity):
"""
Returns a generator over evaluations that
derive their content from the given entity
"""
entityId = id_of(entity)
url = "/entity/%s/evaluation" % entityId
for result in self._GET_paginated(url):
yield Evaluation(**result)
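    # Hedged usage sketch (not part of the original source): assumes `syn` is
    # an authenticated Synapse() instance and 'syn12345' is a real project ID.
    #
    #   for evaluation in syn.getEvaluationByContentSource('syn12345'):
    #       print evaluation.id, evaluation.name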
def submit(self, evaluation, entity, name=None, teamName=None, silent=False):
"""
Submit an Entity for `evaluation <Evaluation.html>`_.
:param evaluation: Evaluation board to submit to
:param entity: The Entity containing the Submission
:param name: A name for this submission
:param teamName: Team name to be publicly displayed
:returns: A :py:class:`synapseclient.evaluation.Submission` object
Example::
evaluation = syn.getEvaluation(12345)
entity = syn.get('syn12345')
submission = syn.submit(evaluation, entity, name='Our Final Answer', teamName='Blue Team')
Set team name to user name::
profile = syn.getUserProfile()
submission = syn.submit(evaluation, entity, name='My Data', teamName=profile['displayName'])
"""
evaluation_id = id_of(evaluation)
# Check for access rights
unmetRights = self.restGET('/evaluation/%s/accessRequirementUnfulfilled' % evaluation_id)
if unmetRights['totalNumberOfResults'] > 0:
accessTerms = ["%s - %s" % (rights['accessType'], rights['termsOfUse']) for rights in unmetRights['results']]
raise SynapseAuthenticationError('You have unmet access requirements: \n%s' % '\n'.join(accessTerms))
## TODO: accept entities or entity IDs
        if 'versionNumber' not in entity:
entity = self.get(entity)
## version defaults to 1 to hack around required version field and allow submission of files/folders
entity_version = entity.get('versionNumber', 1)
entity_id = entity['id']
name = entity['name'] if (name is None and 'name' in entity) else name
submission = {'evaluationId' : evaluation_id,
'entityId' : entity_id,
'name' : name,
'submitterAlias': teamName,
'versionNumber' : entity_version}
submitted = Submission(**self.restPOST('/evaluation/submission?etag=%s' % entity['etag'],
json.dumps(submission)))
## if we want to display the receipt message, we need the full object
if not silent:
if not(isinstance(evaluation, Evaluation)):
evaluation = self.getEvaluation(evaluation_id)
if 'submissionReceiptMessage' in evaluation:
print evaluation['submissionReceiptMessage']
#TODO: consider returning dict(submission=submitted, message=evaluation['submissionReceiptMessage']) like the R client
return submitted
def _allowParticipation(self, evaluation, user, rights=["READ", "PARTICIPATE", "SUBMIT", "UPDATE_SUBMISSION"]):
"""
Grants the given user the minimal access rights to join and submit to an Evaluation.
Note: The specification of this method has not been decided yet, so the method is likely to change in future.
:param evaluation: An Evaluation object or Evaluation ID
:param user: Either a user group or the principal ID of a user to grant rights to.
To allow all users, use "PUBLIC".
To allow authenticated users, use "AUTHENTICATED_USERS".
:param rights: The access rights to give to the users.
Defaults to "READ", "PARTICIPATE", "SUBMIT", and "UPDATE_SUBMISSION".
"""
# Check to see if the user is an ID or group
userId = -1
try:
## TODO: is there a better way to differentiate between a userID and a group name?
## What if a group is named with just numbers?
userId = int(user)
# Verify that the user exists
try:
self.getUserProfile(userId)
except SynapseHTTPError as err:
if err.response.status_code == 404:
raise SynapseError("The user (%s) does not exist" % str(userId))
raise
except ValueError:
# Fetch the ID of the user group
userId = self._getUserbyPrincipalIdOrName(user)
if not isinstance(evaluation, Evaluation):
evaluation = self.getEvaluation(id_of(evaluation))
self.setPermissions(evaluation, userId, accessType=rights, overwrite=False)
def joinEvaluation(self, evaluation):
"""
Adds the current user to an Evaluation.
:param evaluation: An Evaluation object or Evaluation ID
Example::
evaluation = syn.getEvaluation(12345)
syn.joinEvaluation(evaluation)
See: :py:mod:`synapseclient.evaluation`
"""
self.restPOST('/evaluation/%s/participant' % id_of(evaluation), {})
def getParticipants(self, evaluation):
"""
:param evaluation: Evaluation to get Participants from.
:returns: A generator over Participants (dictionary) for an Evaluation
See: :py:mod:`synapseclient.evaluation`
"""
evaluation_id = id_of(evaluation)
url = "/evaluation/%s/participant" % evaluation_id
for result in self._GET_paginated(url):
yield result
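    # Hedged usage sketch (illustrative only): assumes `syn` is authenticated
    # and 1234567 is an existing Evaluation ID; the 'userId' key is an
    # assumption about the REST response shape, not taken from this file.
    #
    #   for participant in syn.getParticipants(1234567):
    #       print participant['userId']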
def getSubmissions(self, evaluation, status=None, myOwn=False, limit=100, offset=0):
"""
:param evaluation: Evaluation to get submissions from.
:param status: Optionally filter submissions for a specific status.
                          One of {OPEN, CLOSED, SCORED, INVALID, VALIDATED,
                          EVALUATION_IN_PROGRESS, RECEIVED, REJECTED, ACCEPTED}
:param myOwn: Determines if only your Submissions should be fetched.
Defaults to False (all Submissions)
:param limit: Limits the number of submissions in a single response.
Because this method returns a generator and repeatedly
                          fetches submissions, this argument limits the
                          size of a single request and NOT the number of
                          submissions returned in total.
:param offset: Start iterating at a submission offset from the first
submission.
:returns: A generator over :py:class:`synapseclient.evaluation.Submission` objects for an Evaluation
Example::
for submission in syn.getSubmissions(1234567):
print submission['entityId']
See: :py:mod:`synapseclient.evaluation`
"""
evaluation_id = id_of(evaluation)
uri = "/evaluation/%s/submission%s" % (evaluation_id, "" if myOwn else "/all")
        if status is not None:
# if status not in ['OPEN', 'CLOSED', 'SCORED', 'INVALID']:
# raise SynapseError('Status must be one of {OPEN, CLOSED, SCORED, INVALID}')
uri += "?status=%s" % status
for result in self._GET_paginated(uri, limit=limit, offset=offset):
yield Submission(**result)
def _getSubmissionBundles(self, evaluation, status=None, myOwn=False, limit=100, offset=0):
"""
:param evaluation: Evaluation to get submissions from.
:param status: Optionally filter submissions for a specific status.
One of {OPEN, CLOSED, SCORED, INVALID}
:param myOwn: Determines if only your Submissions should be fetched.
Defaults to False (all Submissions)
:param limit: Limits the number of submissions coming back from the
service in a single response.
:param offset: Start iterating at a submission offset from the first
submission.
:returns: A generator over dictionaries with keys 'submission' and 'submissionStatus'.
Example::
for sb in syn._getSubmissionBundles(1234567):
print sb['submission']['name'], \\
sb['submission']['submitterAlias'], \\
sb['submissionStatus']['status'], \\
sb['submissionStatus']['score']
This may later be changed to return objects, pending some thought on how submissions
along with related status and annotations should be represented in the clients.
See: :py:mod:`synapseclient.evaluation`
"""
evaluation_id = id_of(evaluation)
url = "/evaluation/%s/submission/bundle%s" % (evaluation_id, "" if myOwn else "/all")
        if status is not None:
url += "?status=%s" % status
return self._GET_paginated(url, limit=limit, offset=offset)
def getSubmissionBundles(self, evaluation, status=None, myOwn=False, limit=100, offset=0):
"""
:param evaluation: Evaluation to get submissions from.
:param status: Optionally filter submissions for a specific status.
One of {OPEN, CLOSED, SCORED, INVALID}
:param myOwn: Determines if only your Submissions should be fetched.
Defaults to False (all Submissions)
:param limit: Limits the number of submissions coming back from the
service in a single response.
:param offset: Start iterating at a submission offset from the first
submission.
:returns: A generator over tuples containing a :py:class:`synapseclient.evaluation.Submission`
and a :py:class:`synapseclient.evaluation.SubmissionStatus`.
Example::
for submission, status in syn.getSubmissionBundles(evaluation):
print submission.name, \\
submission.submitterAlias, \\
status.status, \\
status.score
This may later be changed to return objects, pending some thought on how submissions
along with related status and annotations should be represented in the clients.
See: :py:mod:`synapseclient.evaluation`
"""
for bundle in self._getSubmissionBundles(evaluation, status=status, myOwn=myOwn, limit=limit, offset=offset):
yield (Submission(**bundle['submission']), SubmissionStatus(**bundle['submissionStatus']))
def _GET_paginated(self, uri, limit=20, offset=0):
"""
:param uri: A URI that returns paginated results
:param limit: How many records should be returned per request
:param offset: At what record offset from the first should
iteration start
:returns: A generator over some paginated results
The limit parameter is set at 20 by default. Using a larger limit
results in fewer calls to the service, but if responses are large
        enough to be a burden on the service, they may be truncated.
"""
totalNumberOfResults = sys.maxint
while offset < totalNumberOfResults:
uri = utils._limit_and_offset(uri, limit=limit, offset=offset)
page = self.restGET(uri)
results = page['results'] if 'results' in page else page['children']
totalNumberOfResults = page.get('totalNumberOfResults', len(results))
for result in results:
offset += 1
yield result
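    # Hedged internal-usage sketch: the URI and the `handle` callable below are
    # illustrative stand-ins, not helpers defined in this module.
    #
    #   for result in self._GET_paginated('/evaluation/%s/participant' % evaluation_id, limit=50):
    #       handle(result)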
def getSubmission(self, id, **kwargs):
"""
Gets a :py:class:`synapseclient.evaluation.Submission` object.
See: :py:func:`synapseclient.Synapse.get` for information
on the *downloadFile*, *downloadLocation*, and *ifcollision* parameters
"""
submission_id = id_of(id)
uri = Submission.getURI(submission_id)
submission = Submission(**self.restGET(uri))
# Pre-fetch the Entity tied to the Submission, if there is one
if 'entityId' in submission and submission['entityId'] is not None:
related = self._getWithEntityBundle(
entityBundle=json.loads(submission['entityBundleJSON']),
entity=submission['entityId'],
submission=submission_id, **kwargs)
submission.entity = related
submission.filePath = related.get('path', None)
return submission
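    # Hedged usage sketch (assumes `syn` is authenticated and 9876543 refers to
    # an existing submission; filePath is only populated when a file was submitted):
    #
    #   submission = syn.getSubmission(9876543)
    #   print submission.name, submission.entityId
    #   print submission.filePath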
def getSubmissionStatus(self, submission):
"""
Downloads the status of a Submission.
:param submission: The Submission to lookup
:returns: A :py:class:`synapseclient.evaluation.SubmissionStatus` object
"""
submission_id = id_of(submission)
uri = SubmissionStatus.getURI(submission_id)
val = self.restGET(uri)
return SubmissionStatus(**val)
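    # Hedged scoring sketch: assumes `syn` is authenticated and that the
    # client's generic store() persists an updated SubmissionStatus -- an
    # assumption about API surface not shown in this excerpt.
    #
    #   status = syn.getSubmissionStatus(submission)
    #   status.status = 'SCORED'
    #   status = syn.store(status)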
############################################################
## CRUD for Wikis ##
############################################################
def getWiki(self, owner, subpageId=None):
"""Gets a :py:class:`synapseclient.wiki.Wiki` object from Synapse."""
if subpageId:
uri = '/entity/%s/wiki/%s' % (id_of(owner), id_of(subpageId))
else:
uri = '/entity/%s/wiki' % id_of(owner)
wiki = self.restGET(uri)
wiki['owner'] = owner
return Wiki(**wiki)
def getWikiHeaders(self, owner):
"""
        Retrieves the headers of all Wikis belonging to the owner.
:param owner: An Evaluation or Entity
:returns: A list of Objects with three fields: id, title and parentId.
"""
uri = '/entity/%s/wikiheadertree' % id_of(owner)
return [DictObject(**header) for header in self.restGET(uri)['results']]
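    # Hedged usage sketch (assumes `syn` is authenticated, 'syn12345' owns a
    # wiki, and at least two wiki headers exist; the index is illustrative):
    #
    #   headers = syn.getWikiHeaders('syn12345')
    #   wiki = syn.getWiki('syn12345', subpageId=headers[1].id)
    #   print wiki.markdown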
def _storeWiki(self, wiki):
"""
Stores or updates the given Wiki.
:param wiki: A Wiki object
:returns: An updated Wiki object
"""
# Make sure the file handle field is a list
if 'attachmentFileHandleIds' not in wiki:
wiki['attachmentFileHandleIds'] = []
# Convert all attachments into file handles
if 'attachments' in wiki:
for attachment in wiki['attachments']:
fileHandle = self._uploadToFileHandleService(attachment)
self.cache.add(fileHandle['id'], path=attachment)
wiki['attachmentFileHandleIds'].append(fileHandle['id'])
del wiki['attachments']
# Perform an update if the Wiki has an ID
if 'id' in wiki:
wiki.update(self.restPUT(wiki.putURI(), wiki.json()))
# Perform a create if the Wiki has no ID
else:
try:
wiki.update(self.restPOST(wiki.postURI(), wiki.json()))
except SynapseHTTPError as err:
# If already present we get an unhelpful SQL error
# TODO: implement createOrUpdate for Wikis, see SYNR-631
if err.response.status_code == 400 and "DuplicateKeyException" in err.message:
raise SynapseHTTPError("Can't re-create a wiki that already exists. "
"CreateOrUpdate not yet supported for wikis.",
response=err.response)
raise
return wiki
def _downloadWikiAttachment(self, owner, wiki, filename, destination=None):
"""
Download a file attached to a wiki page
"""
url = "%s/entity/%s/wiki/%s/attachment?fileName=%s" % (self.repoEndpoint, id_of(owner), id_of(wiki), filename,)
if not destination:
destination = filename
elif os.path.isdir(destination):
destination = os.path.join(destination, filename)
return self._downloadFile(url, destination)
def getWikiAttachments(self, wiki):
uri = "/entity/%s/wiki/%s/attachmenthandles" % (wiki.ownerId, wiki.id)
results = self.restGET(uri)
file_handles = list(WikiAttachment(**fh) for fh in results['list'])
return file_handles
def _copyWiki(self, wiki, destWiki):
"""
Copy wiki contents including attachments from one wiki to another.
:param wiki: source :py:class:`synapseclient.wiki.Wiki`
:param destWiki: destination :py:class:`synapseclient.wiki.Wiki`
Both Wikis must already exist.
"""
uri = "/entity/%s/wiki/%s/attachmenthandles" % (wiki.ownerId, wiki.id)
results = self.restGET(uri)
file_handles = {fh['id']:fh for fh in results['list']}
        ## need to download and re-upload wiki attachments, ugh!
attachments = []
tempdir = tempfile.gettempdir()
for fhid in wiki.attachmentFileHandleIds:
file_info = self._downloadWikiAttachment(wiki.ownerId, wiki, file_handles[fhid]['fileName'], destination=tempdir)
attachments.append(file_info['path'])
destWiki.update({'attachments':attachments, 'markdown':wiki.markdown, 'title':wiki.title})
return self._storeWiki(destWiki)
############################################################
## Tables ##
############################################################
def _waitForAsync(self, uri, request):
async_job_id = self.restPOST(uri+'/start', body=json.dumps(request))
# http://rest.synapse.org/org/sagebionetworks/repo/model/asynch/AsynchronousJobStatus.html
sleep = self.table_query_sleep
start_time = time.time()
lastMessage, lastProgress, lastTotal, progressed = '', 0, 1, False
while time.time()-start_time < self.table_query_timeout:
result = self.restGET(uri+'/get/%s'%async_job_id['token'])
if result.get('jobState', None) == 'PROCESSING':
progressed=True
message = result.get('progressMessage', lastMessage)
progress = result.get('progressCurrent', lastProgress)
total = result.get('progressTotal', lastTotal)
if message !='':
                    utils.printTransferProgress(progress, total, message, isBytes=False)
#Reset the time if we made progress (fix SYNPY-214)
if message != lastMessage or lastProgress != progress:
start_time = time.time()
lastMessage, lastProgress, lastTotal = message, progress, total
sleep = min(self.table_query_max_sleep, sleep * self.table_query_backoff)
time.sleep(sleep)
else:
break
else:
raise SynapseTimeoutError('Timeout waiting for query results: %0.1f seconds ' % (time.time()-start_time))
if result.get('jobState', None) == 'FAILED':
            raise SynapseError((result.get('errorMessage') or '') + '\n' + (result.get('errorDetails') or ''), asynchronousJobStatus=result)
if progressed:
            utils.printTransferProgress(total, total, message, isBytes=False)
return result
def getColumn(self, id):
"""
Gets a Column object from Synapse by ID.
See: :py:mod:`synapseclient.table.Column`
Example::
column = syn.getColumn(123)
"""
return Column(**self.restGET(Column.getURI(id)))
def getColumns(self, x, limit=100, offset=0):
"""
Get all columns defined in Synapse, those corresponding to a set of column
headers or those whose names start with a given prefix.
:param x: a list of column headers, a Schema, a TableSchema's Synapse ID, or a string prefix
        :returns: a generator of Column objects
"""
if x is None:
uri = '/column'
for result in self._GET_paginated(uri, limit=limit, offset=offset):
yield Column(**result)
elif isinstance(x, (list, tuple)):
for header in x:
try:
## if header is an integer, it's a columnID, otherwise it's
## an aggregate column, like "AVG(Foo)"
int(header)
yield self.getColumn(header)
except ValueError:
pass
elif isinstance(x, Schema) or utils.is_synapse_id(x):
uri = '/entity/{id}/column'.format(id=id_of(x))
for result in self._GET_paginated(uri, limit=limit, offset=offset):
yield Column(**result)
elif isinstance(x, basestring):
uri = '/column?prefix=' + x
for result in self._GET_paginated(uri, limit=limit, offset=offset):
yield Column(**result)
else:
ValueError("Can't get columns for a %s" % type(x))
def getTableColumns(self, table, limit=100, offset=0):
"""
Retrieve the column models used in the given table schema.
"""
uri = '/entity/{id}/column'.format(id=id_of(table))
for result in self._GET_paginated(uri, limit=limit, offset=offset):
yield Column(**result)
def tableQuery(self, query, resultsAs="csv", **kwargs):
"""
Query a Synapse Table.
:param query: query string in a `SQL-like syntax <http://rest.synapse.org/org/sagebionetworks/repo/web/controller/TableExamples.html>`_::
SELECT * from syn12345
:param resultsAs: select whether results are returned as a CSV file ("csv") or incrementally
downloaded as sets of rows ("rowset").
:return: A Table object that serves as a wrapper around a CSV file (or generator over
Row objects if resultsAs="rowset").
You can receive query results either as a generator over rows or as a CSV file. For
smallish tables, either method will work equally well. Use of a "rowset" generator allows
rows to be processed one at a time and processing may be stopped before downloading
the entire table.
Optional keyword arguments differ for the two return types. For the "rowset" option,
:param limit: specify the maximum number of rows to be returned, defaults to None
:param offset: don't return the first n rows, defaults to None
:param isConsistent: defaults to True. If set to False, return results based on current
state of the index without waiting for pending writes to complete.
Only use this if you know what you're doing.
For CSV files, there are several parameters to control the format of the resulting file:
:param quoteCharacter: default double quote
:param escapeCharacter: default backslash
:param lineEnd: defaults to os.linesep
:param separator: defaults to comma
:param header: True by default
:param includeRowIdAndRowVersion: True by default
NOTE: When performing queries on frequently updated tables,
the table can be inaccessible for a period leading to a
timeout of the query. Since the results are guaranteed
to eventually be returned you can change the max timeout
by setting the table_query_timeout variable of the Synapse
object:
syn.table_query_timeout = 300 #Sets the max timeout to 5 minutes.
"""
if resultsAs.lower()=="rowset":
return TableQueryResult(self, query, **kwargs)
elif resultsAs.lower()=="csv":
return CsvFileTable.from_table_query(self, query, **kwargs)
else:
raise ValueError("Unknown return type requested from tableQuery: " + unicode(resultsAs))
def _queryTable(self, query, limit=None, offset=None, isConsistent=True, partMask=None):
"""
Query a table and return the first page of results as a `QueryResultBundle <http://rest.synapse.org/org/sagebionetworks/repo/model/table/QueryResultBundle.html>`_.
        If the result contains a *nextPageToken*, following pages are retrieved
by calling :py:meth:`~._queryTableNext`.
:param partMask: Optional, default all. The 'partsMask' is a bit field for requesting
different elements in the resulting JSON bundle.
Query Results (queryResults) = 0x1
Query Count (queryCount) = 0x2
Select Columns (selectColumns) = 0x4
Max Rows Per Page (maxRowsPerPage) = 0x8
"""
# See: http://rest.synapse.org/org/sagebionetworks/repo/model/table/QueryBundleRequest.html
query_bundle_request = {
"concreteType": "org.sagebionetworks.repo.model.table.QueryBundleRequest",
"query": {
"sql": query,
"isConsistent": isConsistent
}
}
if partMask:
query_bundle_request["partMask"] = partMask
if limit is not None:
query_bundle_request["query"]["limit"] = limit
if offset is not None:
query_bundle_request["query"]["offset"] = offset
query_bundle_request["query"]["isConsistent"] = isConsistent
uri = '/entity/{id}/table/query/async'.format(id=_extract_synapse_id_from_query(query))
return self._waitForAsync(uri=uri, request=query_bundle_request)
def _queryTableNext(self, nextPageToken, tableId):
uri = '/entity/{id}/table/query/nextPage/async'.format(id=tableId)
return self._waitForAsync(uri=uri, request=nextPageToken)
def _uploadCsv(self, filepath, schema, updateEtag=None, quoteCharacter='"', escapeCharacter="\\", lineEnd=os.linesep, separator=",", header=True, linesToSkip=0):
"""
Send an `UploadToTableRequest <http://rest.synapse.org/org/sagebionetworks/repo/model/table/UploadToTableRequest.html>`_ to Synapse.
:param filepath: Path of a `CSV <https://en.wikipedia.org/wiki/Comma-separated_values>`_ file.
:param schema: A table entity or its Synapse ID.
:param updateEtag: Any RowSet returned from Synapse will contain the current etag of the change set. To update any rows from a RowSet the etag must be provided with the POST.
:returns: `UploadToTableResult <http://rest.synapse.org/org/sagebionetworks/repo/model/table/UploadToTableResult.html>`_
"""
fileHandle = self._chunkedUploadFile(filepath, mimetype="text/csv")
request = {
"concreteType":"org.sagebionetworks.repo.model.table.UploadToTableRequest",
"csvTableDescriptor": {
"isFirstLineHeader": header,
"quoteCharacter": quoteCharacter,
"escapeCharacter": escapeCharacter,
"lineEnd": lineEnd,
"separator": separator},
"linesToSkip": linesToSkip,
"tableId": id_of(schema),
"uploadFileHandleId": fileHandle['id']
}
if updateEtag:
request["updateEtag"] = updateEtag
uri = "/entity/{id}/table/upload/csv/async".format(id=id_of(schema))
return self._waitForAsync(uri=uri, request=request)
def _queryTableCsv(self, query, quoteCharacter='"', escapeCharacter="\\", lineEnd=os.linesep, separator=",", header=True, includeRowIdAndRowVersion=True):
"""
Query a Synapse Table and download a CSV file containing the results.
Sends a `DownloadFromTableRequest <http://rest.synapse.org/org/sagebionetworks/repo/model/table/DownloadFromTableRequest.html>`_ to Synapse.
:return: a tuple containing a `DownloadFromTableResult <http://rest.synapse.org/org/sagebionetworks/repo/model/table/DownloadFromTableResult.html>`_
The DownloadFromTableResult object contains these fields:
* headers: ARRAY<STRING>, The list of ColumnModel IDs that describes the rows of this set.
* resultsFileHandleId: STRING, The resulting file handle ID can be used to download the CSV file created by this query.
* concreteType: STRING
* etag: STRING, Any RowSet returned from Synapse will contain the current etag of the change set. To update any rows from a RowSet the etag must be provided with the POST.
* tableId: STRING, The ID of the table identified in the from clause of the table query.
"""
download_from_table_request = {
"concreteType": "org.sagebionetworks.repo.model.table.DownloadFromTableRequest",
"csvTableDescriptor": {
"isFirstLineHeader": header,
"quoteCharacter": quoteCharacter,
"escapeCharacter": escapeCharacter,
"lineEnd": lineEnd,
"separator": separator},
"sql": query,
"writeHeader": header,
"includeRowIdAndRowVersion": includeRowIdAndRowVersion}
uri = "/entity/{id}/table/download/csv/async".format(id=_extract_synapse_id_from_query(query))
download_from_table_result = self._waitForAsync(uri=uri, request=download_from_table_request)
file_handle_id = download_from_table_result['resultsFileHandleId']
cached_file_path = self.cache.get(file_handle_id=file_handle_id)
if cached_file_path is not None:
return (download_from_table_result, {'path':cached_file_path})
else:
url = '%s/fileHandle/%s/url' % (self.fileHandleEndpoint, file_handle_id)
cache_dir = self.cache.get_cache_dir(file_handle_id)
file_info = self._downloadFile(url, os.path.join(cache_dir, "query_results.csv"))
self.cache.add(file_handle_id, file_info['path'])
return (download_from_table_result, file_info)
## This is redundant with syn.store(Column(...)) and will be removed
## unless people prefer this method.
def createColumn(self, name, columnType, maximumSize=None, defaultValue=None, enumValues=None):
        columnModel = Column(name=name, columnType=columnType, maximumSize=maximumSize, defaultValue=defaultValue, enumValues=enumValues)
return Column(**self.restPOST('/column', json.dumps(columnModel)))
def _getColumnByName(self, schema, column_name):
"""
        Given a schema and a column name, get the corresponding :py:class:`Column` object.
"""
for column in self.getColumns(schema):
if column.name == column_name:
return column
return None
def downloadTableFile(self, table, column, downloadLocation=None, rowId=None, versionNumber=None, rowIdAndVersion=None, ifcollision="keep.both"):
"""
Downloads a file associated with a row in a Synapse table.
:param table: schema object, table query result or synapse ID
:param rowId: row number that holds the file handle
:param versionNumber: version number of the row that holds the file handle
:param rowIdAndVersion: row number and version in one string, "101_2" for version 2 of row 101
:param column: a Column object, the ID of a column or its name
:param downloadLocation: location in local file system to download the file
:param ifcollision: Determines how to handle file collisions.
May be "overwrite.local", "keep.local", or "keep.both".
Defaults to "keep.both".
:returns: a dictionary with 'path'.
Example::
file_info = syn.downloadTableFile(table, rowId=1, versionNumber=1, column="cover_art", downloadLocation=".")
print file_info['path']
"""
if (rowId is None or versionNumber is None) and rowIdAndVersion is None:
raise ValueError("Need to pass in either rowIdAndVersion or (rowId and versionNumber).")
## get table ID, given a string, Table or Schema
if isinstance(table, basestring):
table_id = table
elif isinstance(table, synapseclient.table.TableAbstractBaseClass):
table_id = table.tableId
elif isinstance(table, Schema):
table_id = table.id
else:
raise ValueError("Unrecognized table object \"%s\"." % table)
## get column ID, given a column name, ID or Column object
        if isinstance(column, basestring):
            column_name = column
            column = self._getColumnByName(table_id, column_name)
            if column is None:
                raise SynapseError("Can't find column \"%s\"." % column_name)
column_id = column.id
elif isinstance(column, Column):
column_id = column.id
elif isinstance(column, int):
column_id = column
else:
raise ValueError("Unrecognized column \"%s\"." % column)
## extract row and version
if rowIdAndVersion:
m = re.match(r'(\d+)_(\d+)', rowIdAndVersion)
if m:
rowId = m.group(1)
versionNumber = m.group(2)
else:
                raise ValueError('Row and version \"%s\" in unrecognized format.' % rowIdAndVersion)
row_reference_set = {
'tableId':table_id,
'headers':[{'id':column_id}],
'rows':[{'rowId':rowId,'versionNumber':versionNumber}]
}
# result is a http://rest.synapse.org/org/sagebionetworks/repo/model/table/TableFileHandleResults.html
result = self.restPOST("/entity/%s/table/filehandles" % table_id, body=json.dumps(row_reference_set))
if len(result['rows'][0]['list']) != 1:
raise SynapseError('Couldn\'t get file handle for tableId={id}, column={columnId}, row={rowId}, version={versionNumber}'.format(
id=table_id,
columnId=column_id,
rowId=rowId,
versionNumber=versionNumber))
file_handle_id = result['rows'][0]['list'][0]['id']
if downloadLocation is None:
downloadLocation = self.cache.get_cache_dir(file_handle_id)
cached_file_path = self.cache.get(file_handle_id, downloadLocation)
if cached_file_path is not None:
return {'path':cached_file_path}
else:
url = "{endpoint}/entity/{id}/table/column/{columnId}/row/{rowId}/version/{versionNumber}/file".format(
endpoint=self.repoEndpoint,
id=table_id,
columnId=column_id,
rowId=rowId,
versionNumber=versionNumber)
file_info = self._downloadFile(url, downloadLocation)
self.cache.add(file_handle_id, file_info['path'])
return file_info
############################################################
## CRUD for Entities (properties) ##
############################################################
def _getEntity(self, entity, version=None):
"""
Get an entity from Synapse.
:param entity: A Synapse ID, a dictionary representing an Entity, or a Synapse Entity object
:param version: The version number to fetch
:returns: A dictionary containing an Entity's properties
"""
uri = '/entity/'+id_of(entity)
if version:
uri += '/version/%d' % version
return self.restGET(uri)
def _createEntity(self, entity):
"""
Create a new entity in Synapse.
:param entity: A dictionary representing an Entity or a Synapse Entity object
:returns: A dictionary containing an Entity's properties
"""
return self.restPOST(uri='/entity', body=json.dumps(get_properties(entity)))
def _updateEntity(self, entity, incrementVersion=True, versionLabel=None):
"""
Update an existing entity in Synapse.
:param entity: A dictionary representing an Entity or a Synapse Entity object
:returns: A dictionary containing an Entity's properties
"""
uri = '/entity/%s' % id_of(entity)
if is_versionable(entity):
if incrementVersion or versionLabel is not None:
uri += '/version'
if 'versionNumber' in entity:
entity['versionNumber'] += 1
if 'versionLabel' in entity:
entity['versionLabel'] = str(entity['versionNumber'])
if versionLabel:
entity['versionLabel'] = str(versionLabel)
return self.restPUT(uri, body=json.dumps(get_properties(entity)))
def _findEntityIdByNameAndParent(self, name, parent=None):
"""
Find an Entity given its name and parent ID.
:returns: the Entity ID or None if not found
"""
if parent is None:
parent = ROOT_ENTITY
qr = self.query('select id from entity where name=="%s" and parentId=="%s"' % (name, id_of(parent)))
if qr.get('totalNumberOfResults', 0) == 1:
return qr['results'][0]['entity.id']
else:
return None
############################################################
## Send Message ##
############################################################
def sendMessage(self, userIds, messageSubject, messageBody, contentType="text/plain"):
"""
        Sends a message via Synapse.
        :param userIds: A list of user IDs to which the message is to be sent
:param messageSubject: The subject for the message
:param messageBody: The body of the message
:param contentType: optional contentType of message body (default="text/plain")
Should be one of "text/plain" or "text/html"
:returns: The metadata of the created message
"""
fileHandle = self._uploadStringToFile(messageBody, contentType)
message = dict()
message['recipients']=userIds
message['subject']=messageSubject
message['fileHandleId']=fileHandle['id']
return self.restPOST(uri='/message', body=json.dumps(message))
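    # Hedged usage sketch (recipient and text are illustrative; 'ownerId' is
    # assumed to be present on the user profile returned by the client):
    #
    #   profile = syn.getUserProfile()
    #   syn.sendMessage([profile['ownerId']],
    #                   "Job finished",
    #                   "The pipeline completed without errors.",
    #                   contentType="text/plain")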
############################################################
## Low level Rest calls ##
############################################################
def _generateSignedHeaders(self, url, headers=None):
"""Generate headers signed with the API key."""
if self.username is None or self.apiKey is None:
raise SynapseAuthenticationError("Please login")
if headers is None:
headers = dict(self.default_headers)
headers.update(synapseclient.USER_AGENT)
sig_timestamp = time.strftime(utils.ISO_FORMAT, time.gmtime())
url = urlparse.urlparse(url).path
sig_data = self.username + url + sig_timestamp
signature = base64.b64encode(hmac.new(self.apiKey, sig_data, hashlib.sha1).digest())
sig_header = {'userId' : self.username,
'signatureTimestamp' : sig_timestamp,
'signature' : signature}
headers.update(sig_header)
return headers
def restGET(self, uri, endpoint=None, headers=None, retryPolicy={}, **kwargs):
"""
Performs a REST GET operation to the Synapse server.
:param uri: URI on which get is performed
:param endpoint: Server endpoint, defaults to self.repoEndpoint
:param headers: Dictionary of headers to use rather than the API-key-signed default set of headers
:param kwargs: Any other arguments taken by a `requests <http://docs.python-requests.org/en/latest/>`_ method
:returns: JSON encoding of response
"""
uri, headers = self._build_uri_and_headers(uri, endpoint, headers)
retryPolicy = self._build_retry_policy(retryPolicy)
response = _with_retry(lambda: requests.get(uri, headers=headers, **kwargs), **retryPolicy)
exceptions._raise_for_status(response, verbose=self.debug)
return self._return_rest_body(response)
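    # Hedged usage sketch of the low-level REST layer; the endpoints shown are
    # assumptions based on Synapse REST paths referenced elsewhere in this
    # module, not guarantees.
    #
    #   profile = syn.restGET('/userProfile')
    #   acl = syn.restGET('/entity/syn12345/acl')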
def restPOST(self, uri, body, endpoint=None, headers=None, retryPolicy={}, **kwargs):
"""
Performs a REST POST operation to the Synapse server.
        :param uri: URI on which the POST is performed
:param endpoint: Server endpoint, defaults to self.repoEndpoint
:param body: The payload to be delivered
:param headers: Dictionary of headers to use rather than the API-key-signed default set of headers
:param kwargs: Any other arguments taken by a `requests <http://docs.python-requests.org/en/latest/>`_ method
:returns: JSON encoding of response
"""
uri, headers = self._build_uri_and_headers(uri, endpoint, headers)
retryPolicy = self._build_retry_policy(retryPolicy)
response = _with_retry(lambda: requests.post(uri, data=body, headers=headers, **kwargs), **retryPolicy)
exceptions._raise_for_status(response, verbose=self.debug)
return self._return_rest_body(response)
def restPUT(self, uri, body=None, endpoint=None, headers=None, retryPolicy={}, **kwargs):
"""
Performs a REST PUT operation to the Synapse server.
        :param uri: URI on which the PUT is performed
:param endpoint: Server endpoint, defaults to self.repoEndpoint
:param body: The payload to be delivered
:param headers: Dictionary of headers to use rather than the API-key-signed default set of headers
:param kwargs: Any other arguments taken by a `requests <http://docs.python-requests.org/en/latest/>`_ method
:returns: JSON encoding of response
"""
uri, headers = self._build_uri_and_headers(uri, endpoint, headers)
retryPolicy = self._build_retry_policy(retryPolicy)
response = _with_retry(lambda: requests.put(uri, data=body, headers=headers, **kwargs), **retryPolicy)
exceptions._raise_for_status(response, verbose=self.debug)
return self._return_rest_body(response)
def restDELETE(self, uri, endpoint=None, headers=None, retryPolicy={}, **kwargs):
"""
Performs a REST DELETE operation to the Synapse server.
:param uri: URI of resource to be deleted
:param endpoint: Server endpoint, defaults to self.repoEndpoint
:param headers: Dictionary of headers to use rather than the API-key-signed default set of headers
:param kwargs: Any other arguments taken by a `requests <http://docs.python-requests.org/en/latest/>`_ method
"""
uri, headers = self._build_uri_and_headers(uri, endpoint, headers)
retryPolicy = self._build_retry_policy(retryPolicy)
response = _with_retry(lambda: requests.delete(uri, headers=headers, **kwargs), **retryPolicy)
exceptions._raise_for_status(response, verbose=self.debug)
def _build_uri_and_headers(self, uri, endpoint=None, headers=None):
"""Returns a tuple of the URI and headers to request with."""
        if endpoint is None:
endpoint = self.repoEndpoint
# Check to see if the URI is incomplete (i.e. a Synapse URL)
# In that case, append a Synapse endpoint to the URI
parsedURL = urlparse.urlparse(uri)
if parsedURL.netloc == '':
uri = endpoint + uri
if headers is None:
headers = self._generateSignedHeaders(uri)
return uri, headers
def _build_retry_policy(self, retryPolicy={}):
"""Returns a retry policy to be passed onto _with_retry."""
defaults = dict(STANDARD_RETRY_PARAMS)
defaults.update(retryPolicy)
return defaults
def _return_rest_body(self, response):
"""Returns either a dictionary or a string depending on the 'content-type' of the response."""
if _is_json(response.headers.get('content-type', None)):
return response.json()
return response.text
| 43.023284 | 205 | 0.601233 |
cc0a9343c601b5f1a34c29d6cf15bed338570cc8 | 22,288 | py | Python | nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Executor.py | LP-Codes/Nuitka | 2939da85f74127fb3ff6c1155a44bd73e8f5b2a9 | [
"Apache-2.0"
] | null | null | null | nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Executor.py | LP-Codes/Nuitka | 2939da85f74127fb3ff6c1155a44bd73e8f5b2a9 | [
"Apache-2.0"
] | 1 | 2021-06-10T06:47:04.000Z | 2021-06-10T06:47:04.000Z | nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Executor.py | LP-Codes/Nuitka | 2939da85f74127fb3ff6c1155a44bd73e8f5b2a9 | [
"Apache-2.0"
] | null | null | null | """SCons.Executor
A module for executing actions with specific lists of target and source
Nodes.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
__revision__ = "src/engine/SCons/Executor.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import collections
import SCons.Debug
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
import SCons.Util
from SCons.compat import with_metaclass, NoSlotsPyPy
class Batch(object):
"""Remembers exact association between targets
and sources of executor."""
__slots__ = ('targets',
'sources')
def __init__(self, targets=[], sources=[]):
self.targets = targets
self.sources = sources
class TSList(collections.UserList):
"""A class that implements $TARGETS or $SOURCES expansions by wrapping
an executor Method. This class is used in the Executor.lvars()
to delay creation of NodeList objects until they're needed.
Note that we subclass collections.UserList purely so that the
is_Sequence() function will identify an object of this class as
a list during variable expansion. We're not really using any
collections.UserList methods in practice.
"""
def __init__(self, func):
self.func = func
def __getattr__(self, attr):
nl = self.func()
return getattr(nl, attr)
def __getitem__(self, i):
nl = self.func()
return nl[i]
def __getslice__(self, i, j):
nl = self.func()
i, j = max(i, 0), max(j, 0)
return nl[i:j]
def __str__(self):
nl = self.func()
return str(nl)
def __repr__(self):
nl = self.func()
return repr(nl)
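# Hedged illustration (not part of SCons): TSList defers creation of the node
# list until the wrapped callable is invoked, which is what allows $TARGETS and
# $SOURCES to be expanded lazily. `executor` below is an assumed Executor
# instance.
#
#   tsl = TSList(lambda: executor.get_all_targets())
#   str(tsl)    # calls the lambda now, then str() on the resulting NodeList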
class TSObject(object):
"""A class that implements $TARGET or $SOURCE expansions by wrapping
an Executor method.
"""
def __init__(self, func):
self.func = func
def __getattr__(self, attr):
n = self.func()
return getattr(n, attr)
def __str__(self):
n = self.func()
if n:
return str(n)
return ''
def __repr__(self):
n = self.func()
if n:
return repr(n)
return ''
def rfile(node):
"""
A function to return the results of a Node's rfile() method,
if it exists, and the Node itself otherwise (if it's a Value
Node, e.g.).
"""
try:
rfile = node.rfile
except AttributeError:
return node
else:
return rfile()
def execute_nothing(obj, target, kw):
return 0
def execute_action_list(obj, target, kw):
"""Actually execute the action list."""
env = obj.get_build_env()
kw = obj.get_kw(kw)
status = 0
for act in obj.get_action_list():
args = ([], [], env)
status = act(*args, **kw)
if isinstance(status, SCons.Errors.BuildError):
status.executor = obj
raise status # TODO pylint E0702: raising int not allowed
elif status:
msg = "Error %s" % status
raise SCons.Errors.BuildError(
errstr=msg,
node=obj.batches[0].targets,
executor=obj,
action=act)
return status
_do_execute_map = {0 : execute_nothing,
1 : execute_action_list}
def execute_actions_str(obj):
env = obj.get_build_env()
return "\n".join(action.genstring(obj.get_all_targets(),
obj.get_all_sources(),
env) for action in obj.get_action_list())
def execute_null_str(obj):
return ''
_execute_str_map = {0 : execute_null_str,
1 : execute_actions_str}
class Executor(object, with_metaclass(NoSlotsPyPy)):
"""A class for controlling instances of executing an action.
This largely exists to hold a single association of an action,
environment, list of environment override dictionaries, targets
and sources for later processing as needed.
"""
__slots__ = ('pre_actions',
'post_actions',
'env',
'overridelist',
'batches',
'builder_kw',
'_memo',
'lvars',
'_changed_sources_list',
'_changed_targets_list',
'_unchanged_sources_list',
'_unchanged_targets_list',
'action_list',
'_do_execute',
'_execute_str')
def __init__(self, action, env=None, overridelist=[{}],
targets=[], sources=[], builder_kw={}):
if SCons.Debug.track_instances: logInstanceCreation(self, 'Executor.Executor')
self.set_action_list(action)
self.pre_actions = []
self.post_actions = []
self.env = env
self.overridelist = overridelist
if targets or sources:
self.batches = [Batch(targets[:], sources[:])]
else:
self.batches = []
self.builder_kw = builder_kw
self._do_execute = 1
self._execute_str = 1
self._memo = {}
def get_lvars(self):
try:
return self.lvars
except AttributeError:
self.lvars = {
'CHANGED_SOURCES' : TSList(self._get_changed_sources),
'CHANGED_TARGETS' : TSList(self._get_changed_targets),
'SOURCE' : TSObject(self._get_source),
'SOURCES' : TSList(self._get_sources),
'TARGET' : TSObject(self._get_target),
'TARGETS' : TSList(self._get_targets),
'UNCHANGED_SOURCES' : TSList(self._get_unchanged_sources),
'UNCHANGED_TARGETS' : TSList(self._get_unchanged_targets),
}
return self.lvars
def _get_changes(self):
cs = []
ct = []
us = []
ut = []
for b in self.batches:
# don't add targets marked always build to unchanged lists
# add to changed list as they always need to build
if not b.targets[0].always_build and b.targets[0].is_up_to_date():
us.extend(list(map(rfile, b.sources)))
ut.extend(b.targets)
else:
cs.extend(list(map(rfile, b.sources)))
ct.extend(b.targets)
self._changed_sources_list = SCons.Util.NodeList(cs)
self._changed_targets_list = SCons.Util.NodeList(ct)
self._unchanged_sources_list = SCons.Util.NodeList(us)
self._unchanged_targets_list = SCons.Util.NodeList(ut)
def _get_changed_sources(self, *args, **kw):
try:
return self._changed_sources_list
except AttributeError:
self._get_changes()
return self._changed_sources_list
def _get_changed_targets(self, *args, **kw):
try:
return self._changed_targets_list
except AttributeError:
self._get_changes()
return self._changed_targets_list
def _get_source(self, *args, **kw):
return rfile(self.batches[0].sources[0]).get_subst_proxy()
def _get_sources(self, *args, **kw):
return SCons.Util.NodeList([rfile(n).get_subst_proxy() for n in self.get_all_sources()])
def _get_target(self, *args, **kw):
return self.batches[0].targets[0].get_subst_proxy()
def _get_targets(self, *args, **kw):
return SCons.Util.NodeList([n.get_subst_proxy() for n in self.get_all_targets()])
def _get_unchanged_sources(self, *args, **kw):
try:
return self._unchanged_sources_list
except AttributeError:
self._get_changes()
return self._unchanged_sources_list
def _get_unchanged_targets(self, *args, **kw):
try:
return self._unchanged_targets_list
except AttributeError:
self._get_changes()
return self._unchanged_targets_list
def get_action_targets(self):
if not self.action_list:
return []
targets_string = self.action_list[0].get_targets(self.env, self)
if targets_string[0] == '$':
targets_string = targets_string[1:]
return self.get_lvars()[targets_string]
def set_action_list(self, action):
import SCons.Util
if not SCons.Util.is_List(action):
if not action:
import SCons.Errors
raise SCons.Errors.UserError("Executor must have an action.")
action = [action]
self.action_list = action
def get_action_list(self):
if self.action_list is None:
return []
return self.pre_actions + self.action_list + self.post_actions
def get_all_targets(self):
"""Returns all targets for all batches of this Executor."""
result = []
for batch in self.batches:
result.extend(batch.targets)
return result
def get_all_sources(self):
"""Returns all sources for all batches of this Executor."""
result = []
for batch in self.batches:
result.extend(batch.sources)
return result
def get_all_children(self):
"""Returns all unique children (dependencies) for all batches
of this Executor.
The Taskmaster can recognize when it's already evaluated a
Node, so we don't have to make this list unique for its intended
canonical use case, but we expect there to be a lot of redundancy
(long lists of batched .cc files #including the same .h files
over and over), so removing the duplicates once up front should
save the Taskmaster a lot of work.
"""
result = SCons.Util.UniqueList([])
for target in self.get_all_targets():
result.extend(target.children())
return result
def get_all_prerequisites(self):
"""Returns all unique (order-only) prerequisites for all batches
of this Executor.
"""
result = SCons.Util.UniqueList([])
for target in self.get_all_targets():
if target.prerequisites is not None:
result.extend(target.prerequisites)
return result
def get_action_side_effects(self):
"""Returns all side effects for all batches of this
Executor used by the underlying Action.
"""
result = SCons.Util.UniqueList([])
for target in self.get_action_targets():
result.extend(target.side_effects)
return result
@SCons.Memoize.CountMethodCall
def get_build_env(self):
"""Fetch or create the appropriate build Environment
for this Executor.
"""
try:
return self._memo['get_build_env']
except KeyError:
pass
# Create the build environment instance with appropriate
# overrides. These get evaluated against the current
# environment's construction variables so that users can
# add to existing values by referencing the variable in
# the expansion.
overrides = {}
for odict in self.overridelist:
overrides.update(odict)
import SCons.Defaults
env = self.env or SCons.Defaults.DefaultEnvironment()
build_env = env.Override(overrides)
self._memo['get_build_env'] = build_env
return build_env
def get_build_scanner_path(self, scanner):
"""Fetch the scanner path for this executor's targets and sources.
"""
env = self.get_build_env()
try:
cwd = self.batches[0].targets[0].cwd
except (IndexError, AttributeError):
cwd = None
return scanner.path(env, cwd,
self.get_all_targets(),
self.get_all_sources())
def get_kw(self, kw={}):
result = self.builder_kw.copy()
result.update(kw)
result['executor'] = self
return result
# use extra indirection because with new-style objects (Python 2.2
# and above) we can't override special methods, and nullify() needs
# to be able to do this.
def __call__(self, target, **kw):
return _do_execute_map[self._do_execute](self, target, kw)
def cleanup(self):
self._memo = {}
def add_sources(self, sources):
"""Add source files to this Executor's list. This is necessary
for "multi" Builders that can be called repeatedly to build up
a source file list for a given target."""
# TODO(batch): extend to multiple batches
assert (len(self.batches) == 1)
# TODO(batch): remove duplicates?
sources = [x for x in sources if x not in self.batches[0].sources]
self.batches[0].sources.extend(sources)
def get_sources(self):
return self.batches[0].sources
def add_batch(self, targets, sources):
"""Add pair of associated target and source to this Executor's list.
This is necessary for "batch" Builders that can be called repeatedly
to build up a list of matching target and source files that will be
used in order to update multiple target files at once from multiple
corresponding source files, for tools like MSVC that support it."""
self.batches.append(Batch(targets, sources))
def prepare(self):
"""
Preparatory checks for whether this Executor can go ahead
and (try to) build its targets.
"""
for s in self.get_all_sources():
if s.missing():
msg = "Source `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError(msg % (s, self.batches[0].targets[0]))
def add_pre_action(self, action):
self.pre_actions.append(action)
def add_post_action(self, action):
self.post_actions.append(action)
# another extra indirection for new-style objects and nullify...
def __str__(self):
return _execute_str_map[self._execute_str](self)
def nullify(self):
self.cleanup()
self._do_execute = 0
self._execute_str = 0
@SCons.Memoize.CountMethodCall
def get_contents(self):
"""Fetch the signature contents. This is the main reason this
class exists, so we can compute this once and cache it regardless
of how many target or source Nodes there are.
Returns bytes
"""
try:
return self._memo['get_contents']
except KeyError:
pass
env = self.get_build_env()
action_list = self.get_action_list()
all_targets = self.get_all_targets()
all_sources = self.get_all_sources()
result = bytearray("",'utf-8').join([action.get_contents(all_targets,
all_sources,
env)
for action in action_list])
self._memo['get_contents'] = result
return result
def get_timestamp(self):
"""Fetch a time stamp for this Executor. We don't have one, of
course (only files do), but this is the interface used by the
timestamp module.
"""
return 0
def scan_targets(self, scanner):
# TODO(batch): scan by batches
self.scan(scanner, self.get_all_targets())
def scan_sources(self, scanner):
# TODO(batch): scan by batches
if self.batches[0].sources:
self.scan(scanner, self.get_all_sources())
def scan(self, scanner, node_list):
"""Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient.
"""
env = self.get_build_env()
path = self.get_build_scanner_path
kw = self.get_kw()
        # TODO(batch): scan by batches
deps = []
for node in node_list:
node.disambiguate()
deps.extend(node.get_implicit_deps(env, scanner, path, kw))
deps.extend(self.get_implicit_deps())
for tgt in self.get_all_targets():
tgt.add_to_implicit(deps)
def _get_unignored_sources_key(self, node, ignore=()):
return (node,) + tuple(ignore)
@SCons.Memoize.CountDictCall(_get_unignored_sources_key)
def get_unignored_sources(self, node, ignore=()):
key = (node,) + tuple(ignore)
try:
memo_dict = self._memo['get_unignored_sources']
except KeyError:
memo_dict = {}
self._memo['get_unignored_sources'] = memo_dict
else:
try:
return memo_dict[key]
except KeyError:
pass
if node:
# TODO: better way to do this (it's a linear search,
# but it may not be critical path)?
sourcelist = []
for b in self.batches:
if node in b.targets:
sourcelist = b.sources
break
else:
sourcelist = self.get_all_sources()
if ignore:
idict = {i: 1 for i in ignore}
sourcelist = [s for s in sourcelist if s not in idict]
memo_dict[key] = sourcelist
return sourcelist
def get_implicit_deps(self):
"""Return the executor's implicit dependencies, i.e. the nodes of
the commands to be executed."""
result = []
build_env = self.get_build_env()
for act in self.get_action_list():
deps = act.get_implicit_deps(self.get_all_targets(),
self.get_all_sources(),
build_env)
result.extend(deps)
return result
_batch_executors = {}
def GetBatchExecutor(key):
return _batch_executors[key]
def AddBatchExecutor(key, executor):
assert key not in _batch_executors
_batch_executors[key] = executor
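# Hedged usage sketch (the key is made up): batch executors are registered once
# under a key and later looked up with the same key.
#
#   AddBatchExecutor(('cc', 'batch-1'), my_executor)
#   executor = GetBatchExecutor(('cc', 'batch-1'))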
nullenv = None
class NullEnvironment(SCons.Util.Null):
import SCons.CacheDir
_CacheDir_path = None
_CacheDir = SCons.CacheDir.CacheDir(None)
def get_CacheDir(self):
return self._CacheDir
def get_NullEnvironment():
"""Use singleton pattern for Null Environments."""
global nullenv
if nullenv is None:
nullenv = NullEnvironment()
return nullenv
class Null(object, with_metaclass(NoSlotsPyPy)):
"""A null Executor, with a null build Environment, that does
nothing when the rest of the methods call it.
This might be able to disappear when we refactor things to
disassociate Builders from Nodes entirely, so we're not
going to worry about unit tests for this--at least for now.
"""
__slots__ = ('pre_actions',
'post_actions',
'env',
'overridelist',
'batches',
'builder_kw',
'_memo',
'lvars',
'_changed_sources_list',
'_changed_targets_list',
'_unchanged_sources_list',
'_unchanged_targets_list',
'action_list',
'_do_execute',
'_execute_str')
def __init__(self, *args, **kw):
if SCons.Debug.track_instances:
logInstanceCreation(self, 'Executor.Null')
self.batches = [Batch(kw['targets'][:], [])]
def get_build_env(self):
return get_NullEnvironment()
def get_build_scanner_path(self):
return None
def cleanup(self):
pass
def prepare(self):
pass
def get_unignored_sources(self, *args, **kw):
return tuple(())
def get_action_targets(self):
return []
def get_action_list(self):
return []
def get_all_targets(self):
return self.batches[0].targets
def get_all_sources(self):
return self.batches[0].targets[0].sources
def get_all_children(self):
return self.batches[0].targets[0].children()
def get_all_prerequisites(self):
return []
def get_action_side_effects(self):
return []
def __call__(self, *args, **kw):
return 0
def get_contents(self):
return ''
def _morph(self):
"""Morph this Null executor to a real Executor object."""
batches = self.batches
self.__class__ = Executor
self.__init__([])
self.batches = batches
# The following methods require morphing this Null Executor to a
# real Executor object.
def add_pre_action(self, action):
self._morph()
self.add_pre_action(action)
def add_post_action(self, action):
self._morph()
self.add_post_action(action)
def set_action_list(self, action):
self._morph()
self.set_action_list(action)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.216095 | 114 | 0.606829 |
c93a2539e8c603ef059e6c930c76dff514388b01 | 2,408 | py | Python | scrapy_proj/openrecipes/spiders/whatsgabycooking_spider.py | fictivekin/openrecipes | 82b5c080168439b328f76a115aa2011fa4601384 | [
"Apache-2.0"
] | 300 | 2015-01-05T05:37:34.000Z | 2022-03-05T16:24:37.000Z | scrapy_proj/openrecipes/spiders/whatsgabycooking_spider.py | fictivekin/openrecipes | 82b5c080168439b328f76a115aa2011fa4601384 | [
"Apache-2.0"
] | 11 | 2015-05-14T04:15:22.000Z | 2018-01-27T17:22:32.000Z | scrapy_proj/openrecipes/spiders/whatsgabycooking_spider.py | fictivekin/openrecipes | 82b5c080168439b328f76a115aa2011fa4601384 | [
"Apache-2.0"
] | 100 | 2015-01-11T23:14:29.000Z | 2022-03-25T06:03:48.000Z | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from openrecipes.items import RecipeItem, RecipeItemLoader
from openrecipes.schema_org_parser import parse_recipes
from openrecipes.util import is_ingredient_container
class WhatsgabycookingMixin(object):
source = 'whatsgabycooking'
def parse_item(self, response):
hxs = HtmlXPathSelector(response)
image_path = hxs.select("descendant-or-self::img[@class and contains(@class, 'wp-image')][1]/@data-lazy-src").extract()
raw_recipes = parse_recipes(hxs, {'source': self.source, 'url': response.url})
if raw_recipes:
# schema.org. Yay!
for recipe in raw_recipes:
recipe['image'] = image_path
return [RecipeItem.from_dict(recipe) for recipe in raw_recipes]
else:
# not schema.org. Boo!
il = RecipeItemLoader(item=RecipeItem())
il.add_value('source', self.source)
il.add_value('url', response.url)
il.add_value('image', image_path)
name_path = '//*[@class="post-title"]/h1/text()'
il.add_value('name', hxs.select(name_path).extract())
# maybe it's in the P's
for p in hxs.select('//div[@id="recipe" or @class="span9"]/p'):
if is_ingredient_container(p):
il.add_value('ingredients', p.select('text()').extract())
# or maybe it's in the LI's
for li in hxs.select('//*[@class="span9"]//ul/li'):
if is_ingredient_container(li):
il.add_value('ingredients', li.select('text()').extract())
# or maybe it's in these other LI's
for li in hxs.select('//li[@class="ingredient"]/text()'):
il.add_value('ingredients', li.extract())
return il.load_item()
class WhatsgabycookingcrawlSpider(CrawlSpider, WhatsgabycookingMixin):
name = "whatsgabycooking.com"
allowed_domains = ["whatsgabycooking.com"]
start_urls = [
"http://whatsgabycooking.com/index/",
]
rules = (
Rule(SgmlLinkExtractor(allow=('/category/categories/.*/'))),
Rule(SgmlLinkExtractor(allow=('/[^/]+/'), deny=('/category/categories/.*/')),
callback='parse_item'),
)
| 37.625 | 127 | 0.615864 |
355f4a5df501922d7cced07daae4a173d4d88eaf | 9,891 | py | Python | codes/models.py | RonDen/HanTokenization | 7fd61f3de33a1fc094784fcc49bd4b2808195c89 | [
"MIT"
] | 3 | 2021-04-04T03:28:41.000Z | 2021-04-23T02:57:20.000Z | codes/models.py | RonDen/HanTokenization | 7fd61f3de33a1fc094784fcc49bd4b2808195c89 | [
"MIT"
] | null | null | null | codes/models.py | RonDen/HanTokenization | 7fd61f3de33a1fc094784fcc49bd4b2808195c89 | [
"MIT"
] | 2 | 2021-04-13T11:34:35.000Z | 2021-04-20T00:51:41.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: Xukun Luo
# Date: 2021.04.08
import sys
sys.path.append("../")
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from codes.layers import CRF, TransformerLayer
class BiLstmModel(nn.Module):
def __init__(self, args):
super(BiLstmModel, self).__init__()
self.label_number = args.label_number
self.lstm_layers = args.lstm_layers
self.lstm_hidden = args.lstm_hidden
self.lstm_dropout = args.lstm_dropout
self.use_cuda = args.use_cuda
self.embed_size = args.embed_size
self.num_embeddings = args.vocab_len
self.embedding = nn.Embedding(self.num_embeddings, self.embed_size)
self.lstm_encoder = nn.LSTM(input_size=self.embed_size,
hidden_size=self.lstm_hidden,
num_layers=self.lstm_layers,
bidirectional=True,
dropout=self.lstm_dropout,
batch_first=True)
self.lstm_decoder = nn.LSTM(input_size=self.lstm_hidden*2,
hidden_size=self.lstm_hidden,
num_layers=self.lstm_layers,
bidirectional=True,
dropout=self.lstm_dropout,
batch_first=True)
self.linear = nn.Linear(self.lstm_hidden*2, self.label_number)
self.droplayer = nn.Dropout(p=self.lstm_dropout)
def forward(self, src, src_len):
'''
Forward Algorithm.
Args:
src (batch_size, seq_length) : word-level representation of sentence
src_len (batch_size) : the sentence length
Returns:
            feats (batch_size, seq_length, num_labels) : predicted feats.
'''
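        # Pipeline: embed -> pack -> BiLSTM encoder -> BiLSTM decoder -> unpack
        # -> dropout -> linear projection to per-token label scores.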
batch_size, seq_len = src.size(0), src.size(1)
# Embedding.
emb = self.embedding(src)
emb = pack_padded_sequence(emb, src_len, True)
# Encoder. (batch_size, seq_length, lstm_hidden*2)
context_vector, _ = self.lstm_encoder(emb)
#context_vector = self.droplayer(context_vector)
# Decoder. (batch_size, seq_length, lstm_hidden*2)
lstm_out, hidden = self.lstm_decoder(context_vector)
lstm_out, _ = pad_packed_sequence(lstm_out, True)
lstm_out = lstm_out.contiguous().view(-1, self.lstm_hidden*2)
lstm_out = self.droplayer(lstm_out)
# Linear layer. (batch_size, seq_length, label_number)
lstm_feats = self.linear(lstm_out).view(batch_size, seq_len, -1)
return lstm_feats
class BiLstmCRFModel(nn.Module):
def __init__(self, args):
super(BiLstmCRFModel, self).__init__()
self.label_number = args.label_number
self.lstm_layers = args.lstm_layers
self.lstm_hidden = args.lstm_hidden
self.lstm_dropout = args.lstm_dropout
self.use_cuda = args.use_cuda
self.embed_size = args.embed_size
self.num_embeddings = args.vocab_len
self.device = args.device
self.embedding = nn.Embedding(self.num_embeddings, self.embed_size)
self.lstm_encoder = nn.LSTM(input_size=self.embed_size,
hidden_size=self.lstm_hidden,
num_layers=self.lstm_layers,
bidirectional=True,
dropout=self.lstm_dropout,
batch_first=True)
self.lstm_decoder = nn.LSTM(input_size=self.lstm_hidden*2,
hidden_size=self.lstm_hidden,
num_layers=self.lstm_layers,
bidirectional=True,
dropout=self.lstm_dropout,
batch_first=True)
self.crf = CRF(target_size=self.label_number,
average_batch=True,
use_cuda=self.use_cuda,
bad_pairs=[],
good_pairs=[])
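        # NOTE: the linear layer below projects to label_number+2 because this
        # CRF implementation appears to reserve two extra indices for its
        # start/stop transition tags.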
self.linear = nn.Linear(self.lstm_hidden*2, self.label_number+2)
self.droplayer = nn.Dropout(p=self.lstm_dropout)
def forward(self, src, src_len):
'''
Forward Algorithm.
Args:
src (batch_size, seq_length) : word-level representation of sentence
src_len (batch_size) : the sentence length
Returns:
            feats (batch_size, seq_length, num_labels) : predicted feats.
'''
batch_size, seq_len = src.size(0), src.size(1)
# Embedding.
emb = self.embedding(src)
emb = pack_padded_sequence(emb, src_len, True)
# Encoder. (batch_size, seq_length, lstm_hidden*2)
context_vector, _ = self.lstm_encoder(emb)
#context_vector = self.droplayer(context_vector)
# Decoder. (batch_size, seq_length, lstm_hidden*2)
lstm_out, hidden = self.lstm_decoder(context_vector)
lstm_out, _ = pad_packed_sequence(lstm_out, True)
lstm_out = lstm_out.contiguous().view(-1, self.lstm_hidden*2)
lstm_out = self.droplayer(lstm_out)
# Linear layer. (batch_size, seq_length, label_number)
lstm_feats = self.linear(lstm_out).view(batch_size, seq_len, -1)
return lstm_feats
def get_mask(self, src_len, batch_size, seq_len):
""" Generate the mask matrix. """
src_range = torch.arange(0, seq_len).long() # [0, 1, 2, 3, 4]
src_len = torch.LongTensor(src_len) # [3, 4, 5, 1]
# [
# [0, 1, 2, 3, 4],
# [0, 1, 2, 3, 4],
# [0, 1, 2, 3, 4],
# [0, 1, 2, 3, 4]
# ]
src_range_expand = src_range.unsqueeze(0).expand(batch_size, seq_len)
# [
# [3, 3, 3, 3, 3],
# [4, 4, 4, 4, 4],
# [5, 5, 5, 5, 5],
# [1, 1, 1, 1, 1]
# ]
src_len_expand = src_len.unsqueeze(1).expand_as(src_range_expand)
# [
# [1, 1, 1, 0, 0],
# [1, 1, 1, 1, 0],
# [1, 1, 1, 1, 1],
# [1, 0, 0, 0, 0]
# ]
mask = src_range_expand < src_len_expand
return mask
def criterion(self, feats, src_len, labels):
"""
CRF LOSS.
Args:
feats: size=(batch_size, seq_len, tag_size)
src_len: size=(batch_size)
            labels: size=(batch_size, seq_len)
Returns:
loss_value
"""
batch_size, seq_len = feats.size(0), feats.size(1)
# Generate the mask matrix.
mask = self.get_mask(src_len, batch_size, seq_len)
# Get loss.
loss_value = self.crf.neg_log_likelihood_loss(feats, mask.long().to(self.device), labels)
batch_size = feats.size(0)
loss_value /= float(batch_size)
return loss_value
def get_best_path(self, feats, src_len):
"""
Best path.
Args:
feats: size=(batch_size, seq_len, tag_size)
src_len: size=(batch_size)
Returns:
best_path: size=(batch_size, seq_len)
"""
batch_size, seq_len = feats.size(0), feats.size(1)
# Generate the mask matrix.
mask = self.get_mask(src_len, batch_size, seq_len)
# Get best path.
path_score, best_path = self.crf(feats, mask.bool().to(self.device))
return best_path
class TransformerModel(nn.Module):
def __init__(self, args):
super(TransformerModel, self).__init__()
self.label_number = args.label_number
self.use_cuda = args.use_cuda
self.embed_size = args.embed_size
self.num_embeddings = args.vocab_len
self.device = args.device
self.hidden_size = args.hidden_size
self.transformer_layers = args.transformer_layers
self.dropout = args.dropout
self.embedding = nn.Embedding(self.num_embeddings, self.embed_size)
self.transformer = nn.ModuleList([
TransformerLayer(args) for _ in range(self.transformer_layers)
])
self.linear = nn.Linear(self.hidden_size, self.label_number)
self.droplayer = nn.Dropout(p=self.dropout)
def get_mask(self, src_len, batch_size, seq_len):
""" Generate the mask matrix. """
src_range = torch.arange(0, seq_len).long()
src_len = torch.LongTensor(src_len)
src_range_expand = src_range.unsqueeze(0).expand(batch_size, seq_len)
src_len_expand = src_len.unsqueeze(1).expand_as(src_range_expand)
mask = src_range_expand < src_len_expand
return mask
def forward(self, src, src_len):
'''
Forward Algorithm.
Args:
src (batch_size, seq_length) : word-level representation of sentence
src_len (batch_size) : the sentence length
Returns:
            feats (batch_size, seq_length, num_labels) : predicted feats.
'''
batch_size, seq_len = src.size(0), src.size(1)
# Embedding.
emb = self.embedding(src)
# Transformer. (batch_size, seq_length, hidden_size)
mask = self.get_mask(src_len, batch_size, seq_len).float().view(batch_size, 1, seq_len, -1)
mask = (1.0 - mask) * -10000.0
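        # Convert the padding mask into an additive attention bias: valid
        # positions contribute 0, padded positions a large negative number.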
hidden = emb
for i in range(self.transformer_layers):
hidden = self.transformer[i](hidden, mask.to(self.device))
# Linear layer. (batch_size, seq_length, label_number)
feats = self.linear(hidden).view(batch_size, seq_len, -1)
return feats
ModelDict = {
"bilstm": BiLstmModel,
"bilstmcrf": BiLstmCRFModel,
"transformer": TransformerModel
} | 36.769517 | 99 | 0.574158 |
f5f745ae7ac5a82ba61ceb21b66769ce32ddca23 | 740 | py | Python | mayan/apps/converter/migrations/0010_auto_20150815_0351.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | [
"Apache-2.0"
] | 2,743 | 2017-12-18T07:12:30.000Z | 2022-03-27T17:21:25.000Z | mayan/apps/converter/migrations/0010_auto_20150815_0351.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | 15 | 2017-12-18T14:58:07.000Z | 2021-03-01T20:05:05.000Z | mayan/apps/converter/migrations/0010_auto_20150815_0351.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | 257 | 2017-12-18T03:12:58.000Z | 2022-03-25T08:59:10.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('converter', '0009_auto_20150714_2228'),
]
operations = [
migrations.AlterField(
model_name='transformation',
name='name',
field=models.CharField(
max_length=128, verbose_name='Name',
choices=[
('rotate', 'Rotate: degrees'), ('zoom', 'Zoom: percent'),
('resize', 'Resize: width, height'),
('crop', 'Crop: left, top, right, bottom')
]
),
preserve_default=True,
),
]
| 26.428571 | 77 | 0.510811 |
b1ea991ca6aa36d0ede8e941d3e8f7b0a8b030a9 | 7,820 | py | Python | src/toil/test/src/miscTests.py | PolusAI/toil | a98acdb5cbe0f850b2c11403d147577d9971f4e1 | [
"Apache-2.0"
] | 516 | 2015-07-30T19:08:55.000Z | 2018-07-03T20:53:42.000Z | src/toil/test/src/miscTests.py | PolusAI/toil | a98acdb5cbe0f850b2c11403d147577d9971f4e1 | [
"Apache-2.0"
] | 1,949 | 2015-07-29T23:38:49.000Z | 2018-07-05T12:42:04.000Z | src/toil/test/src/miscTests.py | gmloose/toil | a82834073b28f66747c5c3ac99d1a678b82d2290 | [
"Apache-2.0"
] | 193 | 2015-07-31T18:52:57.000Z | 2018-07-05T08:54:11.000Z | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import os
import random
import sys
import tempfile
from uuid import uuid4
from toil.common import getNodeID
from toil.lib.exceptions import panic, raise_
from toil.lib.io import AtomicFileCreate, atomic_install, atomic_tmp_file
from toil.lib.misc import CalledProcessErrorStderr, call_command
from toil.test import ToilTest, slow
log = logging.getLogger(__name__)
logging.basicConfig()
class MiscTests(ToilTest):
"""
This class contains miscellaneous tests that don't have enough content to be their own test
file, and that don't logically fit in with any of the other test suites.
"""
def setUp(self):
super().setUp()
self.testDir = self._createTempDir()
def testIDStability(self):
prevNodeID = None
        for i in range(10):
            nodeID = getNodeID()
            if prevNodeID is not None:
                self.assertEqual(nodeID, prevNodeID)
            prevNodeID = nodeID
@slow
def testGetSizeOfDirectoryWorks(self):
'''A test to make sure toil.common.getDirSizeRecursively does not
underestimate the amount of disk space needed.
Disk space allocation varies from system to system. The computed value
should always be equal to or slightly greater than the creation value.
This test generates a number of random directories and randomly sized
files to test this using getDirSizeRecursively.
'''
from toil.common import getDirSizeRecursively
# a list of the directories used in the test
directories = [self.testDir]
# A dict of {FILENAME: FILESIZE} for all files used in the test
files = {}
# Create a random directory structure
for i in range(0,10):
directories.append(tempfile.mkdtemp(dir=random.choice(directories), prefix='test'))
# Create 50 random file entries in different locations in the directories. 75% of the time
# these are fresh files of size [1, 10] MB and 25% of the time they are hard links to old
# files.
while len(files) <= 50:
fileName = os.path.join(random.choice(directories), self._getRandomName())
if random.randint(0,100) < 75:
# Create a fresh file in the range of 1-10 MB
fileSize = int(round(random.random(), 2) * 10 * 1024 * 1024)
with open(fileName, 'wb') as fileHandle:
fileHandle.write(os.urandom(fileSize))
files[fileName] = fileSize
else:
# Link to one of the previous files
if len(files) == 0:
continue
linkSrc = random.choice(list(files.keys()))
os.link(linkSrc, fileName)
files[fileName] = 'Link to %s' % linkSrc
computedDirectorySize = getDirSizeRecursively(self.testDir)
totalExpectedSize = sum([x for x in list(files.values()) if isinstance(x, int)])
self.assertGreaterEqual(computedDirectorySize, totalExpectedSize)
@staticmethod
def _getRandomName():
return uuid4().hex
def _get_test_out_file(self, tail):
outf = os.path.join(self.testDir, self.id() + "." + tail)
if os.path.exists(outf):
os.unlink(outf)
return outf
def _write_test_file(self, outf_tmp):
with open(outf_tmp, "w") as fh:
fh.write(self.id() + '\n')
def test_atomic_install(self):
outf = self._get_test_out_file(".foo.gz")
outf_tmp = atomic_tmp_file(outf)
self._write_test_file(outf_tmp)
atomic_install(outf_tmp, outf)
self.assertTrue(os.path.exists(outf))
def test_atomic_install_dev(self):
devn = '/dev/null'
tmp = atomic_tmp_file(devn)
self.assertEqual(tmp, devn)
atomic_install(tmp, devn)
def test_atomic_context_ok(self):
outf = self._get_test_out_file(".tar")
with AtomicFileCreate(outf) as outf_tmp:
self._write_test_file(outf_tmp)
self.assertTrue(os.path.exists(outf))
def test_atomic_context_error(self):
outf = self._get_test_out_file(".tar")
try:
with AtomicFileCreate(outf) as outf_tmp:
self._write_test_file(outf_tmp)
raise Exception("stop!")
except Exception as ex:
self.assertEqual(str(ex), "stop!")
self.assertFalse(os.path.exists(outf))
def test_call_command_ok(self):
o = call_command(["echo", "Fred"])
self.assertEqual("Fred\n", o)
self.assertTrue(isinstance(o, str), str(type(o)))
def test_call_command_err(self):
with self.assertRaisesRegex(CalledProcessErrorStderr,
"^Command '\\['cat', '/dev/Frankenheimer']' exit status 1: cat: /dev/Frankenheimer: No such file or directory\n$"):
call_command(["cat", "/dev/Frankenheimer"])
class TestPanic(ToilTest):
def test_panic_by_hand(self):
try:
self.try_and_panic_by_hand()
except:
self.__assert_raised_exception_is_primary()
def test_panic(self):
try:
self.try_and_panic()
except:
self.__assert_raised_exception_is_primary()
def test_panic_with_secondary(self):
try:
self.try_and_panic_with_secondary()
except:
self.__assert_raised_exception_is_primary()
def test_nested_panic(self):
try:
self.try_and_nested_panic_with_secondary()
except:
self.__assert_raised_exception_is_primary()
def try_and_panic_by_hand(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except Exception:
exc_type, exc_value, traceback = sys.exc_info()
try:
raise RuntimeError("secondary")
except Exception:
pass
raise_(exc_type, exc_value, traceback)
def try_and_panic(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic(log):
pass
def try_and_panic_with_secondary(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic( log ):
raise RuntimeError("secondary")
def try_and_nested_panic_with_secondary(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic( log ):
with panic( log ):
raise RuntimeError("secondary")
def __assert_raised_exception_is_primary(self):
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(exc_type, ValueError)
self.assertEqual(str(exc_value), "primary")
while exc_traceback.tb_next is not None:
exc_traceback = exc_traceback.tb_next
self.assertEqual(exc_traceback.tb_lineno, self.line_of_primary_exc)
| 36.886792 | 152 | 0.635934 |
969cc17b7ae46a5f1fd0de9509eb6e7c7a531aad | 4,541 | py | Python | tuttest/__init__.py | rw1nkler/tuttest | 30535a9662793f42d9b4197a3494195f26327c3e | [
"Apache-2.0"
] | null | null | null | tuttest/__init__.py | rw1nkler/tuttest | 30535a9662793f42d9b4197a3494195f26327c3e | [
"Apache-2.0"
] | null | null | null | tuttest/__init__.py | rw1nkler/tuttest | 30535a9662793f42d9b4197a3494195f26327c3e | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
from typing import Optional, List, Dict
import re
def parse_rst(text: str, names: List[str] = None, extra_roles: List[str] = []) -> OrderedDict:
snippets = OrderedDict()
from docutils.core import publish_doctree
# Sphinx roles
from docutils.parsers.rst import roles
from docutils import nodes
roles.register_generic_role('kbd', nodes.emphasis)
roles.register_generic_role('ref', nodes.emphasis)
# custom roles e.g. extlinks
for role in extra_roles:
roles.register_generic_role(role, nodes.emphasis)
doctree = publish_doctree(text)
def is_literal_block(node):
return (node.tagname == 'literal_block')
# TODO: getting lang is tricky, as it's just one of the classes at this point. Another one is 'code', but there can also be user-set classes. Perhaps we should just match against a language array, but this is not optimal. Otherwise we have to do full RST parsing...
literal_blocks = doctree.traverse(condition=is_literal_block)
for idx, block in enumerate(literal_blocks):
snippet = Snippet(block.astext())
name = ''
name = ' '.join(block['names'])
if name != '':
snippet.meta['name'] = name
snippets[snippet.meta['name']] = snippet
else:
if names and idx < len(names):
name = names[idx]
snippet.meta['name'] = name
else:
name = 'unnamed'+str(idx)
snippets[name] = snippet
return snippets
def parse_markdown(text: str, names: List[str] = None) -> OrderedDict:
snippets = OrderedDict()
lines = text.split('\n')
inside = False
snippet = Snippet()
# TODO: fix for indented code blocks e.g. inside lists
prevl = None
snippet_lines = []
for l in lines:
if l[0:3] == "```":
if inside:
# we've found the end of the previous snippet
snippet.text = '\n'.join(snippet_lines)
if 'name' in snippet.meta:
snippets[snippet.meta['name']] = snippet
else:
snippets['unnamed'+str(len(snippets))] = snippet
inside = False
else:
# we're starting to parse a new snippet
inside = True
snippet = Snippet()
snippet_lines = []
lang = l[3:].strip()
if lang != "":
snippet.lang = lang
                # look in previous line for metadata of the form key1=val1 ; key2=val2
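                # e.g. a preceding line like: <!-- name="build-step" ; expect="ok" -->
                # ('name' is the key used to index snippets; 'expect' is just an
                # illustrative extra key)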
if prevl is not None:
prevl = prevl.strip()
if prevl[0:4] == "<!--" and prevl[-3:] == "-->" :
prevl = prevl[4:-4]
variables = prevl.split(';')
for v in variables:
split = v.split('=',1)
snippet.meta[split[0].strip()] = split[1].strip().strip('"')
else:
# store current line into the line buffer
if inside:
snippet_lines.append(l)
# store previous line to be able to look for metadata
prevl = l
return snippets
def get_snippets(filename: str, *, names: List[str] = None, extra_roles: List[str] = [], parse: bool = False) -> OrderedDict:
'''Top level function. Use this one instead of the underlying "parse_rst" etc.'''
text = open(filename, 'r+', encoding='utf-8').read()
snippets = None
if filename.endswith('.rst') or filename.endswith('.rest'):
snippets = parse_rst(text, names, extra_roles)
else: # the default is markdown
snippets = parse_markdown(text, names)
if parse:
for s in snippets.values():
s.commands = parse_snippet(s.text)
return snippets
from dataclasses import dataclass, field
@dataclass
class Command:
prompt: str
command: str
result: str = ''
@dataclass
class Snippet:
text: List[str] = field(default_factory=list)
lang: str = ''
meta: Dict = field(default_factory=dict)
commands: List[Command] = field(default_factory=list)
# currently only parse Renode snippets
def parse_snippet(snippet: str) -> List[Command]:
prompt = re.compile(r'\n(\([a-zA-Z0-9\-]+\) *)([^\n]*)')
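    # Matches Renode-style prompt lines such as "(machine-0) start", capturing
    # the prompt token and the rest of the line as the command.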
spl = prompt.split('\n'+snippet)
commands = []
for i in range(1,len(spl),3):
commands.append(Command(spl[i].strip(), spl[i+1], spl[i+2]))
return commands
| 34.142857 | 269 | 0.574103 |
5acf27d8825771f588caa5fb73febd4ba7e6aedf | 7,598 | py | Python | connectivity.py | boredStats/Infraslow-MEG-waves | 78b0d7aaf625fe566da16fd135916b4ad8619a48 | [
"MIT"
] | null | null | null | connectivity.py | boredStats/Infraslow-MEG-waves | 78b0d7aaf625fe566da16fd135916b4ad8619a48 | [
"MIT"
] | null | null | null | connectivity.py | boredStats/Infraslow-MEG-waves | 78b0d7aaf625fe566da16fd135916b4ad8619a48 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
"""Script for calculating connectivity measures."""
import os
import math
import h5py
import utils
import numpy as np
import pandas as pd
from scipy.stats import chi2, pearsonr
from sklearn.model_selection import KFold
from mne.connectivity import phase_slope_index
def _try_epoching(dataset=None, fs=500, transpose=True):
if dataset is None:
test_timepoints = 1100
dataset = np.ndarray(shape=(test_timepoints, 2))
nrows = dataset.shape[0]
ncols = dataset.shape[1]
new_constant = int(fs * 1) # min requirement infraslow PSI is 500s
n_splits = math.ceil(nrows/new_constant)
indexer = KFold(n_splits=n_splits)
if transpose is False:
epoched_data = np.ndarray(shape=(n_splits, new_constant, ncols))
else:
epoched_data = np.ndarray(shape=(n_splits, ncols, new_constant))
for i, (_, ts) in enumerate(indexer.split(dataset)):
segment = dataset[ts, :]
if segment.shape[0] < new_constant:
epoch = np.pad(
segment,
pad_width=((0, int(fs-segment.shape[0])), (0, 0)),
constant_values=0)
else:
epoch = segment
if transpose is True:
epoch = epoch.T
epoched_data[i, :, :] = epoch
del epoch, segment
return epoched_data
def epoch_MEG(rois=None, fs=500, transpose=True):
data_dir = utils.ProjectData.data_dir
meg_subj, meg_sess = utils.ProjectData.meg_metadata
epoch_file = os.path.join(data_dir, 'MEG_epoched.hdf5')
out = h5py.File(epoch_file, 'a')
for sess in meg_sess:
for subj in meg_subj:
path = subj + '/' + sess
if path in out:
print(path)
continue
key = subj + '/MEG/' + sess + '/timeseries'
print('Epoching %s' % str(key))
dataset = utils.index_hcp_raw(key=key, rois=rois)
nrows = dataset.shape[0]
ncols = dataset.shape[1]
n_splits = math.ceil(nrows/fs)
indexer = KFold(n_splits=n_splits)
d = np.float32
if transpose is False:
epoched_data = np.ndarray(shape=(n_splits, fs, ncols), dtype=d)
else:
epoched_data = np.ndarray(shape=(n_splits, ncols, fs), dtype=d)
for i, (_, ts) in enumerate(indexer.split(dataset)):
segment = dataset[ts, :]
if segment.shape[0] < fs:
epoch = np.pad(
segment,
pad_width=((0, int(fs-segment.shape[0])), (0, 0)),
constant_values=0)
else:
epoch = segment
if transpose is True:
epoch = epoch.T
epoched_data[i, :, :] = epoch
del epoch, segment
del dataset
grp = out.require_group(path)
grp.create_dataset('epochs', data=epoched_data, compression='lzf')
del epoched_data
out.close()
def effective_connectivity(start_rois=None):
data_dir = utils.ProjectData.data_dir
subjects, sessions = utils.ProjectData.meg_metadata
glasser_rois = utils.ProjectData.glasser_rois
if start_rois is None:
start_rois = glasser_rois
start_indices, end_indices, connections = [], [], []
if type(start_rois) == str:
start_rois = [start_rois]
for sr in start_rois:
for g, gr in enumerate(glasser_rois):
if sr == gr:
idxs = [g] * len(glasser_rois)
start_indices.extend(idxs)
end_indices.extend(np.arange(len(glasser_rois)))
connections.append('%s %s' % (sr, gr))
start_indices = np.array(start_indices)
indices = (start_indices, end_indices)
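    # Each seed ROI is paired with every Glasser ROI, so PSI is estimated for
    # all seed->target connections in a single call.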
epoch_file = os.path.join(data_dir, 'MEG_epoched.hdf5')
res_dict = {}
for sess in sessions:
res_df = pd.DataFrame(index=subjects, columns=connections)
for subj in subjects:
print('Calculating PSI for %s %s' % (sess, subj))
f = h5py.File(epoch_file, 'r')
epoched_data = f[subj][sess]['epochs'][...]
f.close()
eff_con, _, _, _, _ = phase_slope_index(
data=epoched_data,
indices=indices,
fmin=8,
fmax=12,
mode='fourier',
sfreq=500,
verbose='CRITICAL',
)
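            # Phase-slope index restricted to the 8-12 Hz (alpha) band for each
            # seed->target pair defined in `indices`.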
res_df.loc[subj] = np.ndarray.flatten(eff_con)
del epoched_data, eff_con
res_dict[sess] = res_df
return res_dict
def _circ_line_corr(ang, line):
# Correlate periodic data with linear data
n = len(ang)
rxs = pearsonr(line, np.sin(ang))
rxs = rxs[0]
rxc = pearsonr(line, np.cos(ang))
rxc = rxc[0]
rcs = pearsonr(np.sin(ang), np.cos(ang))
rcs = rcs[0]
rho = np.sqrt((rxc**2 + rxs**2 - 2*rxc*rxs*rcs)/(1-rcs**2))
pval = 1 - chi2.cdf(n*(rho**2), 1)
# standard_error = np.sqrt((1-r_2)/(n-2))
return rho, pval # ,standard_error
def _calc_ppc(band, output_file=None, rois=None):
ProjectData = utils.ProjectData
data_dir = ProjectData.data_dir
subjects, sessions = ProjectData.meg_metadata
phase_amp_data = os.path.join(data_dir, 'MEG_phase_amp_data.hdf5')
if output_file is None:
output_file = os.path.join(data_dir, 'MEG_phase_phase_coupling.hdf5')
if rois is None:
rois = ProjectData.glasser_rois # Not recommended, specify ROIs
roi_indices, sorted_roi_names = sort_roi_names(rois)
for sess in sessions:
for subj in subjects:
print('%s: Running %s %s' % (utils.ctime(), sess, subj))
ppc_file = h5py.File(output_file, 'a')
prog = sess + '/' + subj
if prog in ppc_file:
continue # Check if it's been run already
data_file = h5py.File(phase_amp_data, 'r+')
band_key = '%s/phase_data' % band
subj_data = data_file[subj][sess][band_key][:, roi_indices]
ppc = np.ndarray(shape=(len(rois), len(rois)))
for r1, roi1 in enumerate(roi_indices):
for r2, roi2 in enumerate(roi_indices):
if r1 == r2:
ppc[r1, r2] = 1
elif not r2 > r1:
phase_1 = subj_data[:, r1]
phase_2 = subj_data[:, r2]
rho = circ_corr(phase_1, phase_2)
ppc[r1, r2] = rho
else:
ppc[r1, r2] = 0
out = ppc_file.require_group(prog)
out.create_dataset('ppc', data=ppc, compression='lzf')
ppc_file.close()
data_file.close()
del subj_data
def _calc_alpha_ppc():
from misc import get_psd_rois
psd_rois, _ = get_psd_rois()
data_dir = utils.ProjectData.data_dir
alpha_ppc = os.path.join(data_dir, 'MEG_alpha_ppc.hdf5')
_calc_ppc(band='Alpha', rois=psd_rois, output_file=alpha_ppc)
def _calc_infraslow_ppc():
from misc import get_psd_rois
psd_rois, _ = get_psd_rois()
data_dir = utils.ProjectData.data_dir
alpha_ppc = os.path.join(data_dir, 'MEG_infraslow_ppc.hdf5')
_calc_ppc(band='BOLD bandpass', rois=psd_rois, output_file=alpha_ppc)
if __name__ == "__main__":
res = effective_connectivity(start_rois=['p24pr_L', 'p24pr_R'])
    data_dir = utils.ProjectData.data_dir
    outpath = os.path.join(data_dir, 'dACC_effective_connectivity.xlsx')
utils.save_xls(res, outpath)
# _calc_alpha_ppc()
# _calc_infraslow_ppc()
| 34.536364 | 79 | 0.575283 |
37f1cf98e706f260747bf8c68c51578ea7add006 | 7,558 | py | Python | src/network/dynamicTwoTerminalGate.py | mikepfrank/dynamic | 01581e5f671f3ab34eb5bec45c2cab508a1b6928 | [
"Unlicense"
] | 2 | 2019-01-25T07:18:56.000Z | 2021-12-18T05:16:40.000Z | src/network/dynamicTwoTerminalGate.py | mikepfrank/dynamic | 01581e5f671f3ab34eb5bec45c2cab508a1b6928 | [
"Unlicense"
] | null | null | null | src/network/dynamicTwoTerminalGate.py | mikepfrank/dynamic | 01581e5f671f3ab34eb5bec45c2cab508a1b6928 | [
"Unlicense"
] | null | null | null | from numbers import Real
import logmaster
_logger = logmaster.getLogger(logmaster.sysName + '.simulator')
from functions.binaryDifferentiableFunction import BinaryDifferentiableFunction
from .dynamicNode import DynamicNode
from .dynamicPort import DynamicPort as Port
from .dynamicComponent import DynamicComponent
from .dynamicNetwork import DynamicNetwork
#-- A DynamicTwoTerminalGate has two nodes called "input" and "output"
# with a single interaction term between them. It's assumed that there's
# no internal state other than the position/velocity values associated
# with the two nodes.
class DynamicTwoTerminalGate(DynamicComponent):
#-- Data members:
# interaction [BinaryDifferentiableFunction] -
# Interaction potential energy function between the
# input and output node's coordinates (x,y).
#-- To initialize a dynamic two-terminal "gate," we simply
# create two ports called "input" and "output," create
# a simple dynamic node to be our output node, and link
# it to our output port.
def __init__(inst, inputNode:DynamicNode, name:str=None,
network:DynamicNetwork=None, inPortName:str='input',
outPortName:str='output',
interaction:BinaryDifferentiableFunction=None,
outNodeName:str=None, initOutPos:Real=None):
# First do generic initialization for dynamic components.
_logger.normal("DynamicTwoTerminalGate.__init__(): Initializing "
"component named %s in network %s." %
(name, str(network)))
DynamicComponent.__init__(inst, name=name, network=network)
# Create our two ports, named "input" and "output".
_logger.normal("DynamicTwoTerminalGate.__init__(): Creating two "
"ports named %s and %s..." %
(inPortName, outPortName))
inst._addPorts(inPortName, outPortName)
# Remember our port name for future reference.
inst.inPortName = inPortName
inst.outPortName = outPortName
# Link our input node to our input port.
_logger.normal("DynamicTwoTerminalGate.__init__(): Linking input "
"node %s to our port named %s..." %
(str(inputNode), inPortName))
inst.inputNode = inputNode
inst.link(inPortName, inputNode)
_logger.normal("DynamicTwoTerminalGate.__init__(): Right after "
"linking input node, it is as follows:")
inputNode.printInfo()
# Create and remember our output node named "out".
initialOutputNodeName = outNodeName or 'out'
_logger.normal("DynamicTwoTerminalGate.__init__(): Creating output "
"node initially named %s..." % initialOutputNodeName)
inst.outputNode = DynamicNode(network, name=initialOutputNodeName)
if initOutPos is not None:
inst.outputNode.coord.position.value = Fixed(initOutPos)
_logger.normal("DynamicTwoTerminalGate.__init__(): Linking new "
"output node %s to our port named %s..." %
(str(inst.outputNode), outPortName))
# Link our port named "output" to our output node.
inst.link(outPortName, inst.outputNode)
# Set our interaction function to the given function.
_logger.normal("DynamicTwoTerminalGate.__init__(): Setting "
"interaction function to %s..." %
str(interaction))
if interaction != None: inst.interaction = interaction
# The below shouldn't be needed since the node was created in the network already.
#
## # Add our output node to the network.
##
## if network != None: network.addNode(inst.outputNode)
#__/ End method DynamicTwoTerminalGate.__init__().
@property
def inPortName(this):
if hasattr(this, '_inPortName'):
return this._inPortName
else:
return None
@inPortName.setter
def inPortName(this, inPortName:str):
this._inPortName = inPortName # Remember our portname.
if this.inPort != None: # If our port has been created,
this.inPort.name = inPortName # actually set the port's name.
if this.interaction != None: # If our potential has been set,
this.interaction.argName1 = inPortName # set its argument name.
@property
def outPortName(this):
if hasattr(this, '_outPortName'):
return this._outPortName
else:
return None
@outPortName.setter
def outPortName(this, outPortName:str):
this._outPortName = outPortName # Remember our portname.
if this.outPort != None: # If our port has been created,
this.outPort.name = outPortName # actually set the port's name.
if this.interaction != None: # If our potential has been set,
this.interaction.argName2 = outPortName # set its argument name.
@property
def inPort(this) -> Port:
ports = this.ports # Get our ports.
#print("ports is %s" % (str(ports)))
nPorts = len(ports) # Count how many ports we have.
if nPorts == 0: # If we have zero ports,
return None # return that our port is None.
#print("%d ports" % nPorts)
if nPorts != 2:
raise WrongNumberOfPortsException("Two-terminal gate %s was found to have %d ports (%s)!" %
(this, nPorts, str(ports)))
assert nPorts == 2 # We should have exactly two ports.
return list(ports.values())[0] # Return the first port.
@property
def outPort(this) -> Port:
ports = this.ports # Get our ports.
#print("ports is %s" % (str(ports)))
nPorts = len(ports) # Count how many ports we have.
if nPorts == 0: # If we have zero ports,
return None # return that our port is None.
#print("%d ports" % nPorts)
if nPorts != 2:
raise WrongNumberOfPortsException("Two-terminal gate %s was found to have %d ports (%s)!" %
(this, nPorts, str(ports)))
assert nPorts == 2 # We should have exactly two ports.
return list(ports.values())[1] # Return the second port.
@property
def interaction(this): # The interaction between input & output terminals.
if hasattr(this, '_interaction'):
return this._interaction
else:
return None
@interaction.setter
def interaction(this, interaction:BinaryDifferentiableFunction):
if this.interaction != None:
del this.interaction
if interaction != None:
if this.inPortName != None:
interaction.argName1 = this.inPortName
if this.outPortName != None:
interaction.argName2 = this.outPortName
this._interaction = interaction
this._addInteraction(interaction)
@interaction.deleter
def interaction(this):
if this.interaction != None:
this._removeInteraction(this._interaction)
del this._interaction
| 38.365482 | 103 | 0.597248 |
60e1511336d06c9df15c7217210e41d34d8b842c | 2,582 | py | Python | crawler/RISJbot/spmiddlewares/unwantedcontent.py | ausnews/ausnews-search | 47f33e7a19e28f4b42fe1286aaefd3c5c3f85741 | [
"MIT"
] | 10 | 2021-02-18T01:30:59.000Z | 2021-09-13T04:31:40.000Z | crawler/RISJbot/spmiddlewares/unwantedcontent.py | ausnews/ausnews-search | 47f33e7a19e28f4b42fe1286aaefd3c5c3f85741 | [
"MIT"
] | null | null | null | crawler/RISJbot/spmiddlewares/unwantedcontent.py | ausnews/ausnews-search | 47f33e7a19e28f4b42fe1286aaefd3c5c3f85741 | [
"MIT"
] | 2 | 2021-02-18T22:51:00.000Z | 2021-05-12T02:04:47.000Z | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
import logging
from scrapy.exceptions import NotConfigured
logger = logging.getLogger(__name__)
class UnwantedContent(object):
"""Spider middleware to process a response's selector by removing a
(configurable) set of elements from it. This is used to strip things
like figures from the content before passing the body on to the parsing
code. This makes it feasible to strip similar junk from all pages if
necessary.
Note that this leaves response.selector and response.body containing
different data. This is (1) an advantage, as it lets the spider save
the original page, and (2) a disadvantage, as the 'response' object
is confusingly no longer coherent. Caller beware!
Under the covers, Selectors contain an lxml.etree.Element document
    root, which is not exposed by the Selector interface. This is mutable
using the .remove method on parts of the selector.root document tree.
Unfortunately, there is no native content removal interface in scrapy.
As this is not using a published interface for Selector, it must be
considered risky. In particular, it is feasible (though not likely) that
scrapy could change its selector implementation to use a different
HTML/XML parsing library, at which point this would fail.
"""
def __init__(self, settings):
if not settings.getbool('UNWANTEDCONTENT_ENABLED'):
raise NotConfigured
self.xpaths = settings.get('UNWANTEDCONTENT_XPATHS')
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_spider_input(self, response, spider):
# This can pick up initial (possibly gzipped) sitemap
# Responses, before they have made it to the Spider and been decoded.
# In any case, we don't want to edit sitemap files (or RSS for that
        # matter). Filter this strictly to non-sitemap objects.
try:
sel = response.selector
except AttributeError:
# logger.warning("No selector for {}; probably non-HTML".format(
# response))
return None
if not response.meta.get('sitemap'):
for xpath_str in self.xpaths:
for node in sel.root.xpath(xpath_str):
node.getparent().remove(node)
return None # Success
| 39.723077 | 79 | 0.677769 |
b63dc4573897937a4d0d0b833695d6a4ecc16b4f | 35,243 | py | Python | src/bandersnatch/tests/plugins/test_storage_plugins.py | lepaperwan/bandersnatch | 251127a3b979f0e845b619a5bbd507db85a13f01 | [
"AFL-3.0"
] | 1 | 2021-04-14T05:41:33.000Z | 2021-04-14T05:41:33.000Z | src/bandersnatch/tests/plugins/test_storage_plugins.py | lepaperwan/bandersnatch | 251127a3b979f0e845b619a5bbd507db85a13f01 | [
"AFL-3.0"
] | 16 | 2020-12-18T22:46:36.000Z | 2022-03-02T18:06:15.000Z | src/bandersnatch/tests/plugins/test_storage_plugins.py | jiegec/bandersnatch | cc50bfecfd8b8912c35227aa8124c385ca1239ab | [
"AFL-3.0"
] | 1 | 2021-04-14T05:41:27.000Z | 2021-04-14T05:41:27.000Z | import atexit
import contextlib
import datetime
import hashlib
import json
import mimetypes
import os
import pathlib
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Union
from unittest import TestCase, mock
from mock_config import mock_config
import bandersnatch.storage
from bandersnatch.master import Master
from bandersnatch.mirror import BandersnatchMirror
from bandersnatch.package import Package
from bandersnatch.storage import PATH_TYPES
from bandersnatch_storage_plugins import filesystem, swift
if TYPE_CHECKING:
import swiftclient
BASE_SAMPLE_FILE = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "sample"
)
SWIFT_CONTAINER_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "swift_container.json"
)
def get_swift_file_attrs(path: Path, base: Path, container: str = "") -> Dict[str, Any]:
path = strip_dir_prefix(base, path, container=container)
if not path.is_absolute():
path = "/" / path
try:
last_modified = get_swift_date(
datetime.datetime.fromtimestamp(path.stat().st_mtime)
)
except Exception:
        print(list(path.parent.iterdir()), file=sys.stderr)
        raise
data = path.read_bytes()
posix_format = path.as_posix().lstrip("/")
name_start = 0
posix_base = base.as_posix()
if posix_base in posix_format:
name_start = posix_format.index(posix_base) + len(posix_base)
name = posix_format[name_start:]
mimetype, encoding = mimetypes.guess_type(posix_format)
if mimetype is None:
mimetype = "application/octet-stream"
if encoding is not None:
mimetype = f"{mimetype}; encoding={encoding}"
result_dict = {
"bytes": len(data),
"hash": hashlib.md5(data).hexdigest(),
"name": Path(name.lstrip("/")),
"content_type": mimetype,
"last_modified": last_modified,
}
return result_dict
def strip_dir_prefix(
base_dir: Path, subdir: Path, container: Optional[str] = None
) -> Path:
if container is not None:
base_dir = base_dir.joinpath(container)
base_dir_prefix = base_dir.as_posix()[1:]
result = subdir.as_posix()
if result.startswith(base_dir_prefix):
return type(base_dir)(result[len(base_dir_prefix) :].lstrip("/")) # noqa:E203
return type(base_dir)(result.lstrip("/"))
def iter_dir(
path: Path, base: Optional[Path] = None, recurse: bool = False, container: str = ""
) -> Iterator[Dict[str, Any]]:
if base is None:
base = path
if path.is_dir():
for sub_path in path.iterdir():
if sub_path.is_dir():
subdir_path = strip_dir_prefix(base, sub_path, container=container)
yield {"subdir": subdir_path, "container": container}
if recurse:
yield from iter_dir(
sub_path, base, recurse=recurse, container=container
)
else:
yield get_swift_file_attrs(sub_path, base, container=container)
else:
yield get_swift_file_attrs(path, base, container=container)
def get_swift_object_date(date: datetime.datetime) -> str:
return (
date.astimezone(datetime.timezone.utc)
.strftime("%a, %d %b %Y %H:%M:%S %Z")
.replace("UTC", "GMT")
)
def get_swift_date(date: datetime.datetime) -> str:
return date.astimezone(datetime.timezone.utc).isoformat()
class MockConnection:
"""
Compatible class to provide local files over the swift interface.
This is used to mock out the swift interface for testing against the
storage plugin system.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.tmpdir = kwargs.pop("tmpdir", None)
if not self.tmpdir:
self.tmpdir = tempfile.TemporaryDirectory()
atexit.register(self.tmpdir.cleanup)
self.base = pathlib.Path(self.tmpdir.name)
self.container_path = self.base / "bandersnatch"
self.container_path.mkdir(exist_ok=True)
_conn_mock = mock.MagicMock("swiftclient.client.Connection", autospec=True)
_connection = _conn_mock()
_connection.get_account.return_value = ("", "")
self._connection = _connection
def __getattr__(self, key: str) -> Any:
try:
return self.__getattribute__(key)
except AttributeError:
return self.__getattribute__("_connection").getattr(key)
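    # clean_path() maps a (container, object) pair onto the backing temp
    # directory, yielding an absolute filesystem Path for the fake object.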
def clean_path(self, container: PATH_TYPES, obj: PATH_TYPES) -> Path:
base_prefix = f"{self.tmpdir.name}/{container}"
if isinstance(obj, str):
obj = Path(obj)
if not any(
str(obj).startswith(prefix) for prefix in (base_prefix, base_prefix[1:])
):
obj = Path(f"{base_prefix}/{obj!s}")
if not obj.anchor:
obj = Path(f"/{obj!s}")
return obj
def _strip_prefix(self, prefix: str, container: Optional[str] = None) -> str:
base_dir_prefix = self.tmpdir.name[1:]
if container is not None:
base_dir_prefix = os.path.join(base_dir_prefix, container)
if prefix.startswith(base_dir_prefix):
return prefix[len(base_dir_prefix) :].lstrip("/") # noqa:E203
return prefix.lstrip("/")
def get_account(self) -> Tuple[Dict[Any, Any], Dict[Any, Any]]:
return {}, {}
def get_object(self, container: str, obj: str) -> Tuple[Dict[Any, Any], bytes]:
path = self.clean_path(container, obj)
if not path.exists():
from swiftclient.exceptions import ClientException
raise ClientException(f"No such path: {path!s}")
return {}, path.read_bytes()
def head_object(
self,
container: str,
obj: str,
headers: Optional[Dict[str, str]] = None,
query_string: Optional[str] = None,
) -> Dict[str, str]:
path = self.clean_path(container, obj)
if not path.exists():
from swiftclient.exceptions import ClientException
raise ClientException(f"No such path: {path!s}")
try:
max_date = max(path.stat().st_mtime, path.stat().st_ctime)
current_timestamp = get_swift_object_date(datetime.datetime.now())
path_contents = path.read_bytes()
except Exception:
from swiftclient.exceptions import ClientException
raise ClientException(f"Not a file: {path!s}")
name = path.as_posix()
mimetype, encoding = mimetypes.guess_type(name)
if mimetype is None:
mimetype = "application/octet-stream"
if encoding is not None:
mimetype = f"{mimetype}; encoding={encoding}"
return {
"date": current_timestamp,
"server": "Apache/2.4.29 (Ubuntu)",
"content-length": "{}".format(len(path_contents)),
"accept-ranges": "bytes",
"last-modified": f"{path.stat().st_mtime}",
"etag": hashlib.md5(path_contents).hexdigest(),
"x-timestamp": f"{max_date}",
"content-type": mimetype,
"x-trans-id": "txfcbf2e82791411eaa6bd-cf51efeb8527",
"x-openstack-request-id": "txfcbf2e82791411eaa6bd-cf51efeb8527",
}
def post_object(
self,
container: str,
obj: str,
headers: Dict[str, str],
response_dict: Optional[Dict[str, Any]] = None,
) -> None:
path = self.clean_path(container, obj)
path.touch()
def _get_container(
self,
container: str,
marker: Optional[str] = None,
limit: Optional[int] = None,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
end_marker: Optional[str] = None,
path: Optional[Path] = None,
full_listing: bool = False,
headers: Optional[Dict[str, str]] = None,
query_string: Optional[str] = None,
) -> List[Dict[str, Any]]:
base = self.base
if container:
base = base / container
if prefix:
base = self.clean_path(container, prefix)
if not base.is_dir():
return []
if delimiter:
files = iter_dir(base, base=None, recurse=False, container=container)
else:
files = iter_dir(base, base=None, recurse=True, container=container)
return list(files)
def get_container(
self,
container: str,
marker: Optional[str] = None,
limit: Optional[int] = None,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
end_marker: Optional[str] = None,
path: Optional[Path] = None,
full_listing: bool = False,
headers: Optional[Dict[str, str]] = None,
query_string: Optional[str] = None,
) -> List[Dict[str, Any]]:
with open(SWIFT_CONTAINER_FILE) as fh:
contents = json.load(fh)
if prefix:
contents = [p for p in contents if p["name"].startswith(prefix)]
results = self._get_container(
container, limit=limit, prefix=prefix, delimiter=delimiter
)
if delimiter:
subdirs = set()
prefix = "" if not prefix else prefix
prefix_delims = prefix.count(delimiter)
for entry in contents:
split_entry = entry["name"].split(delimiter)
if len(split_entry[prefix_delims:]) > 1:
subdirs.add(delimiter.join(split_entry[: prefix_delims + 1]))
else:
results.append(entry)
for subdir in subdirs:
results.append({"subdir": subdir})
else:
results.extend(contents)
if limit:
results = results[:limit]
return results
def copy_object(
self,
container: str,
obj: str,
destination: str,
headers: Optional[Dict[str, str]] = None,
fresh_metadata: Any = None,
response_dict: Optional[Dict[str, Any]] = None,
) -> None:
# destination path always starts with container/
dest_container, _, dest_path = destination.partition("/")
dest = self.clean_path(dest_container, dest_path)
base = self.clean_path(container, obj)
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
dest.write_bytes(base.read_bytes())
def put_object(
self,
container: str,
obj: str,
contents: Union[str, bytes],
content_length: Optional[int] = None,
etag: Any = None,
chunk_size: Optional[int] = None,
content_type: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
query_string: Optional[str] = None,
response_dict: Optional[Dict[str, Any]] = None,
) -> None:
dest = self.clean_path(container, obj)
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
if headers and "X-Symlink-Target" in headers:
src_container, _, src_path = headers["X-Symlink-Target"].partition("/")
src = self.clean_path(src_container, src_path)
if os.name != "nt":
os.symlink(str(src), str(dest))
else:
shutil.copyfile(str(src), str(dest))
return None
if isinstance(contents, bytes):
dest.write_bytes(contents)
else:
dest.write_text(contents)
def delete_object(
self,
container: str,
obj: str,
query_string: Optional[str] = None,
response_dict: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> None:
target = self.clean_path(container, obj)
if not target.exists():
from swiftclient.exceptions import ClientException
raise ClientException(f"File does not exist: {target!s}")
target.unlink()
if not list(target.parent.iterdir()):
target.parent.rmdir()
class BasePluginTestCase(TestCase):
tempdir = None
cwd = None
backend: Optional[str] = None
config_contents = """\
[mirror]
directory = srv/pypi
storage-backend = {0}
master = https://pypi.org
json = false
timeout = 10
global-timeout = 18000
verifiers = 3
diff-file = {{mirror_directory}}/mirrored-files
diff-append-epoch = false
stop-on-error = false
hash-index = false
workers = 3
; keep_index_versions = 0
; log-config = /etc/bandersnatch-log.conf
"""
def setUp(self) -> None:
if self.backend is None:
raise unittest.SkipTest("Skipping base test case")
self.cwd = os.getcwd()
self.tempdir = tempfile.TemporaryDirectory()
self.pkgs: List[Package] = []
self.container: Optional[str] = None
self.config_data = mock_config(self.config_contents.format(self.backend))
os.chdir(self.tempdir.name)
self.setUp_backEnd()
self.setUp_plugin()
self.setUp_mirror()
self.setUp_Structure()
def setUp_dirs(self) -> None:
self.web_base_path = os.path.join(self.mirror_base_path, "web")
self.json_base_path = os.path.join(self.web_base_path, "json")
self.pypi_base_path = os.path.join(self.web_base_path, "pypi")
self.simple_base_path = os.path.join(self.web_base_path, "simple")
paths = (self.json_base_path, self.pypi_base_path, self.simple_base_path)
for path in paths:
os.makedirs(path, exist_ok=True)
def setUp_backEnd(self) -> None:
pypi_dir = mirror_path = "srv/pypi"
if self.backend == "swift":
self.container = "bandersnatch"
pypi_dir = f"{self.container}/{pypi_dir}"
self.setUp_swift()
assert self.tempdir
self.mirror_base_path = os.path.join(self.tempdir.name, pypi_dir)
self.setUp_dirs()
target_sample_file = "sample"
if self.container is not None:
target_sample_file = f"{self.container}/{target_sample_file}"
assert self.tempdir
self.sample_file = os.path.join(self.tempdir.name, target_sample_file)
shutil.copy(BASE_SAMPLE_FILE, self.sample_file)
if self.backend == "swift":
self.mirror_path = Path(mirror_path)
else:
self.mirror_path = Path(self.mirror_base_path)
def setUp_mirror(self) -> None:
self.master = Master(url="https://foo.bar.com")
self.mirror = BandersnatchMirror(self.mirror_path, self.master, self.backend)
pkg = Package("foobar", serial=1)
pkg._metadata = {
"info": {"name": "foobar", "version": "1.0"},
"releases": mock.Mock(),
}
self.pkgs.append(pkg)
def setUp_plugin(self) -> None:
self.plugin = next(
iter(
bandersnatch.storage.storage_backend_plugins(
self.backend, clear_cache=True
)
)
)
def setUp_mirrorDirs(self) -> None:
pypi_dir = (
"srv/pypi" if self.container is None else f"{self.container}/srv/pypi"
)
assert self.tempdir
self.mirror_base_path = os.path.join(self.tempdir.name, pypi_dir)
self.web_base_path = os.path.join(self.mirror_base_path, "web")
self.json_base_path = os.path.join(self.web_base_path, "json")
self.pypi_base_path = os.path.join(self.web_base_path, "pypi")
self.simple_base_path = os.path.join(self.web_base_path, "simple")
os.makedirs(self.json_base_path, exist_ok=True)
os.makedirs(self.pypi_base_path, exist_ok=True)
os.makedirs(self.simple_base_path, exist_ok=True)
def setUp_swift(self) -> None:
self.setUp_swiftVars()
self.conn_patcher = mock.patch(
"swiftclient.client.Connection", side_effect=MockConnection
)
Connection = self.conn_patcher.start()
Connection.get_account.return_value = ("", "")
from bandersnatch_storage_plugins.swift import SwiftStorage
@contextlib.contextmanager
def connection(o: SwiftStorage) -> Iterator["swiftclient.client.Connection"]:
yield Connection(tmpdir=self.tempdir)
def is_dir(self: SwiftStorage, path: Path) -> bool:
"""Check whether the provided path is a directory."""
target_path = str(path)
if target_path == ".":
target_path = ""
if target_path and not target_path.endswith("/"):
target_path = f"{target_path}/"
files = []
with self.connection() as conn:
try:
files = conn.get_container(
self.default_container, prefix=target_path
)
except OSError:
return False
return bool(files)
self.swift_patcher = mock.patch.object(SwiftStorage, "connection", connection)
self.is_dir_patcher = mock.patch(
"bandersnatch_storage_plugins.swift.SwiftStorage.is_dir", is_dir
)
self.swift_patcher.start()
self.is_dir_patcher.start()
def setUp_swiftVars(self) -> None:
swift_keys = (
"OS_USER_DOMAIN_NAME",
"OS_PROJECT_DOMAIN_NAME",
"OS_PASSWORD",
"OS_USER_ID",
"OS_USERNAME",
"OS_PROJECT_NAME",
"OS_TENANT_NAME",
"OS_AUTH_URL",
"OS_AUTHENTICATION_URL",
"OS_STORAGE_URL",
"OS_REGION_NAME",
"OS_PROJECT_ID",
)
self.original_swiftvars = {
k: os.environ[k] for k in swift_keys if k in os.environ
}
self.os_dict = {
"OS_USER_DOMAIN_NAME": "default",
"OS_PROJECT_DOMAIN_NAME": "default",
"OS_PASSWORD": "test123",
"OS_USER_ID": "test_userid",
"OS_PROJECT_NAME": "test_project",
"OS_AUTH_URL": "https://keystone.localhost:5000/v3",
"OS_STORAGE_URL": "https://swift-proxy.localhost:8080/v1/AUTH_test_project",
"OS_REGION_NAME": "test_region",
}
os.environ.update(self.os_dict)
def tearDown_swiftVars(self) -> None:
for k in self.os_dict.keys():
if k in os.environ:
del os.environ[k]
os.environ.update(self.original_swiftvars)
self.is_dir_patcher
self.swift_patcher.stop()
self.conn_patcher.stop()
def setUp_Structure(self) -> None:
web_files = [
"last-modified",
"local-stats/days",
"packages/2.7/f/foo/foo.whl",
"packages/3.8/f/foo/foo.whl",
"packages/any/f/foo/foo.zip",
"simple/foobar/index.html",
"simple/index.html",
]
for path in web_files:
p = pathlib.Path(self.web_base_path) / path
p.parent.mkdir(parents=True, exist_ok=True)
p.touch()
paths = ["generation", "sample", "status"]
for path in paths:
p = pathlib.Path(self.mirror_base_path) / path
p.touch()
pathlib.Path(self.mirror_base_path).joinpath("status").write_text("20")
pathlib.Path(self.web_base_path).joinpath("simple/index.html").write_text(
"""<!DOCTYPE html>
<html>
<head>
<title>Simple Index</title>
</head>
<body>
<a href="foobar/">foobar</a><br/>
</body>
</html>""".strip()
)
def tearDown(self) -> None:
if self.tempdir:
assert self.cwd
os.chdir(self.cwd)
self.tempdir.cleanup()
if self.backend == "swift":
self.tearDown_swiftVars()
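# Backend-agnostic suite: every test below runs once per storage backend via the
# concrete subclasses (filesystem, swift) defined at the bottom of this module.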
class BaseStoragePluginTestCase(BasePluginTestCase):
plugin_map = {
"filesystem": filesystem.FilesystemStorage,
"swift": swift.SwiftStorage,
}
path_backends = {
"filesystem": pathlib.Path,
"swift": swift.SwiftPath,
}
base_find_contents = r"""
.lock
generation
sample
status
web
web{0}json
web{0}last-modified
web{0}local-stats
web{0}local-stats{0}days
web{0}local-stats{0}days{0}.swiftkeep
web{0}packages
web{0}packages{0}2.7
web{0}packages{0}2.7{0}f
web{0}packages{0}2.7{0}f{0}foo
web{0}packages{0}2.7{0}f{0}foo{0}foo.whl
web{0}packages{0}3.8
web{0}packages{0}3.8{0}f
web{0}packages{0}3.8{0}f{0}foo
web{0}packages{0}3.8{0}f{0}foo{0}foo.whl
web{0}packages{0}any
web{0}packages{0}any{0}f
web{0}packages{0}any{0}f{0}foo
web{0}packages{0}any{0}f{0}foo{0}foo.zip
web{0}pypi
web{0}simple
web{0}simple{0}foobar
web{0}simple{0}foobar{0}index.html
web{0}simple{0}index.html""".format(
os.sep
).strip()
if sys.platform == "win32":
base_find_contents = base_find_contents.replace(".lock\n", "")
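    # base_find_contents is the expected plugin.find() listing of the fixture
    # tree; subclasses strip entries that do not exist on their backend.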
def test_plugin_type(self) -> None:
assert self.backend
self.assertTrue(isinstance(self.plugin, self.plugin_map[self.backend]))
self.assertTrue(self.plugin.PATH_BACKEND is self.path_backends[self.backend])
def test_json_paths(self) -> None:
config = mock_config(self.config_contents).config
mirror_dir = self.plugin.PATH_BACKEND(config.get("mirror", "directory"))
packages = {
"bandersnatch": [
mirror_dir / "web/json/bandersnatch",
mirror_dir / "web/pypi/bandersnatch",
],
"black": [mirror_dir / "web/json/black", mirror_dir / "web/pypi/black"],
}
for name, json_paths in packages.items():
with self.subTest(name=name, json_paths=json_paths):
self.assertEqual(self.plugin.get_json_paths(name), json_paths)
def test_canonicalize_package(self) -> None:
packages = (
("SQLAlchemy", "sqlalchemy"),
("mypy_extensions", "mypy-extensions"),
("py_ecc", "py-ecc"),
("Requests", "requests"),
("oslo.utils", "oslo-utils"),
)
for name, normalized in packages:
with self.subTest(name=name, normalized=normalized):
self.assertEqual(self.plugin.canonicalize_package(name), normalized)
def test_hash_file(self) -> None:
path = self.plugin.PATH_BACKEND(self.sample_file)
md5_digest = "125765989403df246cecb48fa3e87ff8"
sha256_digest = (
"95c07c174663ebff531eed59b326ebb3fa95f418f680349fc33b07dfbcf29f18"
)
# newlines make the hash different here
if sys.platform == "win32":
md5_digest = "91ef8f60d130b312af17543b34bfb372"
sha256_digest = (
"398e162e08d9af1d87c8eb2ee46d7c64248867afbe30dee807122022dc497332"
)
expected_hashes = (
("md5", md5_digest),
("sha256", sha256_digest),
)
for hash_func, hash_val in expected_hashes:
with self.subTest(hash_func=hash_func, hash_val=hash_val):
self.assertEqual(
self.plugin.hash_file(path, function=hash_func), hash_val
)
def test_iter_dir(self) -> None:
base_path = self.plugin.PATH_BACKEND(self.simple_base_path)
lists = [
(base_path.joinpath("foobar"), True),
(base_path.joinpath("index.html"), False),
]
self.assertListEqual(
list(sorted(base_path.iterdir(), key=lambda p: str(p))),
list(sorted([elem[0] for elem in lists], key=lambda p: str(p))),
)
for expected, is_dir in lists:
with self.subTest(is_dir=is_dir, produced_path=expected):
self.assertIs(is_dir, self.plugin.is_dir(expected))
if is_dir is False:
self.assertIs(True, self.plugin.is_file(expected))
def test_rewrite(self) -> None:
target_file = os.path.join(self.mirror_base_path, "example.txt")
replace_with = "new text"
with open(target_file, "w") as fh:
fh.write("sample text")
with self.plugin.rewrite(target_file) as fh: # type: ignore
fh.write(replace_with)
with open(target_file) as fh:
self.assertEqual(fh.read().strip(), replace_with)
def test_update_safe(self) -> None:
target_file = os.path.join(self.mirror_base_path, "example.txt")
replace_with = "new text"
with open(target_file, "w") as fh:
fh.write("sample text")
with self.plugin.update_safe(target_file, mode="w") as fh: # type: ignore
fh.write(replace_with)
with open(target_file) as fh:
self.assertEqual(fh.read().strip(), replace_with)
def test_compare_files(self) -> None:
target_file1 = os.path.join(self.mirror_base_path, "cmp_example1.txt")
target_file2 = os.path.join(self.mirror_base_path, "cmp_example2.txt")
target_file3 = os.path.join(self.mirror_base_path, "cmp_example3.txt")
for fn in (target_file1, target_file2):
with open(fn, "w") as fh:
fh.write("sample text")
with open(target_file3, "w") as fh:
fh.write("some other text")
files = [target_file1, target_file2, target_file3]
comparisons = (
(target_file1, target_file2, True),
(target_file1, target_file3, False),
(target_file2, target_file3, False),
)
for cmp_file1, cmp_file2, rv in comparisons:
with self.subTest(cmp_file1=cmp_file1, cmp_file2=cmp_file2, rv=rv):
msg = "file1 contents: {!r}\n\nfile2 contents: {!r}".format(
self.plugin.read_file(cmp_file1), self.plugin.read_file(cmp_file2)
)
self.assertTrue(
self.plugin.compare_files(cmp_file1, cmp_file2) is rv, msg
)
for fn in files:
os.unlink(fn)
def test_find(self) -> None:
base_path = self.mirror_base_path
if self.backend == "swift":
base_path = base_path.lstrip("/")
self.assertEqual(self.base_find_contents, self.plugin.find(base_path))
def test_open_file(self) -> None:
self.plugin.write_file(os.path.join(self.mirror_base_path, "status"), "20")
rvs = (
(
os.path.join(self.web_base_path, "simple/index.html"),
"""\
<!DOCTYPE html>
<html>
<head>
<title>Simple Index</title>
</head>
<body>
<a href="foobar/">foobar</a><br/>
</body>
</html>""",
),
(os.path.join(self.mirror_base_path, "status"), "20"),
)
for path, rv in rvs:
with self.subTest(path=path, rv=rv):
with self.plugin.open_file(path, text=True) as fh:
self.assertEqual(fh.read(), rv)
def test_write_file(self) -> None:
data: List[Union[str, bytes]] = ["this is some text", b"this is some text"]
tmp_path = os.path.join(self.mirror_base_path, "test_write_file.txt")
for write_val in data:
with self.subTest(write_val=write_val):
self.plugin.write_file(tmp_path, write_val)
rv: Union[str, bytes]
if not isinstance(write_val, str):
rv = self.plugin.PATH_BACKEND(tmp_path).read_bytes()
else:
rv = self.plugin.PATH_BACKEND(tmp_path).read_text()
self.assertEqual(rv, write_val)
os.unlink(tmp_path)
def test_read_file(self) -> None:
self.plugin.write_file(os.path.join(self.mirror_base_path, "status"), "20")
rvs = (
(
self.plugin.PATH_BACKEND(self.web_base_path).joinpath(
"simple/index.html"
),
"""\
<!DOCTYPE html>
<html>
<head>
<title>Simple Index</title>
</head>
<body>
<a href="foobar/">foobar</a><br/>
</body>
</html>""",
),
(self.plugin.PATH_BACKEND(self.mirror_base_path).joinpath("status"), "20"),
)
for path, rv in rvs:
with self.subTest(path=path, rv=rv):
self.assertEqual(self.plugin.read_file(path), rv)
def test_delete(self) -> None:
delete_path = self.plugin.PATH_BACKEND(self.mirror_base_path).joinpath(
"test_delete.txt"
)
delete_dir = self.plugin.PATH_BACKEND(self.mirror_base_path).joinpath(
"test_delete_dir"
)
delete_dir.mkdir()
delete_path.touch()
for path in [delete_path, delete_dir]:
with self.subTest(path=path):
self.assertTrue(path.exists())
self.plugin.delete(path)
self.assertFalse(path.exists())
def test_delete_file(self) -> None:
delete_path = self.plugin.PATH_BACKEND(self.mirror_base_path).joinpath(
"test_delete.txt"
)
print(f"delete path: {delete_path!r}")
delete_path.touch()
self.assertTrue(delete_path.exists())
self.plugin.delete_file(delete_path)
self.assertFalse(delete_path.exists())
def test_copy_file(self) -> None:
file_content = "this is some data"
dest_file = os.path.join(self.mirror_base_path, "temp_file.txt")
with tempfile.NamedTemporaryFile(mode="w", delete=False) as tf:
atexit.register(os.unlink, tf.name)
tf.write(file_content)
tf.flush()
self.plugin.copy_file(tf.name, dest_file)
with open(dest_file) as fh:
copied_content = fh.read()
os.unlink(dest_file)
self.assertEqual(copied_content, file_content)
def test_mkdir(self) -> None:
self.plugin.mkdir(os.path.join(self.mirror_base_path, "test_dir"))
self.assertTrue(
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).exists()
)
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).rmdir()
def test_rmdir(self) -> None:
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).mkdir()
self.assertTrue(
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).exists()
)
self.plugin.rmdir(
self.plugin.PATH_BACKEND(os.path.join(self.mirror_base_path, "test_dir"))
)
self.assertFalse(
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).exists()
)
def test_is_dir(self) -> None:
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).mkdir()
self.assertTrue(
self.plugin.is_dir(
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
)
)
)
self.plugin.rmdir(
self.plugin.PATH_BACKEND(os.path.join(self.mirror_base_path, "test_dir")),
force=True,
)
def test_is_file(self) -> None:
delete_path = self.plugin.PATH_BACKEND(self.mirror_base_path).joinpath(
"test_delete.txt"
)
delete_path.touch()
self.assertTrue(self.plugin.is_file(delete_path))
delete_path.unlink()
def test_symlink(self) -> None:
file_content = "this is some text"
test_path = self.plugin.PATH_BACKEND(self.mirror_base_path).joinpath(
"symlink_file.txt"
)
test_path.write_text(file_content)
symlink_dest = test_path.parent.joinpath("symlink_dest.txt")
self.plugin.symlink(test_path, symlink_dest)
self.assertEqual(self.plugin.read_file(symlink_dest), file_content)
def test_get_hash(self) -> None:
path = self.plugin.PATH_BACKEND(self.sample_file)
md5_digest = "125765989403df246cecb48fa3e87ff8"
sha256_digest = (
"95c07c174663ebff531eed59b326ebb3fa95f418f680349fc33b07dfbcf29f18"
)
# newlines make the hash different here
if sys.platform == "win32":
md5_digest = "91ef8f60d130b312af17543b34bfb372"
sha256_digest = (
"398e162e08d9af1d87c8eb2ee46d7c64248867afbe30dee807122022dc497332"
)
expected_hashes = (
("md5", md5_digest),
("sha256", sha256_digest),
)
for fn, hash_val in expected_hashes:
with self.subTest(fn=fn, hash_val=hash_val):
self.assertEqual(self.plugin.get_hash(path, function=fn), hash_val)
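# Concrete parametrisations: each subclass re-runs the whole
# BaseStoragePluginTestCase suite against a single storage plugin.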
class TestFilesystemStoragePlugin(BaseStoragePluginTestCase):
backend = "filesystem"
base_find_contents = "\n".join(
[
line
for line in BaseStoragePluginTestCase.base_find_contents.split("\n")
if "web{0}local-stats{0}days{0}.swiftkeep".format(os.path.sep)
!= line.strip()
]
)
class TestSwiftStoragePlugin(BaseStoragePluginTestCase):
backend = "swift"
base_find_contents = BaseStoragePluginTestCase.base_find_contents.replace(
".lock\n", ""
).strip()
def setUp(self) -> None:
if os.name == "nt":
raise unittest.SkipTest("Skipping swift tests on windows")
super().setUp()
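    # In these tests swift has no real directories; they are emulated through
    # object prefixes, so the overrides below create and remove a placeholder
    # object instead of an actual directory.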
def test_mkdir(self) -> None:
tmp_filename = next(tempfile._get_candidate_names()) # type: ignore
tmp_file = self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir", tmp_filename)
)
tmp_file.write_text("")
self.assertTrue(
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).exists()
)
tmp_file.unlink()
def test_rmdir(self) -> None:
tmp_filename = next(tempfile._get_candidate_names()) # type: ignore
tmp_file = self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir", tmp_filename)
)
tmp_file.write_text("")
self.assertTrue(
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).exists()
)
tmp_file.unlink()
self.assertFalse(
self.plugin.PATH_BACKEND(
os.path.join(self.mirror_base_path, "test_dir")
).exists()
)
def test_copy_file(self) -> None:
file_content = "this is some data"
dest_file = os.path.join(self.mirror_base_path, "temp_file.txt")
assert self.tempdir
with tempfile.NamedTemporaryFile(
dir=os.path.join(self.tempdir.name, "bandersnatch"), mode="w"
) as tf:
tf.write(file_content)
tf.flush()
self.plugin.copy_file(tf.name, dest_file)
with open(dest_file) as fh:
copied_content = fh.read()
os.unlink(dest_file)
self.assertEqual(copied_content, file_content)
if __name__ == "__main__":
unittest.main()
| 35.671053 | 88 | 0.598076 |
668bb57fe751c374625ca8ca348cc8baeb37d8d1 | 427 | py | Python | CalibTracker/SiStripESProducers/python/DBWriter/SiStripBadModuleDummyDBWriter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CalibTracker/SiStripESProducers/python/DBWriter/SiStripBadModuleDummyDBWriter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CalibTracker/SiStripESProducers/python/DBWriter/SiStripBadModuleDummyDBWriter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
siStripBadModuleDummyDBWriter = cms.EDAnalyzer("SiStripBadModuleDummyDBWriter",
    record = cms.string(""),
    OpenIovAt = cms.untracked.string("beginOfTime"),
    OpenIovAtTime = cms.untracked.uint32(1)
)
| 32.846154 | 93 | 0.447307 |
f2c86b9a677b99448180ec4faca44a7fbd4edb62 | 2,008 | py | Python | tests/tensor/test_sum.py | nlp-greyfoss/metagrad | 0f32f177ced1478f0c75ad37bace9a9fc4044ba3 | ["MIT"] | 7 | 2022-01-27T05:38:02.000Z | 2022-03-30T01:48:00.000Z | tests/tensor/test_sum.py | nlp-greyfoss/metagrad | 0f32f177ced1478f0c75ad37bace9a9fc4044ba3 | ["MIT"] | null | null | null | tests/tensor/test_sum.py | nlp-greyfoss/metagrad | 0f32f177ced1478f0c75ad37bace9a9fc4044ba3 | ["MIT"] | 2 | 2022-02-22T07:47:02.000Z | 2022-03-22T08:31:59.000Z |
import numpy as np
from metagrad.tensor import Tensor
def test_simple_sum():
x = Tensor([1, 2, 3], requires_grad=True)
y = x.sum()
assert y.data == 6
y.backward()
assert x.grad.data.tolist() == [1, 1, 1]
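# d(sum)/dx_i = 1 for every element, so backward() broadcasts the upstream
# gradient (a scalar here) across the whole input tensor.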
def test_sum_with_grad():
x = Tensor([1, 2, 3], requires_grad=True)
y = x.sum()
y.backward(Tensor(3))
assert x.grad.data.tolist() == [3, 3, 3]
def test_matrix_sum():
x = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad=True) # (2,3)
y = x.sum()
assert y.data == 21
y.backward()
assert x.grad.data.tolist() == np.ones_like(x.data).tolist()
def test_matrix_with_axis():
x = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad=True) # (2,3)
y = x.sum(0) # keepdims = False
assert y.shape == (3,)
assert y.data.tolist() == [5, 7, 9]
y.backward([1, 1, 1])
assert x.grad.data.tolist() == [[1, 1, 1], [1, 1, 1]]
def test_matrix_with_keepdims():
x = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad=True) # (2,3)
y = x.sum(axis=0, keepdims=True) # keepdims = True
assert y.shape == (1, 3)
assert y.data.tolist() == [[5, 7, 9]]
y.backward([1, 1, 1])
assert x.grad.data.tolist() == [[1, 1, 1], [1, 1, 1]]
def test_complex_matrix_with_axis():
x = Tensor(np.arange(24).reshape(2, 3, 4), requires_grad=True) # (2,3,4)
y = x.sum((0, 1)) # keepdims = False
assert y.shape == (4,)
assert y.data.tolist() == [60, 66, 72, 78]
y.backward([2, 2, 2, 2])
assert x.grad.data.tolist() == [[[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]],
[[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]]
def test_sum_with_negative_axis():
x = Tensor(np.arange(16).reshape(2, 2, 4), requires_grad=True) # (2,2,4)
y = x.sum(-2) # keepdims = False
assert y.shape == (2, 4)
assert y.data.tolist() == [[4, 6, 8, 10], [20, 22, 24, 26]]
y.sum().backward()
assert x.grad.data.tolist() == [[[1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [1, 1, 1, 1]]]
| 24.790123 | 95 | 0.523406 |
5f043d8304b4b5f12af8c825ea732c7984222ff3 | 4,125 | py | Python | cnn_tut3.py | sachinumrao/pytorch_tutorials | 113b17875e6858ea50ececd29948d0054f3a535c | ["MIT"] | null | null | null | cnn_tut3.py | sachinumrao/pytorch_tutorials | 113b17875e6858ea50ececd29948d0054f3a535c | ["MIT"] | null | null | null | cnn_tut3.py | sachinumrao/pytorch_tutorials | 113b17875e6858ea50ececd29948d0054f3a535c | ["MIT"] | null | null | null |
# CNN with MNIST data
# import dependencies
import torch
import torchvision
import torchvision.transforms as transforms
# transform dataset images into tensors and normalize them
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.13,),(0.31,))])
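# (0.13, 0.31) approximate the MNIST training-set mean and std
# (commonly quoted as 0.1307 and 0.3081)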
# load dataset
trainset = torchvision.datasets.MNIST(root='./data',
train=True,
download=True,
transform=transform)
# create iterable data object
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=32,
shuffle=True,
num_workers=4)
testset = torchvision.datasets.MNIST(root='./data',
train=False,
download=True,
transform=transform)
testloader = torch.utils.data.DataLoader(testset,
batch_size=32,
shuffle=True,
num_workers=4)
# define convolutional network
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
# define convolution architecture
        # conv1 params: in_channels=1 (grayscale MNIST), out_channels=8, kernel_size=3
# self.conv1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
self.conv1 = nn.Conv2d(1,8,3)
        # batch normalization params: num_features=8
self.batch_norm1 = nn.BatchNorm2d(8)
# max-pool layer params: kernel_size=2, stride=2
self.pool = nn.MaxPool2d(2,2)
self.conv2 = nn.Conv2d(8,16,3)
self.batch_norm2 = nn.BatchNorm2d(16)
self.fc1 = nn.Linear(16*5*5, 120)
self.droput1 = nn.Dropout(0.10)
self.fc2 = nn.Linear(120, 60)
self.droput2 = nn.Dropout(0.05)
self.fc3 = nn.Linear(60,10)
def forward(self, x):
# pass the input through first convolutional layer
out = self.pool(F.relu(self.conv1(x)))
out = self.batch_norm1(out)
# print("Conv1 Output shape : ", out.shape)
# pass through second conv layer
out = self.pool(F.relu(self.conv2(out)))
out = self.batch_norm2(out)
# print("Conv2 Output shape : ", out.shape)
        # pass through the fully connected layers
# reshape the input for linear layer
        # 16*5*5 = output channels of conv2 times the remaining spatial size:
        # 28x28 -> conv1(3x3) 26x26 -> pool 13x13 -> conv2(3x3) 11x11 -> pool 5x5
        out = out.view(-1, 16*5*5)
# apply one fully connected layer and pass through relu
#debug
# print("Flattend Output shape : ", out.shape)
out = F.relu(self.fc1(out))
out = self.droput1(out)
# print("FC1 Output shape : ", out.shape)
out = F.relu(self.fc2(out))
out = self.droput2(out)
# print("FC2 Output shape : ", out.shape)
        # no activation on the output layer: CrossEntropyLoss expects raw logits
        out = self.fc3(out)
# debug
# print("Final Output shape : ", out.shape)
return out
model = Model()
# define loss and optimizer
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.005)
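# note: CrossEntropyLoss applies log-softmax internally, so the model's forward
# pass returns raw logits (see Model.forward above)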
# train the network in epochs
epochs = 10
for epoch in range(epochs):
running_loss = 0.0
for i,data in enumerate(trainloader, 0):
inputs, labels = data
# reset the gradients
optimizer.zero_grad()
# forward pass
outputs = model(inputs)
#print(outputs.shape)
# debug
# print("Input size : ", inputs.shape)
# print("Output size : ", outputs.shape)
# calculate loss
loss = criterion(outputs, labels)
# backward pass / calculate gradients
loss.backward()
# take one grad step
optimizer.step()
# print stats
if (i+1)%100 == 0:
running_loss = loss.item()
print("Epoch : ", epoch+1, " , Step : ", i+1, " , Loss : ",running_loss)
# test the model; eval() disables dropout and uses running batch-norm statistics
model.eval()
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print("Accuracy : ", correct/total)
| 29.049296 | 89 | 0.622545 |