authorization_token_management = {
"authorization_check": {
"name": "/authorization/check",
"desc": """\
<p><b>GET</b> or <b>POST</b> to this endpoint to determine if your Authorization
header is still valid or if it has expired.</p>""",
},
"authorization_refresh": {
"name": "/authorization/refresh",
"desc": """\
<p> Use the standard 'Authorization' header and <b>POST</b> an empty request to
this route to receive a new Auth token based on the previous one.</p>
<p> On the back end, this route reads the incoming 'Authorization' header and,
even if the JWT token is expired, will check the 'login' and 'password' (hash)
keys: if they check out, you get a 200 and a brand new token.</p>
<p> Finally, the KDM API does NOT use refresh tokens (it just feels like
overkill, you know?).</p>\
"""
},
}
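
# A minimal client-side sketch (illustration only, not part of the documented
# payloads above) of calling the /authorization/refresh route. The base URL,
# the 'requests' dependency and the helper name are assumptions made for this
# example; the API itself only requires the standard 'Authorization' header
# and an empty POST body, as described above.
def _example_authorization_refresh(auth_token, api_url):
    """ POST an empty body to /authorization/refresh; the API reads the
    'Authorization' header and, if the login/password (hash) check out,
    returns a 200 and a brand new token. """
    import requests
    response = requests.post(
        api_url + '/authorization/refresh',
        headers={'Authorization': auth_token},
    )
    return response   # a 200 here means you got a brand new token back
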
administrative_views_and_data = {
"admin_view_panel": {
"name": "/admin/view/panel",
"methods": ["GET","OPTIONS"],
"desc": """\
<p>Access the API Admin panel. Uses HTTP basic auth (no cookies/no sessions)
and requires a user have the 'admin' bit flipped on their user.</p>
""",
},
"admin_get_user_data": {
"name": "/admin/get/user_data",
"methods": ["GET","OPTIONS"],
"desc": """\
<p>Retrieves a nice, juicy hunk of JSON re: recent users of the API.</p>
""",
},
"admin_get_logs": {
"name": "/admin/get/logs",
"methods": ["GET","OPTIONS"],
"desc": """\
<p>Dumps the contents of a number of system logs from the local filesystem where
the API is running and represents them as JSON.</p>
""",
},
}
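
# Hedged sketch of hitting the admin panel with HTTP basic auth, per the
# 'desc' above (no cookies, no sessions). The login, password, base URL and
# the 'requests' dependency are placeholders/assumptions.
def _example_admin_view_panel(login, password, api_url):
    """ GET /admin/view/panel using HTTP basic auth; the user needs the
    'admin' bit flipped on their user record. """
    import requests
    return requests.get(api_url + '/admin/view/panel', auth=(login, password))
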
user_management = {
"user_get": {
"name": "/user/get/<user_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Retrieve a serialized version of the user who owns <user_id>,
to include some additional usage and meta facts about that user.</p>
<p>Like many of the <code><b>GET</b></code> routes supported by the KD:M API,
this route will return user info whether you use <code><b>POST</b></code> or
any other supported method.</p>
""",
},
"user_dashboard": {
"name": "/user/dashboard/<user_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>This fetches a serialized version of the user that includes the
<code>/world</code> output as well as a bunch of info about the
user, including their friends, settlements they own or are
playing in, etc.</p>
<p>Here's a run-down of the key elements:</p>
<pre><code>{
"is_application_admin": true,
"meta": {...},
"user": {...},
"preferences": [...],
"dashboard": {
"campaigns": [...],
"settlements": [...],
},
}</code></pre>
<p>The top-level <code>dashboard</code> element includes two arrays:
<code>campaigns</code> and <code>settlements</code>.</p>
<p>The <code>campaigns</code> array is a <b>reverse-chronological</b> list
of OIDs of all settlements where the user owns a survivor (i.e.
the survivor's <code>email</code> attribute matches the user's
<code>login</code> attribute).</p>
<p>This list can include settlements owned/created by other users:
the basic idea behind the <code>campaigns</code> list is that
you probably want to show these settlements to the user when they
sign in or when they choose which settlement they want to view.</p>
<p>The <code>campaigns</code> array <u>does not</u> include any
'abandoned' settlements (i.e. any settlement with a Boolean True
value for the <code>abandoned</code> attribute).</p>
<p>See <a href="/#settlementAbandon"><code>/settlement/abandon/oid</code>
(below)</a> for more on abandoning a settlement. </p>
<p>By contrast, the <code>settlements</code> array is a
<b>chronologically</b> sorted list of all settlement OIDs that belong
to the current user, whether abandoned or not.</p>
<p>This is more of an archival/historical sort of list, meant to
facilitate that kind of view/list/UX.</p>
""",
},
"user_set": {
"name": "/user/set/<user_id>",
"subsection": "user_attribute_management",
"desc": """\
<p>This route supports the assignment of user-specified key/value
attributes to the user object.</p><p>To set an attribute, include
JSON in the body of the request that indicates the key/value to set.</p>
Supported attribute keys include:
<table class="embedded_table">
<tr><th>key</th><th>value</th></tr>
<tr>
<td>current_settlement</td>
<td class="text">
OID of an existing, non-removed settlement.
</td>
</tr>
</table>
Use multiple key/value pairs to set multiple attributes in a single
request, e.g. <code>{"current_settlement": $oid, "current_session":
$oid}</code>
</p>
<p><b>Important!</b> This route does not support the assignment of
arbitrary keys and will completely fail any request that includes
unsupported keys!</p>
""",
},
"user_set_preferences": {
"name": "/user/set_preferences/<user_id>",
"subsection": "user_attribute_management",
"desc": """\
<p><b>POST</b> a list of hashes to this endpoint to set user preferences.</p>
<p>Your list has to be named <code>preferences</code> and your
hashes have to be key/value pairs where the key is a valid
preferences handle and the value is a Boolean:</p>
<code>{preferences: [{handle: "beta", value: true}, {...}]}</code>
<p>Since this is mostly a sysadmin/back-of-house kind of route,
it fails pretty easily if you try to <b>POST</b> something it doesn't
like. The good news is that it should fail pretty descriptively.</p>
""",
},
"user_add_expansion_to_collection": {
"name": "/user/add_expansion_to_collection/<user_id>",
"subsection": "user_collection_management",
"desc": """\
<p>Starting in July of 2021, this endpoint is deprecated.</p>
<p>Please use <code>/user/set_expansions/<user_id></code> instead.</p>
""",
},
"user_rm_expansion_from_collection": {
"name": "/user/rm_expansion_from_collection/<user_id>",
"subsection": "user_collection_management",
"desc": """\
<p>Starting in July of 2021, this endpoint is deprecated.</p>
<p>Please use <code>/user/set_expansions/<user_id></code> instead.</p>
""",
},
"user_set_collection": {
"name": "/user/set_collection/<user_id>",
"subsection": "user_collection_management",
"desc": """\
<p>This endpoint facilitates all-at-once updates to a user's
<code>collection</code>, which basically looks like this:</p>
<pre><code>
'collection': {
'expansions': [
'manhunter',
'flower_knight'
],
}
</code></pre>
<p>The idea behind this endpoint is that you want to <b>POST</b> the actual
<code>collection</code> to it, so the JSON you post is going to have a key,
'collection', and that key's value is going to be a hash, and that hash will
have the 'expansions' key, etc.</p>
<p>Just follow the example JSON above.</p>
""",
},
}
create_assets = {
"new_settlement": {
"name": "/new/settlement",
"methods": ["POST","OPTIONS"],
"desc": (
'<p>Use <code>handle</code> values from the '
'<code>/kingdom_death</code> public route as params, like this:</p>'
"""\
<code><pre>{
"campaign": "people_of_the_lantern",
"expansions": ["dung_beetle_knight", "lion_god"],
"survivors": ["adam", "anna"],
"name": "Chicago",
"macros": ["create_first_story_survivors"]
}</pre></code>
"""
'<p>If successful, this route returns a serialized version '
'of the new settlement, including its OID, as JSON.</p>'
),
},
"new_survivor": {
"name": "/new/survivor",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p>This works differently from <code>/new/settlement</code> in
a number of significant ways.</p>
<p> In a nutshell, the basic idea here is that the only required key
in the JSON you <b>POST</b> to this route is an object ID for the settlement
to which the survivor belongs:</p>
<code>{'settlement': '59669ace4af5ca799c968c94'}</code>
<p> Beyond that, you are free to supply any other attributes of the
survivor, so long as they comply with the data model for survivors.</p>
<p> Consult the <a href="/#survivorDataModel">Survivor Data Model (below)</a> for a
complete reference on what attributes of the survivor may be set at
creation time.</p>
<p>As a general piece of advice, it typically makes more sense to
just initialize a new survivor with defaults and then operate on it
using the routes below, unless you're doing something with inheritance.</p>
<p>For normal inheritance, simply <b>POST</b> the OIDs of one or
more of the survivor's parents like so:</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff'}</code>
<p>...or like so:</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff', mother: '5a3419c64af5ca11240f519f'}</code>
<p>This will cause normal inheritance rules to be triggered when the
new survivor is created.</p>
<p>In order to trigger conditional or special inheritance, e.g. where
an innovation requires the user to select a single parent as the donor,
you <u>must</u> specify which parent is the donor using the <code>
primary_donor_parent</code> key and setting it to 'father' or 'mother':</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff', mother: '5a3419c64af5ca11240f519f', primary_donor_parent: 'father'}</code>
<p>This will cause innovations such as <b>Family</b> to use the primary
donor parent to follow one-parent inheritance rules for that innovation.</p>
<p>As of API releases > 0.77.n, survivors can be created with an avatar.
Include the <code>avatar</code> key in the <b>POST</b> body, and let
that key's value be a string representation of the image that should
be used as the survivor's avatar.</p>
<p>(<a href="/#setAvatarAnchor">See <code>/survivor/set_avatar/<oid></code> route below</a> for more
information on how to post string representations of binary content.)</p>
<p><b>Important!</b> Just like the <code>/new/settlement</code> route,
a successful <b>POST</b> to the <code>/new/survivor</code> route will return
a serialized version (i.e. JSON) of the new survivor, complete with
the <code>sheet</code> element, etc.</p>
""",
},
"new_survivors": {
"name": "/new/survivors",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p>Not to be confused with <code>/new/survivor</code> (above),
this route adds multiple new survivors, rather than just one.</p>
<p>The JSON you have to <b>POST</b> to this route is a little different
and more limited than what you would post to <code>/new/survivor</code>.</p>
<p>The following <b>POST</b> key/value pairs are the only ones supported
by this route:</p>
<table class="embedded_table">
<tr><th>key</th><th>O/R</th><th>value type</th><th>comment</th></tr>
<tr>
<td>settlement_id</td>
<td><b>R</b></td>
<td>settlement OID</td>
<td class="text">The OID of the settlement to which the new survivors belong.</td>
</tr>
<tr>
<td>public</td>
<td>O</td>
<td>boolean</td>
<td class="text">
The value of the new survivors' <code>public</code> attribute.
Defaults to <code>true</code>.
</td>
</tr>
<tr>
<td>male</td>
<td>O</td>
<td>arbitrary int</td>
<td class="text">The number of male survivors to create.</td>
</tr>
<tr>
<td>female</td>
<td>O</td>
<td>arbitrary int</td>
<td class="text">The number of female survivors to create.</td>
</tr>
<tr>
<td>father</td>
<td>O</td>
<td>survivor OID</td>
<td class="text">The OID of the survivor that should be the father of the new survivors.</td>
</tr>
<tr>
<td>mother</td>
<td>O</td>
<td>survivor OID</td>
<td class="text">The OID of the survivor that should be the mother of the new survivors.</td>
</tr>
</table>
<p>Creating new survivors this way is very simple. This JSON, for
example, would create two new male survivors:</p>
<code>{"male": 2, "settlement_id": "5a1485164af5ca67035bea03"}</code>
<p>A successful <b>POST</b> to this route always returns a list of
serialized survivors (i.e. the ones that were created), so if
you are creating more than four or five survivors, this route is
a.) going to take a couple/few seconds to come back to you and b.)
is going to drop a pile of JSON on your head. YHBW.</p>
<p>NB: this route <i>does not</i> support random sex assignment.</p>
""",
},
}
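
# Illustration only: a sketch of POSTing the documented /new/settlement params
# (taken verbatim from the example JSON above). The base URL, auth token
# handling and the 'requests' dependency are assumptions, not part of the API
# contract described in these docs.
def _example_new_settlement(auth_token, api_url):
    """ POST handles from the /kingdom_death public route to /new/settlement;
    a successful request returns the new settlement, serialized, as JSON. """
    import requests
    params = {
        "campaign": "people_of_the_lantern",
        "expansions": ["dung_beetle_knight", "lion_god"],
        "survivors": ["adam", "anna"],
        "name": "Chicago",
        "macros": ["create_first_story_survivors"],
    }
    response = requests.post(
        api_url + '/new/settlement',
        headers={'Authorization': auth_token},
        json=params,
    )
    return response.json()   # includes the new settlement's OID
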
settlement_management = {
"settlement_get_settlement_id": {
"name": "/settlement/get/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p> Retrieve a serialized version of the settlement associated
with <settlement_id> (to include all related user and game
assets, including survivors).</p>
<p><b>Important!</b> Depending on the number of expansions, survivors,
users, etc. involved in a settlement/campaign, this one can take a
long time to come back (over 2.5 seconds on Production hardware).
YHBW</p>
""",
},
"settlement_get_summary_settlement_id": {
"name": "/settlement/get_summary/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Get a nice, dashboard-friendly summary of a settlement's info.</p>
<p>This route is optimized for speedy returns, e.g. the kind you want when
showing a user a list of their settlements.</p>
""",
},
"settlement_get_campaign_settlement_id": {
"name": "/settlement/get_campaign/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Retrieve a serialized version of the settlement where the
<code>user_assets</code> element includes the <code>groups</code>
list, among other things, and is intended to be used in creating
'campaign' type views.</p>
<p>Much like the big <code>get</code> route for settlements, this one
can take a while to come back, e.g. two or more seconds for a normal
settlement. YHBW.</p>
""",
},
"settlement_get_sheet_settlement_id": {
"name": "/settlement/get_sheet/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>A convenience endpoint that only returns the settlement's <code>sheet</code>
element, i.e. the dictionary of assets it owns.</p>
""",
},
"settlement_get_game_assets_settlement_id": {
"name": "/settlement/get_game_assets/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>A convenience endpoint that only returns the serialized settlement's <code>
game_assets</code> element, i.e. the JSON representation of the game assets
(gear, events, locations, etc.) required to represent the settlement. </p>
""",
},
"settlement_get_event_log_settlement_id": {
"name": "/settlement/get_event_log/<settlement_id>",
"subsection": "settlement_component_gets",
"desc": """\
<p><b>GET</b> this end point to retrieve all settlement event log
entries (in a giant hunk of JSON) in <u>reverse chronological
order</u>, i.e. latest first, oldest last.</p>
<p>PROTIP: For higher LY settlements this can be a really huge
list and take a long time to return: if you're a front-end
developer, definitely consider loading this one AFTER you have
rendered the rest of your view.</p>
<p>Another way to optimize here is to include a filter key/value
pair in your <b>POST</b> body to limit your results. Some of the
accepted filter params will decrease the time it takes for your
requested lines to come back from the API:
<table class="embedded_table">
<tr><th>key</th><th>value type</th><th>scope</th></tr>
<tr>
<td>lines</td>
<td>arbitrary int</td>
<td class="text">Limit the return to the last <code>lines</code>-worth of lines: <code>{lines: 1
0}</code>. Note that this <u>does not</u> make the query or the return time better or faster for settlements with large event logs.</td>
</tr>
<tr>
<td>ly</td>
<td>arbitrary int</td>
<td class="text">
Limit the return to event log lines created <u>during</u> an arbitrary Lantern Year, e.g. <code>{ly: 9}</code>.<br/>
Note:
<ul class="embedded">
<li>This will always return <i>something</i> and you'll get an empty list back for Lantern Years with no events.</li>
<li>This param triggers a performance-optimized query and will return faster than a general call to the endpoint with no params.</li>
</ul>
</td>
</tr>
<tr>
<td>get_lines_after</td>
<td>event log OID</td>
<td class="text">Limit the return to event log lines created <u>after</u> an event log OID: <cod
e>{get_lines_after: "5a0370b54af5ca4306829050"}</code></td>
</tr>
<tr>
<td>survivor_id</td>
<td>arbitrary survivor's OID</td>
<td class="text">Limit the return to event log lines that are tagged with a survivor OID: <code>
{survivor_id: "5a0123b54af1ca42945716283"}</code></td>
</tr>
</table>
<p><b>Important!</b> Though the API will accept multiple filter
params at this endpoint, <b>POST</b>ing more than one of the
above can cause...unexpected output. YHBW.</p>
""",
},
"settlement_get_storage_settlement_id": {
"name": " /settlement/get_storage/<settlement_id>",
"methods": ['GET','OPTIONS'],
"subsection": "settlement_component_gets",
"desc": """\
<p>Hit this route to get representations of the settlement's storage.</p>
<p>What you get back is an array with two dictionaries, one for resources
and one for gear:</p>
<pre><code>[
{
"storage_type": "resources",
"total":0,
"name":"Resource",
"locations": [
{
"bgcolor":"B1FB17",
"handle":"basic_resources",
"name":"Basic Resources",
"collection": [
{
"handle":"_question_marks",
"name":"???",
"rules":[],
"consumable_keywords": ["fish","consumable","flower"],
"type_pretty": "Resources",
"keywords": ["organ","hide","bone","consumable"],
"desc":"You have no idea what monster bit this is. Can be used as a bone, organ, or hide!",
"type":"resources",
"sub_type":"basic_resources",
"quantity":0,"flippers":false
},
...
],
...
},
},
], </code></pre>
<p>This JSON is optimized for representation via AngularJS, i.e. iteration over
nested lists, etc.</p>
<p>Each dictionary in the main array has an array called <code>locations</code>,
which is a list of dictionaries where each dict represents a location within the
settlement.</p>
<p>Each location dictionary has an array called <code>collection</code> which is
a list of dictionaries where each dictionary is a piece of gear or a resource.</p>
<p>The attributes of the dictionaries within the <code>collection</code> array
include the <code>desc</code>, <code>quantity</code>, etc. of an individual
game asset (piece of gear or resource or whatever).</p>
""",
},
"zz_settlement_abandon_settlement_id": {
"name": "/settlement/abandon/<settlement_id>",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>As of January 2021, this route is deprecated. Please use the"
"<code>set_attribute</code> route (<b>POST</b> "
"<code>abandoned</code> as the <code>value</code>) instead.</p>"
),
},
"settlement_remove_settlement_id": {
"name": "/settlement/remove/<settlement_id>",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p><b>POST</b> (not <b>GET</b>) to this route to mark the settlement as
removed.</p>
<p>Once marked as removed, settlements are queued up by the API for removal
from the database: the next time the maintenance process runs, it will check
the timestamp of the mark-as-removed event and purge the settlement
(and all survivors) from the database.</p>
<p><b>This cannot be undone.</b></p>
""",
},
#
# settlement SET attributes
#
"settlement_set_attribute_settlement_id": {
"name": "/settlement/set_attribute/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": (
"<p><b>POST</b> some JSON containing <code>attribute</code> and "
"a <code>value</code> keys where 'attribute' is the Settlement "
"attribute you want to set and and 'value' is what you want to set "
"it to.</p>"
),
'examples': [
"{attribute: 'survival_limit', value: 3}",
"{attribute: 'abandoned', value: true}",
"{attribute: 'abandoned', value: 'UNSET'}",
],
},
"settlement_set_last_accessed_settlement_id": {
"name": "/settlement/set_last_accessed/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p><b>DEPRECATED.</b></p>
<p>Starting in June 2021, settlement access information is handled automatically
by the API.</p>
<p>This endpoint is dead. Attempting to use it to manually set an access time
is no longer supported.</p>
""",
},
"settlement_set_name_settlement_id": {
"name": "/settlement/set_name/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p><b>POST</b> some JSON whose body contains the key 'name' and whatever the
new name is going to be as that key's value to change the settlement's
name:</p>
<code>{'name': 'The Black Lantern'}</code>
<p><b>Important!</b> Submitting an empty string will cause the API to
default the settlement's name to "UNKNOWN". There are no technical
reasons (e.g. limitations) for this, but it breaks the display in most
client apps, so null/empty names are forbidden.</p>
""",
},
"settlement_set_inspirational_statue_settlement_id": {
"name": "/settlement/set_inspirational_statue/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Set the settlement's <code>inspirational_statue</code> attrib
by <b>POST</b>ing a Fighting Art handle to this route:</p>
<code>{'handle': 'leader'}</code>
<p>This route will actually check out the handle and barf on you
if you try to <b>POST</b> an unrecognized FA handle to it. YHBW.</p>
""",
},
"settlement_set_lantern_research_level_settlement_id": {
"name": "/settlement/set_lantern_research_level/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Set the Settlement's Lantern Research Level with some basic
JSON:</p>
<code>{'value': 3}</code>
<p>This route is preferable to a generic attribute-setting route
because it a.) ignores values over 5 and b.) forces the attrib,
which is not part of the standard data model, to exist if it does
not.</p>
<p>Definitely use this instead of <code>set_attribute</code>.</p>
""",
},
"settlement_update_set_lost_settlements_settlement_id": {
"name": "/settlement/set_lost_settlements/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Use this route to set a settlement's Lost Settlements total.</p>
<p><b>POST</b> some JSON containing the new value to set it to:</p>
<code>{"value": 2}</code>
<p>The above code would set the settlement's Lost Settlements total
to two; negative numbers will default to zero. </p>
""",
},
#
# settlement UPDATE attributes
#
"settlement_update_attribute_settlement_id": {
"name": "/settlement/update_attribute/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p><b>POST</b> some JSON containing an 'attribute' and a 'modifier'
key where 'attribute' is an integer settlement attrib and 'modifier' is
how much you want to increment it by:</p>
<code>{'attribute': 'death_count', 'modifier': -1}</code>
<p> This route also supports incrementing the <code>survival_limit
</code> and <code>death_count</code> attributes.</p>
""",
},
"settlement_update_population_settlement_id": {
"name": "/settlement/update_population/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p><b>POST</b> some JSON containing the key 'modifier' whose value is
an integer that you want to add to the settlement's population
number.</p>
<p>This works basically identically to the <code>update_attribute</code>
route, so consider using that route instead. </p>
<p>For example, this JSON would add two to the settlement's
population number:</p>
<code>{'modifier': 2}</code>
<p><b>POST</b> negative numbers to decrease.</p>
<p><b>Important!</b> Settlement population can never go below zero,
so any 'modifier' values that would cause this simply cause the
total to become zero.</p>\
""",
},
"settlement_replace_game_assets_settlement_id": {
"name": "/settlement/replace_game_assets/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p>This route functions nearly identically to the other update-type routes in
this subsection, except for one crucial difference: it works on list-type
attributes of the settlement (whereas the others mostly work on string or
integer type attributes).</p>
<p>This route accepts a list of <code>handles</code> and a <code>type</code>
of game asset and then evaluates the settlement's current handles of that type,
removing and adding as necessary in order to bring the settlement's list in sync
with the incoming list. </p>
<p>Your POST body needs to define the attribute <code>type</code>
you're trying to update, as well as provide a list of handles
that represent the settlement's current asset list:</p>
<pre><code>{
"type": "locations",
"handles": [
"lantern_hoard","bonesmith","organ_grinder"
]
}</code></pre>
<p>Finally, a couple of tips/warnings on this route:<ul>
<li class="plain">The <code>handles</code> list/array is handled by the API as if it were a set, i.e. duplicates are silently ignored.</li>
<li class="plain">If any part of the update fails (i.e. individual add or remove operations), the whole update will fail and <u>no changes to the settlement will be saved</u>.</li>
<li class="plain">This route does not support Location or Innovation levels! (Use <code>set_location_level</code> or <code>set_innovation_level</code> for that.)</li>
</ul></p>
""",
},
"settlement_update_endeavor_tokens_settlement_id": {
"name": "/settlement/update_endeavor_tokens/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p>Use this route to change a settlement's endeavor token count.</p>
<p><b>POST</b> some JSON containing the number to modify by:</p>
<code>{"modifier": 2}</code>
<p>The above code would add two to the settlement's current total,
whereas the code below would decrement by one:</p>
<code>{"modifier": -1}</code>
""",
},
"settlement_update_toggle_strain_milestone_settlement_id": {
"name": "/settlement/toggle_strain_milestone/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p><b>DEPRECATED</b>.</p>
<p>This endpoint is deprecated in the June 2021 release of the API.</p>
<p>Use <code>set_strain_milestones</code> instead.</p>
""",
},
#
# bulk survivor management
#
"settlement_update_survivors_settlement_id": {
"name": "/settlement/update_survivors/<settlement_id>",
"subsection": "settlement_manage_survivors",
"desc": """\
<p>Use this route to update a specific group of survivors, e.g.
Departing survivors.</p>
<p><b>POST</b> some JSON including the type of survivors to include,
the attribute to modify, and the modifier:</p>
<code>{include: 'departing', attribute: 'Insanity', modifier: 1}</code>
<p><b>Important!</b> This route currently only supports the
<code>include</code> value 'departing' and will error/fail/400 on
literally anything else.</p>\
""",
},
#
# settlement: manage expansions
#
"settlement_update_add_expansions_settlement_id": {
"name": "/settlement/add_expansions/<settlement_id>",
"subsection": "settlement_manage_expansions",
"desc": """\
<p>Add expansions to a settlement by <b>POST</b>ing a list of expansion handles.
The body of your post should be a JSON-style list:</p>
<code>{'expansions': ['beta_challenge_scenarios','dragon_king']}</code>
<p>
Note that this route not only updates the settlement sheet, but also
adds/removes timeline events, updates the settlement's available game
assets (e.g. items, locations, etc.).
</p>
""",
},
"settlement_update_rm_expansions_settlement_id": {
"name": "/settlement/rm_expansions/<settlement_id>",
"subsection": "settlement_manage_expansions",
"desc": """\
<p>Remove expansions from a settlement by <b>POST</b>ing a list of
expansion handles. The body of your post should be a JSON-style
list:</p>
<code>{'expansions': ['manhunter','gorm','spidicules']}</code>
<p>
Note that this route not only updates the settlement sheet, but also
adds/removes timeline events, updates the settlement's available game
assets (e.g. items, locations, etc.).
</p>
<p><b>Important!</b> We're all adults here, and the KDM API will
<i>not</i> stop you from removing expansion handles for expansions
that are required by your settlement's campaign. If you want to
prevent users from doing this, that's got to be part of your UI/UX
considerations.</p>
""",
},
#
# settlement: manage monsters
#
"settlement_set_current_quarry_settlement_id": {
"name": "/settlement/set_current_quarry/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p>This route sets the settlement's 'current_quarry' attribute,
which is the monster that the settlement's Departing Survivors are
currently hunting.</p><p><b>POST</b> some simple JSON containing a monster
name (do not use handles for this):</p>
<code>{'current_quarry': 'White Lion Lvl 2'}</code>
<p>...or, if the monster is unique:</p>
<code>{'current_quarry': 'Watcher'}</code>
<p><b>Important!</b> You're typically going to want to pull monster
names from the settlement's <code>game_assets -> defeated_monsters</code>
list (which is a list of monster names created for the settlement
based on expansion content, etc.)</p>
""",
},
"settlement_add_defeated_monster_settlement_id": {
"name": "/settlement/add_defeated_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> a 'monster' string to this route to add it to the
settlement's list of defeated monsters:</p>
<code>{'monster': 'White Lion (First Story)'}</code> or
<code>{'monster': 'Flower Knight Lvl 1'}</code>
<p><b>Important!</b> Watch the strings on this one and try to avoid
free text: if the API cannot parse the monster name and match it to
a known monster type/name, this will fail.</p>
""",
},
"settlement_rm_defeated_monster_settlement_id": {
"name": "/settlement/rm_defeated_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> a 'monster' string to this route to remove it from the
settlement's list of defeated monsters, i.e. the <code>sheet.defeated_monsters</code>
array/list: </p>
<code>{'monster': 'Manhunter Lvl 4'}</code>
<p>Attempts to remove strings that do NOT exist in the list will
not fail (i.e. they will be ignored and fail 'gracefully').</p>
""",
},
"settlement_add_monster_settlement_id": {
"name": "/settlement/add_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<P>Use this route to add quarry or nemesis type monsters to the
settlement. <b>POST</b> some JSON containing the handle of the monster to
add it:</p>
<code>{'handle': 'flower_knight'}</code>
<p>The API will determine whether the monster is a nemesis or a quarry
and add it to the appropriate list. For nemesis monsters, use the
<code>/settlement/update_nemesis_levels</code> route (below) to manage
the checked/completed levels for that nemesis.</p>
<p>Make sure to check the settlement JSON <code>game_assets.monsters</code>
and use the correct handle for the desired monster.</p>
""",
},
"settlement_rm_monster_settlement_id": {
"name": "/settlement/rm_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> some JSON containing a quarry or nemesis type monster handle
to remove it from the settlement's list:</p>
<code>{'handle': 'sunstalker'}</code>
<p>The API will determine whether the monster is a quarry or a nemesis.
When a nemesis monster is removed, its level detail is also removed.</p>
""",
},
"settlement_update_nemesis_levels_settlement_id": {
"name": "/settlement/update_nemesis_levels/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p>Use this method to update the Settlement sheet's <code>nemesis_encounters</code>
dictionary, i.e. to indicate that a nemesis encounter has occurred.</p>
<p>A typical dictionary might look like this:</p>
<code> "nemesis_encounters": {"slenderman": [], "butcher": [1,2]}</code>
<p>In this example, the settlement has (somehow) encountered a
level 1 and a level 2 Butcher, but has not yet encountered a Slenderman.</p>
<p>To update the dictionary, <b>POST</b> some JSON that includes the
nemesis monster's handle and the levels that are complete.</p>
<p><b>POST</b> this JSON to reset/undo/remove Butcher encounters:<p>
<code>{"handle": "butcher", "levels": []}</code>
<p><b>POST</b> this JSON to record an encounter with a level 1 Manhunter:</p>
<code>{"handle": "manhunter", "levels": [1]}</code>
""",
},
"settlement_add_milestone_settlement_id": {
"name": "/settlement/add_milestone/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> a milestone handle (get it from <code>game_assets</code>)
to this route to add it to the settlement's list of milestones:</p>
<code>{handle: 'game_over'}</code>
<p>...or...</p>
<code>{handle: 'innovations_5'}</code>
<p>This endpoint will gracefully fail and politely ignore dupes.</p>
""",
},
"settlement_rm_milestone_settlement_id": {
"name": "/settlement/rm_milestone/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> a milestone handle (get it from <code>game_assets</code>) to this
route to remove it from the settlement's list of milestones:</p>
<code>{handle: 'pop_15'}</code>
<p>...or...</p>
<code>{handle: 'first_death'}</code>
<p>This endpoint will gracefully fail and politely ignore attempts to remove
handles that are not present.</p>
""",
},
"settlement_set_principle_settlement_id": {
"name": "/settlement/set_principle/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> some JSON to this route to set or unset a settlement principle.
Include the handle of the <code>principle</code> and the election you want to
make:</p>
<pre><code>
{
principle: 'conviction',
election: 'romantic',
}</code></pre>
<p>This route has a couple of unusual behaviors to note:</p>
<ul>
<li class="plain">It requires both keys (i.e. you will get a 400 back if you
<b>POST</b> any JSON that does not include both).</li>
<li class="plain">It will accept a Boolean for 'election', because this is how
you 'un-set' a principle.</li>
</ul>
<p> To un-set a principle, simply post the principle handle and set the
<code>election</code> key to 'false':</p>
<code>{principle: 'new_life', election: false}</code>
<p> <b>Important!</b> Adding principles to (or removing them from) a
settlement automatically modifies all current survivors, in many
cases. If you've got survivor info up on the screen when you set a principle,
be sure to refresh any survivor info after <b>POST</b>ing JSON to this route!
</p>\
""",
},
#
# location controls
#
"settlement_add_location_settlement_id": {
"name": "/settlement/add_location/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p> <b>POST</b> a location <code>handle</code> to this route to add
it to the settlement's Locations:</p>
<code>{'handle': 'bone_smith'}</code>
""",
},
"settlement_rm_location_settlement_id": {
"name": "/settlement/rm_location/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p>This is basically the reverse of <code>add_location</code>
and works nearly identically. <b>POST</b> a JSON representation of a
Location handle to remove it from the settlement's list:</p>
<code>{'handle': 'barber_surgeon'}</code>
""",
},
"settlement_set_location_level_settlement_id": {
"name": "/settlement/set_location_level/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p>For Locations that have a level (e.g. the People of the
Sun's 'Sacred Pool'), you may set the Location's level by posting
the <code>handle</code> of the location and the desired level:</p>
<code>{'handle': 'sacred_pool', 'level': 2}</code>
""",
},
#
# innovation controls
#
"settlement_get_innovation_deck_settlement_id": {
"name": "/settlement/get_innovation_deck/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>Retrieve the settlement's current innovation deck as an array of asset names
by default.</p>
<p>Alternately, you can <b>POST</b> the parameter
<code>return_type: "dict"</code> to this endpoint to get a hash of innovations
(representing the settlement's Innovation Deck) back from this endpoint.</p>
<p>In the hash, innovation assets are sorted by their name (i.e. <i>not</i>
by their handle):<p>
<pre><code>{
"albedo": {
"handle": "albedo",
"name": "Albedo",
"consequences": [
"citrinitas"
],
"endeavors": [
"gorm_albedo"
],
"expansion": "gorm",
"type_pretty": "Innovations",
"sub_type_pretty": "Expansion",
"type": "innovations",
"sub_type": "expansion",
"innovation_type": "science"
},
"bed": {
"handle": "bed",
"name": "Bed",
"type": "innovations",
"endeavors": [
"bed_rest"
],
"type_pretty": "Innovations",
"sub_type_pretty": "Innovation",
"survival_limit": 1,
"sub_type": "innovation",
"innovation_type": "home"
},
...
"symposium": {
"handle": "symposium",
"name": "Symposium",
"consequences": [
"nightmare_training",
"storytelling"
],
"type": "innovations",
"settlement_buff": "When a survivor innovates, draw an additional 2 Innovation Cards to choose from.",
"type_pretty": "Innovations",
"sub_type_pretty": "Innovation",
"survival_limit": 1,
"sub_type": "innovation",
"innovation_type": "education"
}
}
</code></pre>
""",
},
"settlement_add_innovation_settlement_id": {
"name": "/settlement/add_innovation/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p> <b>POST</b> an Innovation <code>handle</code> to this route to add
it to the settlement's Innovations:</p>
<code>{'handle': 'hovel'}</code>
<p>...or:</p><code>{'handle': 'mastery_club'}</code>
<p><b>Important!</b> As far as the API is concerned, Principles (e.g.
'Graves', 'Survival of the Fittest', etc.) <u>are not innovations</u>
and you <u>will</u> break the website if you try to add a principle
as if it were an innovation.</p>
<p>Use <code>set_principle</code> (below) instead.</p>
""",
},
"settlement_rm_innovation_settlement_id": {
"name": "/settlement/rm_innovation/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>This is basically the reverse of <code>add_innovation</code>
and works nearly identically. <b>POST</b> a JSON representation of an
Innovation handle to remove it from the settlement's list:</p>
<code>{'handle': 'mastery_club'}</code>
""",
},
"settlement_set_innovation_level_settlement_id": {
"name": "/settlement/set_innovation_level/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>For Innovations that have a level (e.g. the Slenderman's 'Dark
Water Research'), you may set the Innovation's level by posting
the <code>handle</code> of the innovation and the level:</p>
<code>{'handle': 'dark_water_research', 'level': 2}</code>
""",
},
#
# timeline!
#
"settlement_get_timeline_settlement_id": {
"name": "/settlement/get_timeline/<settlement_id>",
"subsection": "settlement_manage_timeline",
"methods": ['GET'],
"desc": """\
<p>Hit this endpoint to get a JSON representation of the
settlement's timeline.</p>
<p>This is read-only and optimized for performance, so you'll
get a timeline MUCH faster using this route than one of the
routes that pulls down the whole settlement.</p>
""",
},
"settlement_add_lantern_years_settlement_id": {
"name": "/settlement/add_lantern_years/<settlement_id>",
"subsection": "settlement_manage_timeline",
'methods': ['POST','OPTIONS'],
"desc": """\
<p><b>POST</b> a number (int) of years to add to the settlement's
Timeline:</p>
<code>{years: 5}</code>
<p><b>NB:</b> Timelines are capped at 50 LYs. If you try to add
a number of years that would take you above 50 LYs, you'll get a
400 back.</p>
""",
},
"settlement_set_current_lantern_year_settlement_id": {
"name": "/settlement/set_current_lantern_year/<settlement_id>",
"subsection": "settlement_manage_timeline",
'methods': ['POST','OPTIONS'],
"desc": """\
<p>To set the settlement's current LY, <b>POST</b> an int to this
endpoint:</p>
<code>{ly: 3}</code>
""",
},
'settlement_set_lantern_year': {
'name': '/settlement/set_lantern_year/<settlement_id>',
'methods': ['POST','OPTIONS'],
'subsection': 'settlement_manage_timeline',
'desc': """\
<p>Use this endpoint to completely overwrite a lantern year.</p>
<p>Requires the <code>ly</code> param, which should be a
whole lantern year represented as JSON, e.g.:
<pre><code>
{
'year': 1,
'story_event': [
{'handle': 'core_returning_survivors'}
],
'settlement_event': [
{'handle': 'core_first_day'}
]
}
</code></pre>
</p>
""",
},
"zz_settlement_replace_lantern_year_settlement_id": {
"name": "/settlement/replace_lantern_year/<settlement_id>",
"subsection": "settlement_manage_timeline",
"desc": """\
<p><b>DEPRECATED</b>.</p>
<p>This endpoint was deprecated in the April 2021 release of the API.</p>
<p>It was removed completely in the June 2021 release.</p>
<p>Use <code>set_lantern_year</code> instead.</p>
""",
},
#
# settlement admins
#
"settlement_add_admin_settlement_id": {
"name": "/settlement/add_admin/<settlement_id>",
"subsection": "settlement_admin_permissions",
"methods": ["POST","OPTIONS"],
"desc": """\
<p><b>DEPRECATED</b>.</p>
<p>This endpoint is deprecated in the June 2021 release of the API.</p>
<p>Use <code>add_settlement_admin</code> instead.</p>
""",
},
"settlement_rm_admin_settlement_id": {
"name": "/settlement/rm_admin/<settlement_id>",
"subsection": "settlement_admin_permissions",
"methods": ["POST","OPTIONS"],
"desc": """\
<p><b>DEPRECATED</b>.</p>
<p>This endpoint is deprecated in the June 2021 release of the API.</p>
<p>Use <code>rm_settlement_admin</code> instead.</p>
""",
},
#
# settlement notes
#
"settlement_add_note_settlement_id": {
"name": "/settlement/add_note/<settlement_id>",
"subsection": "settlement_notes_management",
"methods": ["POST","OPTIONS"],
"desc": """\
<p>Since any player in a game is allowed to create settlement
notes, the JSON required by this endpoint must include a user's
OID.</p>
<p>This endpoint supports the following key/value pairs:</p>
<table class="embedded_table">
<tr><th>key</th><th><b>R</b>/O</th><th>value</th></tr>
<tr>
<td class="small_key">author_id</td>
<td class="type"><b>R</b></type>
<td class="value">The creator's OID as a string.</td>
</tr>
<tr>
<td class="small_key">note</td>
<td class="type"><b>R</b></type>
<td class="value">The note as a string. We accept HTML here, so if you want to display this back to your users as HTML, you can do that.</td>
</tr>
<tr>
<td class="small_key">author</td>
<td class="type">O</type>
<td class="value">The creator's login, e.g. <code>demo@kdm-manager.com</code>, as a string. Best practice is to NOT include this, unless you really know what you're doing.</td>
</tr>
<tr>
<td class="small_key">lantern_year</td>
<td class="type">O</type>
<td class="value">The Lantern Year the note was created. Defaults to the current LY if not specified.</td>
</tr>
</table>
<p>For example, to add a new note to a settlement, your <b>POST</b>
body will, at a minimum, look something like this:</p>
<code>
{
author_id: "5a26eb1a4af5ca786d1ed548",
note: "Nobody expects the Spanish Inquisition!"
}
</code>
<p><b>Important!</b> This route returns the OID of the
newly-created note:</p>
<code>{"note_oid": {"$oid": "5a2812d94af5ca03ef7db6c6"}}</code>
<p>...which can then be used to remove the note, if necessary
(see <code>rm_note</code> below).</p>
""",
},
"settlement_rm_note_settlement_id": {
"name": "/settlement/rm_note/<settlement_id>",
"subsection": "settlement_notes_management",
"methods": ["POST","OPTIONS"],
"desc": """\
<p><b>POST</b> the OID of a settlement note to remove it.</p>
<code>{_id: "5a26eb894af5ca786d1ed558"}</code>
<p>As long as you get a 200 back from this one, the note has
been removed. If you get a non-200 status (literally anything other
than a 200), something went wrong. </p>
""",
},
}
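
# Hedged sketch of the settlement set_attribute pattern documented above; the
# example payload comes from that route's 'examples' list, while the OID,
# base URL and auth handling are placeholders/assumptions.
def _example_settlement_set_attribute(auth_token, settlement_oid, api_url):
    """ POST an 'attribute' and a 'value' key to set an integer settlement
    attribute, e.g. the Survival Limit. """
    import requests
    return requests.post(
        api_url + '/settlement/set_attribute/' + str(settlement_oid),
        headers={'Authorization': auth_token},
        json={'attribute': 'survival_limit', 'value': 3},
    )
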
survivor_management = {
"survivor_get": {
"name": "/survivor/get/<survivor_id>",
"methods": ["GET", "OPTIONS"],
"desc": (
'<p>Retrieves a JSON representation of the survivor with OID '
'<i><survivor_id></i>.</p>'
'<p>As with other <code>GET</code> type routes, this one returns '
"a lot of info, but what you typically want is in the "
'<code>sheet</code> element:</p>'
"""<pre><code>{
"meta": {...},
"sheet": {
"_id": {
"$oid": "5febfb74d174525f6eca199b"
},
"meta": {...},
"email": "toconnell@thelaborinvain.com",
"born_in_ly": 0,
"created_on": {
"$date": 1609279252167
},
"created_by": {
"$oid": "5fad50306515930c165b006f"
},
"settlement": {
"$oid": "5febfb73d174525f6eca1998"
},
...
}</code></pre>"""
),
},
"survivor_get_lineage": {
"name": "/survivor/get_lineage/<survivor_id>",
"methods": ["GET", "OPTIONS"],
"desc": (
'<p>This endpoint returns non-game-related data about a '
'survivor, including their complete event log, i.e. the log '
'of changes and updates to the survivor.</p>'
'<p>The JSON that comes back from this endpoint <i>does not</i> '
"include the survivor's OID, so be careful with your local scope "
'when iterating through lists of survivors and calling this '
'endpoint.</p>'
),
},
"zz_survivor_get_survival_actions": {
"name": "/survivor/get_survival_actions/<survivor_id>",
"methods": ["GET", "OPTIONS"],
"desc": (
'<p>This endpoint is deprecated.</p>'
'<p>Please use the <code>survival_actions</code> element returned '
'by the <code>/survivor/get/<survivor_id></code> instead.</p>'
),
},
# survivor sheet
"survivor_reset_attribute_details": {
"name": "/survivor/reset_attribute_details/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["GET", "POST", "OPTIONS"],
"desc": (
"<p><b>GET</b> or <b>POST</b> to this endpoint to remove all "
"gear and token effects from a survivor's attributes.</p>"
)
},
"survivor_reset_damage": {
"name": "/survivor/reset_damage/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["GET", "POST", "OPTIONS"],
"desc": (
"<p>Hit this endpoint to reset all damage locations.</p>"
)
},
"survivor_set_attribute": {
"name": "/survivor/set_attribute/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p><b>Important!</b> As with the Settlement record, this is "
"pretty much where you want to start with updates/edits for the "
"survivor record <i>before</i> looking into using specialty "
"routes.</p>"
"<p>Basically, try this one first, and if it doesn get the result "
"you want, then look at other endpoints.</p>"
"<p>For this endpoint, you want to <b>POST</b> some JSON that "
"includes both <code>attribute</code> and <code>value</code> keys, "
"with an integer value for <code>value</code>.</p>"
"<p>You can use this one to update pretty much any attribute of "
"the survivor sheet that is an integer, and it is essentially the "
"same as the <code>set_many_attributes</code> route (except for "
"how it only does one attribute at a time).</p>"
),
'examples': [
'{attribute: "survival", value: 3}',
'{attribute: "Head", value: 1}',
'{attribute: "Understanding", value: 2}',
'{attribute: "hunt_xp", value: 6}'
'{attribute: "bleeding_tokens", value: 3}',
],
},
"survivor_set_name": {
"name": "/survivor/set_name/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p><b>POST</b> a 'name' value to this endpoint to change the "
"survivor's name:</p><code>{name: 'Hungry Basalt'}</code>"
)
},
"survivor_set_sex": {
"name": "/survivor/set_sex/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>This endpoint accepts a one-character-long string of either "
"'M' or 'F' and use it to set the survivor's <code>sex</code> "
"attribute. </p>"
"<p>Note that survivors also have an <code>effective_sex</code> "
"attribute that the API changes automatically when certain gear "
"and/or A&Is are added to the survivor.</p>"
"<code>{'sex': 'F'}</code>"
)
},
"survivor_set_survival": {
"name": "/survivor/set_survival/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
'<p>This endpoint is deprecated.</p>'
'<p>Please use the <code>set_attribute</code> route instead.</p>'
"<p><b>POST</b> a 'value' to this endpoint to set the survival "
"number:</p> <code>{value: '1'}</code>"
"<p>PROTIP: the API will ignore negative numbers and default them "
"to zero.</p>"
)
},
"survivor_set_affinities": {
"name": "/survivor/set_affinities/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>This supersedes both <code>set_affinity</code>, and "
"<code>update_affinities</code>, which are both deprecated in "
"API releases higher than 1.50.n.</p>"
"<p>The idea here is to <b>POST</b> some JSON containing one or "
"all of the following keys: <code>red</code>, <code>green</code> "
"or <code>blue</code>, and for each of those keys to have an "
"integer value, e.g.:</p>"
"<code>{red: 1, green: 0, blue:4}</code>"
"<p>For convenience/laziness-sake, the API allows you to zero out "
"an affinity by <i>not</i> setting a value for it.</p>"
"<p><b>POST</b> <code>{red: 1, blue: 3}</code>, for example, to "
"set he survivor's red affinities to 1, their blue affinities to "
"three and their green affinities to zero.</p>"
),
'examples': [
'{red: 3, blue:1}',
'{green: 4, red: 1}',
'{blue: 0}'
],
},
"survivor_set_bleeding_tokens": {
"name": "/survivor/set_bleeding_tokens/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
'<p>This endpoint is deprecated.</p>'
'<p>Please use the <code>set_attribute</code> route instead.</p>'
"<p><b>POST</b> an integer to this endpoint to set the "
"survivor's <code>bleeding_tokens</code> attribute.</p>"
"<p>A couple of things to keep in mind about this attribute:"
"<ol><li>It cannot go below zero: the API will default "
"negative numbers to zero.</li><li>Each survivor has a "
"<code>max_bleeding_tokens</code> value that is determined "
"by their Fighting Arts, A&Is, etc. The API will default any "
"incoming values that are <i>greater</i> than this number back "
"to the <code>max_bleeding_tokens</code> value.</li></ol>"
),
'examples': [
"<code>{value: 4}</code>",
],
},
"survivor_set_status_flag": {
"name": "/survivor/set_status_flag/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>The API allows survivors to be 'flagged' with a status, which "
"is an attribute that always evaluates to Boolean true if it "
"exits, but which can <b>never be False</b>.</p>"
"<p>(This may see odd, but as a design pattern, it has its uses as "
"an unambigious indicator of status: allowing these types of "
"statuses to be false would result in potentially ambigious double "
"negatives, etc.)</p>"
"<p>To set a flag, simply <b>POST</b> it to this end point:</p>"
"<code>{flag: 'cannot_consume'}</code>"
"<p>To un-set a flag, <b>POST</b> the flag and the "
"<code>unset</code> key:</p>"
"<code>{flag: 'cannot_spend_survival', unset: true}</code>."
"<p>Supported flags include:</p><table>"
"<tr><td>cannot_activate_two_handed_weapons</td></tr>"
"<tr><td>cannot_activate_two_plus_str_gear</td></tr>"
"<tr><td>cannot_consume</td></tr>"
"<tr><td>cannot_be_nominated_for_intimacy</td></tr>"
"<tr><td>cannot_gain_bleeding_tokens</td></tr>"
"<tr><td>cannot_spend_survival</td></tr>"
"<tr><td>cannot_use_fighting_arts</td></tr>"
"<tr><td>departing</td></tr>"
"<tr><td>skip_next_hunt</td></tr>"
"</table>"
),
'examples': [
"{flag: 'cannot_consume'}",
"{flag: 'cannot_spend_survival', unset: true}",
],
},
"survivor_set_retired": {
"name": "/survivor/set_retired/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>Takes a Boolean object as the value for a key called "
"<code>retired</code>. Rejects anything else.</p>"
),
'examples': [
'{retired: true}',
'{retired: false}',
],
},
"survivor_set_sword_oath": {
"name": "/survivor/set_sword_oath/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>Starting with release 1.44.307, which adds support for the "
"<b>Echoes of Death 3</b> expansion and the <i>Sword Oath</i> "
"Fighting Art, the API can also track the sword that a survivor "
"nominates as well as the number of wounds.</p>"
"<p>Just <b>POST</b> any valid gear handle and the number of "
"wounds to this endpoint:</p>"
),
'examples': [
"<code>{sword: 'bone_blade', wounds: 3}</code>",
],
},
"survivor_toggle_sotf_reroll": {
"name": "/survivor/toggle_sotf_reroll/<survivor_id>",
"subsection": "survivor_sheet",
"methods": ["GET", "POST", "OPTIONS"],
"desc": (
"<p>Hit this end point to toggle the survivor record's"
"<code>sotf_reroll</code> attribute.</p>"
"<p>If the record does not have this attribute, accessing this "
"endpoint will create it and set it to <code>true</code>.</p>"
"<p><b>Warning!<b></p> This attribute, since it is only used with "
"certain campaign content, is <b>not</b> part of the survivor data "
"modal and <b>cannot</b> be toggled using the "
"<code>toggle_boolean</code> endpoint!</p>"
),
},
# deprecated
"zz_toggle_status_flag": {
"name": "/survivor/toggle_status_flag/<survivor_id>",
"methods": ["GET", "OPTIONS"],
"subsection": "survivor_sheet",
"desc": (
'<p>This endpoint is deprecated.</p>'
'<p>Please use the <code>set_status_flag</code> route instead.</p>'
),
},
"zz_survivor_set_affinity": {
"name": "/survivor/set_affinity/<survivor_id>",
"methods": ["GET", "OPTIONS"],
"subsection": "survivor_sheet",
"desc": (
'<p>This endpoint is deprecated.</p>'
'<p>Please use the <code>set_affinities</code> route instead.</p>'
),
},
"zz_survivor_update_affinities": {
"name": "/survivor/update_affinities/<survivor_id>",
"methods": ["GET", "OPTIONS"],
"subsection": "survivor_sheet",
"desc": (
'<p>This endpoint is deprecated.</p>'
'<p>Please use the <code>set_affinities</code> route instead.</p>'
),
},
# survivor gear management
"add_cursed_item": {
"name": "/survivor/add_cursed_item/<survivor_id>",
"subsection": "survivor_gear_management",
"methods": ["POST", "OPTIONS"],
"desc": (
'<p><b>POST</b> some JSON that includes a gear handle to add it to '
"the survivor's list of cursed items.</p>"
),
'examples': [
"<code>{handle: 'thunder_maul'}</code>",
],
},
"rm_cursed_item": {
"name": "/survivor/rm_cursed_item/<survivor_id>",
"subsection": "survivor_gear_management",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>The reverse of the previous method. <b>POST</b> a gear handle "
"to remove it.</p>"
),
'examples': [
"<code>{handle: 'blue_lantern'}</code>",
],
},
"set_gear_grid": {
"name": "/survivor/set_gear_grid/<survivor_id>",
"subsection": "survivor_gear_management",
"methods": ["POST", "OPTIONS"],
"desc": (
'<p><b>POST</b> an array named <code>gear_grid</code> that '
'includes between one (1) and nine (9) key/value pairs '
'where the key is a gear grid location and the value is a '
'gear handle:</p>'
"""<pre><code>{'gear_grid': {
'top_left': 'bone_blade', 'top_middle': 'bone_blade', 'top_right': 'bone_blade',
'middle_left': 'bone_blade', 'middle_middle': 'bone_blade', 'middle_right': 'bone_blade',
'bottom_left': 'bone_blade', 'bottom_middle': 'bone_blade', 'bottom_right': 'bone_blade',
}
}</code></pre>"""
'<p>Yes, I am aware that the central location is named '
'"middle_middle": you laugh now, but you will thank me '
'when you are able to programmatically iterate the table '
'in one or two lines of code.</p>'
),
'examples': [
"<code>{top_left: 'bone_blade', 'top_middle': 'rawhide_headband'}</code>",
"<code>{middle_middle: 'rawhide_vest', 'bottom_right': 'brain_mint', bottom_left: 'lantern_greaves'}</code>",
],
},
# notes
"add_note": {
"name": "/survivor/add_note/<survivor_id>",
"subsection": "survivor_notes_management",
"methods": ["POST", "OPTIONS"],
"desc": (
"<b>POST</b> an array named <code>note</code> to this route that "
"includes the following key/value pairs:"
"<table><tr>"
"<th>key</th><th>req/optional</th><th>type</th><th>note</th>"
"</tr>"
"<tr><td>note</td><td>R</td><td>str</td>"
"<td>HTML is OK.</td></tr>"
"<tr><td>pinned</td><td>O</td><td>bool</td><td></td></tr>"
"</table>"
)
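        # Illustrative request body following the table above (editor's example,
        # not from the original docs):
        #   {note: {note: 'Dealt the killing blow to the Watcher.', pinned: true}}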
},
"update_note": {
"name": "/survivor/update_note/<survivor_id>",
"subsection": "survivor_notes_management",
"methods": ["POST", "OPTIONS"],
"desc": (
"<b>POST</b> a note (in JSON format) back to this endpoint "
"to update it. Note JSON that does not include the <i>_id</i>, "
"attribute will be rejected!"
)
},
"rm_note": {
"name": "/survivor/rm_note/<survivor_id>",
"subsection": "survivor_notes_management",
"methods": ["POST", "OPTIONS"],
"desc": (
"<b>POST</b> a note's OID to this endpoint to remove it: "
"<code>{_id: 5fbe989f6515932455f4c5da}</code>."
)
},
# survivor admin
"survivor_set_color_scheme": {
"name": "/survivor/set_color_scheme/<survivor_id>",
"methods": ["POST", "OPTIONS"],
"subsection": "survivor_admin",
"desc": (
"<p><b>POST</b> a <code>color_scheme</code> handle to this "
"endpoint to set the Survivor Sheet attribute of the same name.</p>"
"<p>There are couple of places where you can get a list of "
"available color scheme handles:</p>"
),
"examples": [
"{color_scheme: 'TK'}"
],
},
"add_favorite": {
"name": "/survivor/add_favorite/<survivor_id>",
"subsection": "survivor_admin",
"methods": ["POST", "OPTIONS"],
"desc": (
'<p>The API takes a bit of an unusual approach to making a '
"survivor a 'favorite' or starred survivor due to the fact that "
"users can remove settlements (i.e. the parent record of a "
"survivor record). Rather than having the User record contain a "
"list of favorite survivors, we actually make a list of users on "
"the survivor who have made the survivor one of their favorites."
"</p><p>To add a user's OID to the survivor's list of users who "
"have starred it, use the <code>users_email</code> key and an "
"OID."
),
'examples': [
"{user_email: 'toconnell@thelaborinvain.com'}"
]
},
"rm_favorite": {
"name": "/survivor/rm_favorite/<survivor_id>",
"subsection": "survivor_admin",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>This is effectively the reverse of the "
"<code>add_favorite</code> endpoint (above):</p>"
),
'examples': [
"{user_email: 'toconnell@thelaborinvain.com'}"
]
},
"survivor_set_email": {
"name": "/survivor/set_email/<survivor_id>",
"methods": ["POST", "OPTIONS"],
"subsection": "survivor_admin",
"desc": (
"<p>Sets the survivor's <code>email</code> attribute, which "
"determines the 'owner' of the survivor, from an access and "
"permissions perspective.</p>"
"<p><b>Important!</b> The API allows the creator of a survivor "
"and it's owner (as determined by the <code>email</code> "
"attribute) to modify it. Anyone else gets a 403.</p>"
),
"examples": [
"{email: 'toconnell@thelaborinvain.com'}"
],
},
"survivor_remove": {
"name": "/survivor/remove/<survivor_id>",
"subsection": "survivor_admin",
"methods": ["GET", "POST", "OPTIONS"],
"desc": (
"<p>Removes the survivor.</p>"
),
},
# survivor relationships
"survivor_set_parent": {
"name": "/survivor/set_parent/<survivor_id>",
"subsection": "survivor_relationships",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p>In order to set a survivor parent, you have to <b>POST</b> the "
"<code>role</code> of the parent, as well as the OID of the "
"parent.</p>"
"<p>Possible <code>role</code> values are <code>father</code> and "
"<code>mother</code>, all lower-case, exactly as they're printed "
"here.</p>."
"<p><b>POST</b>ing any other <code>role</code> values or an "
"invalid OID (of the parent) will get you a 400 back.</p>"
),
'examples': [
"{role: 'father', oid: '60020d77ea3701e3ef793a6f'}",
"{role: 'mother', oid: '51gea3596f57b836f182f691'}"
]
},
"survivor_set_partner": {
"name": "/survivor/set_partner/<survivor_id>",
"subsection": "survivor_relationships",
"methods": ["POST", "OPTIONS"],
"desc": (
"<p><b>POST</b> a key/value pair where the value is the OID of "
"the partner:</p>"
"<code>{partner_id: '89gea3596f57b836f182fabc'}</code>"
"<P>Finally, this end point supports a 'magic' value: if you use "
"the string <code>UNSET</code> (all caps) as the value for the "
"<code>partner_id</code>, this will cause the API To remove the "
"<code>partner_id</code> attribute from the survivor compeltely "
"(i.e. it will no longer be in the JSON of the serialized "
"survivor record.</p>"
),
'examples': [
"{partner_id: '60020d77ea3701e3ef793a6f'}"
]
},
}
| 41.092058 | 184 | 0.623867 |
73bdf346a5f9e02db1efb51fa5124a7b0d9fa0f5 | 5,498 | py | Python | kpiit/providers/piwik.py | lnielsen/kpiit | a48c1c2ff807aa63b04b776d010f7e6da9cfc2e0 | [
"MIT"
] | null | null | null | kpiit/providers/piwik.py | lnielsen/kpiit | a48c1c2ff807aa63b04b776d010f7e6da9cfc2e0 | [
"MIT"
] | null | null | null | kpiit/providers/piwik.py | lnielsen/kpiit | a48c1c2ff807aa63b04b776d010f7e6da9cfc2e0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
#
# KPIit is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Piwik provider."""
import subprocess
import cern_sso
import requests
import requests.exceptions
from celery.utils.log import get_task_logger
from kpiit.config import config
from kpiit.providers import BaseProvider
BASE_URL = config['providers']['piwik']['base_url']
URL = config['providers']['piwik']['url']
logger = get_task_logger(__name__)
class Piwik(object):
"""Static base class for accessing the Piwik API."""
name = None
cookie = None
@staticmethod
    def has_kerberos_ticket():
        """Return True if the current user holds a valid Kerberos ticket."""
        return subprocess.call(['klist', '-s']) == 0
@classmethod
def krb_ticket(cls, principal, keytab_file):
"""Retrieve the Kerberos ticket for `principal`."""
try:
if not Piwik.has_kerberos_ticket():
ret = subprocess.run(
['kinit', principal, '-k', '-t', keytab_file]
)
ret.check_returncode()
except subprocess.CalledProcessError as cpe:
logger.error('Failed to retrieve Kerberos ticket: %s' % cpe.cmd)
@classmethod
def krb_cookie(cls):
"""Retrieve the Kerberos cookie.
Note: Make sure the user has a valid Kerberos ticket before retrieving
the cookie.
"""
return cern_sso.krb_sign_on(BASE_URL)
@classmethod
def get(cls, url):
"""Make a GET API call to Piwik.
:param str url: API url
"""
if cls.cookie is None:
cls.krb_ticket(
config['providers']['piwik']['principal'],
config['providers']['piwik']['keytab_file']
)
cls.cookie = cls.krb_cookie()
response = requests.get(url, cookies=cls.cookie)
response.raise_for_status()
return response.json()
@classmethod
def build_url(cls, module, method, file_format='json', filter_limit=-1,
**kwargs):
"""Build API URL from the given parameters.
:param str module: API module (e.g. VisitsSummary)
:param str method: API method (e.g. getVisits)
:param str file_format: response file_format, defaults to 'json'
:param int filter_limit: max number of records to get, defaults to -1
:return: generated URL
"""
kwargs['method'] = '{}.{}'.format(module, method)
kwargs['format'] = file_format
if filter_limit != -1:
kwargs['filter_limit'] = filter_limit
query = ['{}={}'.format(key, value)
for key, value in kwargs.items()
if value is not None and value]
return URL.format(query='&'.join(query))
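    # Example (editor's illustration): Piwik.build_url('VisitsSummary', 'getVisits',
    # idSite=1, period='day', date='yesterday') fills the configured URL template
    # with a query string such as
    # 'idSite=1&period=day&date=yesterday&method=VisitsSummary.getVisits&format=json'
    # (parameter order follows dict insertion order; empty/None values are dropped).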
class PiwikAPI(Piwik):
"""Piwik API module."""
NAME = 'API'
@classmethod
def version(cls):
"""Get the Piwik API version."""
data = cls.get(cls.build_url(cls.NAME, 'getPiwikVersion'))
return data['value']
class PiwikVisitsSummary(Piwik):
"""Piwik API VisitsSummary module."""
NAME = 'VisitsSummary'
@classmethod
def visits(cls, site_id, period, date, segment=''):
"""Get number of visits for a site.
:param int site_id: ID of website
:param str period: range of when visits are counted
:param str date: date for when visits are counted
        :param segment: optional Piwik segment filter, defaults to ''
:return: number of visits
:rtype: str
"""
url = cls.build_url(cls.NAME, 'getVisits', idSite=site_id,
period=period, date=date, segment=segment)
data = cls.get(url)
return data['value']
@classmethod
def unique_visitors(cls, site_id, period, date, segment=''):
"""Get number of unique visitors for a site.
:param int site_id: ID of website
:param str period: range of when visits are counted
:param str date: date for when visits are counted
        :param segment: optional Piwik segment filter, defaults to ''
:return: number of unique visitors
:rtype: str
"""
url = cls.build_url(cls.NAME, 'getUniqueVisitors',
idSite=site_id, period=period, date=date,
segment=segment)
data = cls.get(url)
return data['value']
class PiwikProvider(BaseProvider):
"""Piwik provider."""
def __init__(self, site_id, period='day', date='yesterday'):
"""Initiate the Piwik provider."""
self.site_id = site_id
self.period = period
self.date = date
def collect(self):
"""Collect support stats from Service Now."""
if not self.site_id:
return {
'visits': None,
'visits_unique': None
}
try:
visits = PiwikVisitsSummary.visits(
self.site_id, self.period, self.date)
except ValueError:
logger.exception('Failed to collect number of visits')
visits = None
try:
unique_visits = PiwikVisitsSummary.unique_visitors(
self.site_id, self.period, self.date)
except ValueError:
logger.exception('Failed to collect number of unique visitors')
unique_visits = None
return {
'visits': visits,
'visits_unique': unique_visits
}
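# Minimal usage sketch (editor's note; the site id and date range below are
# illustrative, not part of the original module):
#
#   provider = PiwikProvider(site_id=1, period='day', date='yesterday')
#   stats = provider.collect()  # -> {'visits': ..., 'visits_unique': ...}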
| 30.544444 | 78 | 0.58785 |
73be003273eecb74350a8c02395b6dfc3fe5bfbf | 6,039 | py | Python | python/ngraph/runtime.py | GBuella/ngraph | ec935c0bafc5ba8c6940d9c9dcf45ddeac487513 | [
"Apache-2.0"
] | null | null | null | python/ngraph/runtime.py | GBuella/ngraph | ec935c0bafc5ba8c6940d9c9dcf45ddeac487513 | [
"Apache-2.0"
] | null | null | null | python/ngraph/runtime.py | GBuella/ngraph | ec935c0bafc5ba8c6940d9c9dcf45ddeac487513 | [
"Apache-2.0"
] | null | null | null | # ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""Provide a layer of abstraction for the ngraph++ runtime environment."""
import logging
from typing import List, Union
import numpy as np
from ngraph.impl import Function, Node, Shape, serialize, util
from ngraph.impl.runtime import Backend, Executable, Tensor
from ngraph.utils.types import get_dtype, NumericData
from ngraph.exceptions import UserInputError
log = logging.getLogger(__name__)
def runtime(backend_name='CPU'): # type: (str) -> 'Runtime'
"""Create a Runtime object (helper factory).
Use signature to parameterize runtime as needed.
"""
return Runtime(backend_name)
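# Typical flow (editor's sketch; assumes Parameter nodes A and B and an nGraph
# node built from them elsewhere):
#
#   rt = runtime(backend_name='CPU')
#   model = rt.computation(node, A, B)
#   results = model(value_a, value_b)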
class Runtime:
"""Represents the ngraph++ runtime environment."""
def __init__(self, backend_name): # type: (str) -> None
self.backend_name = backend_name
self.backend = Backend.create(backend_name)
def __repr__(self): # type: () -> str
return "<Runtime: Backend='{}'>".format(self.backend_name)
def computation(self, node_or_function, *inputs):
# type: (Union[Node, Function], *Node) -> 'Computation'
"""Return a callable Computation object."""
if isinstance(node_or_function, Node):
ng_function = Function(node_or_function, inputs, node_or_function.name)
return Computation(self, ng_function)
elif isinstance(node_or_function, Function):
return Computation(self, node_or_function)
else:
raise TypeError('Runtime.computation must be called with an nGraph Function object '
                            'or an nGraph node object and optionally Parameter node objects. '
                            'Called with: %s' % node_or_function)
class Computation(object):
"""ngraph callable computation object."""
def __init__(self, runtime, ng_function):
# type: (Runtime, Function) -> None
self.runtime = runtime
self.function = ng_function
self.parameters = ng_function.get_parameters()
self.results = ng_function.get_results()
self.handle = self.runtime.backend.compile(self.function)
self.tensor_views = [] # type: List[Tensor]
for parameter in self.parameters:
shape = parameter.get_shape()
element_type = parameter.get_element_type()
self.tensor_views.append(runtime.backend.create_tensor(element_type, shape))
self.result_views = [] # type: List[Tensor]
for result in self.results:
shape = result.get_shape()
element_type = result.get_element_type()
self.result_views.append(runtime.backend.create_tensor(element_type, shape))
def __repr__(self): # type: () -> str
params_string = ', '.join([param.name for param in self.parameters])
return '<Computation: {}({})>'.format(self.function.get_name(), params_string)
def __call__(self, *input_values): # type: (*NumericData) -> List[NumericData]
"""Run computation on input values and return result."""
for tensor_view, value in zip(self.tensor_views, input_values):
if not isinstance(value, np.ndarray):
value = np.array(value)
Computation._write_ndarray_to_tensor_view(value, tensor_view)
self.handle.call(self.result_views, self.tensor_views)
results = []
for result_view in self.result_views:
result = np.ndarray(result_view.shape, dtype=get_dtype(result_view.element_type))
Computation._read_tensor_view_to_ndarray(result_view, result)
results.append(result)
return results
def serialize(self, indent=0): # type: (int) -> str
"""Serialize function (compute graph) to a JSON string.
:param indent: set indent of serialized output
:return: serialized model
"""
return serialize(self.function, indent)
@staticmethod
def _get_buffer_size(element_type, element_count): # type: (Tensor, int) -> int
return int((element_type.bitwidth / 8.0) * element_count)
@staticmethod
def _write_ndarray_to_tensor_view(value, tensor_view):
# type: (np.ndarray, Tensor) -> None
tensor_view_dtype = get_dtype(tensor_view.element_type)
if list(tensor_view.shape) != list(value.shape) and len(value.shape) > 0:
raise UserInputError("Provided tensor's shape: %s does not match the expected: %s.",
list(value.shape), list(tensor_view.shape))
if value.dtype != tensor_view_dtype:
log.warning(
'Attempting to write a %s value to a %s tensor. Will attempt type conversion.',
value.dtype,
tensor_view.element_type)
value = value.astype(tensor_view_dtype)
buffer_size = Computation._get_buffer_size(
tensor_view.element_type, tensor_view.element_count)
nparray = np.ascontiguousarray(value)
tensor_view.write(util.numpy_to_c(nparray), buffer_size)
@staticmethod
def _read_tensor_view_to_ndarray(tensor_view, output):
# type: (Tensor, np.ndarray) -> None
buffer_size = Computation._get_buffer_size(
tensor_view.element_type, tensor_view.element_count)
tensor_view.read(util.numpy_to_c(output), buffer_size)
| 41.9375 | 96 | 0.652426 |
73be03d7b9d17a327b44e10e9de91f9bf71f13ba | 7,627 | py | Python | handcam/scratch/dirty-view-oni.py | luketaverne/handcam | e294ebf2be8b5512c8607d3c8ba3f6946f3b8e30 | [
"MIT"
] | 1 | 2022-02-10T13:19:20.000Z | 2022-02-10T13:19:20.000Z | handcam/scratch/dirty-view-oni.py | luketaverne/handcam | e294ebf2be8b5512c8607d3c8ba3f6946f3b8e30 | [
"MIT"
] | null | null | null | handcam/scratch/dirty-view-oni.py | luketaverne/handcam | e294ebf2be8b5512c8607d3c8ba3f6946f3b8e30 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from primesense import openni2
from primesense import _openni2 as c_api
import matplotlib
from handcam.ltt.datasets.handcam.OrbbecCamParams import OrbbecCamParams
matplotlib.use("TkAgg")
import scipy.misc
import matplotlib.pyplot as plt # noqa: ignore=E402
# Reference for rgb stream:
# https://3dclub.orbbec3d.com/t/astra-pro-rgb-stream-using-openni/1015
#####
#
# Config options
#
#####
mode = "table"
w = 640
h = 480
# Camera Params
fx_d = 578.938
fy_d = 578.938
cx_d = 318.496
cy_d = 251.533
k1_d = -0.094342
k2_d = 0.290512
p1_d = -0.299526
p2_d = -0.000318
k3_d = -0.000279
cam_matrix_d = np.array([[fx_d, 0, cx_d], [0, fy_d, cy_d], [0, 0, 1]])
dist_d = np.array([k1_d, k2_d, p1_d, p2_d, k3_d])
newcameramtx_d, roi_d = cv2.getOptimalNewCameraMatrix(
cam_matrix_d, dist_d, (w, h), 1, (w, h)
)
fx_rgb = 517.138
fy_rgb = 517.138
cx_rgb = 319.184
cy_rgb = 229.077
k1_rgb = 0.044356
k2_rgb = -0.174023
p1_rgb = 0.077324
p2_rgb = 0.001794
k3_rgb = -0.003853
cam_matrix_rgb = np.array([[fx_rgb, 0, cx_rgb], [0, fy_rgb, cy_rgb], [0, 0, 1]])
dist_rgb = np.array([k1_rgb, k2_rgb, p1_rgb, p2_rgb, k3_rgb])
newcameramtx_rgb, roi_rgb = cv2.getOptimalNewCameraMatrix(
cam_matrix_rgb, dist_rgb, (w, h), 1, (w, h)
)
RR = np.array([[1, -0.003, 0.002], [0.003, 1, 0.005], [-0.002, -0.005, 1]])
RR2d = np.array([[1, -0.003], [0.003, 1]])
# RR = np.array([
# [-0.003, 1 , 0.002],
# [1, 0.003,0.005],
# [-0.005, -0.002,1]
# ])
TT = np.array([-25.097, 0.288, -1.118])
TT2d = np.array([-25.097, 0.288])
RandT2d = np.zeros((2, 3))
RandT2d[0:2, 0:2] = RR2d
RandT2d[0:2, 2] = TT2d.transpose()
homog = np.zeros((4, 4))
homog[0:3, 0:3] = RR
homog[0:3, 3] = TT.transpose()
homog[3, 3] = 1
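# Editor's note: homog packs R and T into the 4x4 homogeneous transform
# [[R, T], [0, 1]] relating the depth and RGB cameras.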
homog2 = np.zeros((3, 3))
homog2 = np.matmul(cam_matrix_d, RR)
# homog2[0:2,0:2] = np.array([[1,0],[0,1]])
# homog2[0:2,2] = TT[0:2].transpose()
# homog2[2,2] = 1
# R_d,R_rgb,P_d,P_rgb,_,_,_ = cv2.stereoRectify(cam_matrix_d,dist_d,cam_matrix_rgb,dist_rgb,(w,h),RR,TT)
# map_d1,map_d2=cv2.initUndistortRectifyMap(cam_matrix_d,dist_d,R_d,P_d,(w,h),cv2.CV_16SC2)
# map_rgb1,map_rgb2=cv2.initUndistortRectifyMap(cam_matrix_rgb,dist_rgb,R_rgb,P_rgb,(w,h),cv2.CV_16SC2)
R_d, R_rgb, P_d, P_rgb, _, _, _ = cv2.stereoRectify(
cam_matrix_d,
dist_d,
cam_matrix_rgb,
dist_rgb,
(w, h),
RR,
TT,
None,
None,
None,
None,
None,
cv2.CALIB_ZERO_DISPARITY,
)
print(P_d)
print(P_rgb)
map_d1, map_d2 = cv2.initUndistortRectifyMap(
cam_matrix_d, dist_d, R_d, P_d, (w, h), cv2.CV_32FC1
)
map_rgb1, map_rgb2 = cv2.initUndistortRectifyMap(
cam_matrix_rgb, dist_rgb, R_rgb, P_rgb, (w, h), cv2.CV_32FC1
)
###
#
# Trying with homography
#
###
#####
#
# Setup
#
#####
openni2.initialize("/local/home/luke/programming/OpenNI-Linux-x64-2.3/Redist")
dev = openni2.Device.open_file(
"/local/home/luke/datasets/handcam/150287-1-grasp_6/video.oni"
)
# Diagnostics: make sure we have some valid device
print(dev.get_device_info())
print(dev.has_sensor(c_api.OniSensorType.ONI_SENSOR_DEPTH))
print(dev.has_sensor(c_api.OniSensorType.ONI_SENSOR_COLOR))
dev.set_property(101, 1)
dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
dev.set_depth_color_sync_enabled(True)
dev.playback.set_speed(-1)
# Depth setup
depth_stream = dev.create_depth_stream()
depth_stream.start()
# depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX=640, resolutionY=480, fps=30))
rgb_stream = dev.create_color_stream()
rgb_stream.start()
print(rgb_stream.get_video_mode())
print(rgb_stream.get_number_of_frames())
print(depth_stream.get_video_mode())
print(depth_stream.get_number_of_frames())
num_frames = rgb_stream.get_number_of_frames()
# rgb_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_YUYV, resolutionX=640, resolutionY=480, fps=30))
valid_modes = ["arm", "table"]
#####
#
# Sanity Checks
#
#####
assert mode in valid_modes
plt.figure()
plt.show(block=False)
i = 1
max_val = 65553
def rescale_depth(img):
img = 255.0 * (img / 65535.0)
# img = (img - 100) * (255.0/ (255.0 - 100.0))
return img
def process_rgb_frame(frame):
frame_data = frame.get_buffer_as_uint8()
img = np.frombuffer(frame_data, dtype=np.uint8)
# img = rescale_depth(img)
# img = img.astype(np.uint8)
img.shape = (480, 640, 3)
# img = np.concatenate((img, img, img), axis=0)
# img = np.swapaxes(img, 0, 2)
# img = np.swapaxes(img, 0, 1)
# img = np.squeeze(img)
# img = cv2.applyColorMap(img, cv2.COLORMAP_HOT)
# print color_img.shape
# if mode == "table":
# img = cv2.flip(img, 0)
#
# img = cv2.undistort(img, cam_matrix_rgb, dist_rgb, None)
return img
def process_depth_frame(frame):
frame_data = frame.get_buffer_as_uint16()
img = np.frombuffer(frame_data, dtype=np.uint16)
# img = rescale_depth(img)
img = cv2.convertScaleAbs(img, alpha=(255.0 / 65535.0))
# img = 255 - img
img.shape = (1, 480, 640)
img = np.concatenate((img, img, img), axis=0)
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 0, 1)
img = img[:, :, 0]
# img = np.squeeze(img)
# img = cv2.normalize(img,dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)
img = cv2.applyColorMap(img, cv2.COLORMAP_HOT)
# print color_img.shape
# if mode == "table":
# img = cv2.flip(img, 0)
#
# img = cv2.undistort(img, cam_matrix_d, dist_d, None)
return img
def create_depth_overlay(depth_img, rgb_img):
# b_channel, g_channel, r_channel = cv2.split(img)
#
# alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 50 # creating a dummy alpha channel image.
#
# img = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
# rgb_img = cv2.undistort(rgb_img, cam_matrix_rgb, dist_rgb, None)
# depth_img = cv2.undistort(depth_img, cam_matrix_d, dist_d, None)
#
# depth_img = cv2.warpPerspective(depth_img, homog2, (w, h))
# depth_img = depth_img
# depth_img = cv2.remap(depth_img,map_d1,map_d2,cv2.INTER_LINEAR)
# rgb_img = cv2.remap(rgb_img, map_rgb1, map_rgb2, cv2.INTER_LINEAR)
# depth_img = cv2.warpAffine(depth_img,RandT2d,(w,h))
# depth_img = np.multiply(depth_img, RR)
img = rgb_img.copy()
# depth_img = cv2.warpPerspective(depth_img,RR,(w,h))
alpha = 0.8
cv2.addWeighted(depth_img, alpha, img, 1 - alpha, 0, img)
return img
dev.playback.set_speed(-1)
# first = False
# while True:
# if not first:
# first = True
# dev.playback.seek(rgb_stream, i)
#
# depth_frame = depth_stream.read_frame()
# depth_img = process_depth_frame(depth_frame)
#
# plt.hist(depth_img[:,:,0].ravel(), bins=100,bottom=1)
# plt.show()
while i < num_frames:
dev.playback.seek(rgb_stream, i)
rgb_frame = rgb_stream.read_frame()
rgb_img = process_rgb_frame(rgb_frame)
depth_frame = depth_stream.read_frame()
depth_img = process_depth_frame(depth_frame)
# scipy.misc.imsave('rgb' + str(i), rgb_img, format="png")
# scipy.misc.imsave('depth' + str(i), depth_img, format="png")
# ret, rgb_frame = cap.read()
# print ret
# cv2.imshow("rgb", cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR))
# cv2.imshow("depth", depth_img)
cv2.imshow("overlay", create_depth_overlay(depth_img, rgb_img))
# cv2.imshow("depth", depth_img)
cv2.waitKey(34)
i += 1
# print("Max val found: ", max_val)
openni2.unload()
| 25.854237 | 155 | 0.671562 |
73be0e180a9f3a61f8b6abe32d367650b953ef5e | 2,404 | py | Python | dvae/utils/eval_metric.py | sleglaive/DVAE-speech | df3ab438d169a8f852b0450eebf0a9f3229f891f | [
"MIT"
] | 41 | 2020-11-30T22:20:28.000Z | 2021-10-19T06:33:36.000Z | dvae/utils/eval_metric.py | sleglaive/DVAE-speech | df3ab438d169a8f852b0450eebf0a9f3229f891f | [
"MIT"
] | 2 | 2021-01-23T18:47:45.000Z | 2021-07-08T11:35:34.000Z | dvae/utils/eval_metric.py | sleglaive/DVAE-speech | df3ab438d169a8f852b0450eebf0a9f3229f891f | [
"MIT"
] | 9 | 2021-01-23T18:15:38.000Z | 2021-09-30T03:37:34.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Software dvae-speech
Copyright Inria
Year 2020
Contact : xiaoyu.bie@inria.fr
License agreement in LICENSE.txt
In this file, I write some equations to evaluate speech quality after re-synthesis
"""
import numpy as np
import soundfile as sf
from pypesq import pesq
from pystoi import stoi
def compute_median(data):
median = np.median(data, axis=0)
q75, q25 = np.quantile(data, [.75 ,.25], axis=0)
iqr = q75 - q25
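    # 1.57 * IQR / sqrt(n) is the usual notched-boxplot approximation of a
    # 95% confidence interval for the median (editor's note).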
CI = 1.57*iqr/np.sqrt(data.shape[0])
if np.any(np.isnan(data)):
raise NameError('nan in data')
return median, CI
def compute_rmse(x_est, x_ref):
    """Compute a scale-invariant RMSE between an estimated and a reference signal."""
    # align
    len_x = min(len(x_est), len(x_ref))
    x_est = x_est[:len_x]
    x_ref = x_ref[:len_x]
    # scaling: least-squares gain that best matches x_est to x_ref
    alpha = np.sum(x_est*x_ref) / np.sum(x_est**2)
    # x_est_ = np.expand_dims(x_est, axis=1)
    # alpha = np.linalg.lstsq(x_est_, x_ref, rcond=None)[0][0]
    x_est_scaled = alpha * x_est
    return np.sqrt(np.square(x_est_scaled - x_ref).mean())
class EvalMetrics():
def __init__(self, metric='all'):
self.metric = metric
def eval(self, audio_est, audio_ref):
x_est, fs_est = sf.read(audio_est)
x_ref, fs_ref = sf.read(audio_ref)
# align
len_x = np.min([len(x_est), len(x_ref)])
x_est = x_est[:len_x]
x_ref = x_ref[:len_x]
# x_ref = x_ref / np.max(np.abs(x_ref))
if fs_est != fs_ref:
            raise ValueError('Sampling rate differs between estimated audio and reference audio')
if self.metric == 'rmse':
return compute_rmse(x_est, x_ref)
elif self.metric == 'pesq':
return pesq(x_ref, x_est, fs_est)
elif self.metric == 'stoi':
return stoi(x_ref, x_est, fs_est, extended=False)
elif self.metric == 'estoi':
return stoi(x_ref, x_est, fs_est, extended=True)
elif self.metric == 'all':
score_rmse = compute_rmse(x_est, x_ref)
score_pesq = pesq(x_ref, x_est, fs_est)
score_stoi = stoi(x_ref, x_est, fs_est, extended=False)
return score_rmse, score_pesq, score_stoi
else:
            raise ValueError('Evaluation only supports: rmse, pesq, (e)stoi, all')
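# Minimal usage sketch (editor's note; file paths are illustrative):
#
#   evaluator = EvalMetrics(metric='all')
#   rmse, pesq_score, stoi_score = evaluator.eval('est.wav', 'ref.wav')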
| 29.317073 | 99 | 0.611481 |
73be1397473fec6a82e56efb99359c8c0bbbd58b | 1,106 | py | Python | master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/vcs/git/gittyup/tests/commit.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/vcs/git/gittyup/tests/commit.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/vcs/git/gittyup/tests/commit.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | from __future__ import absolute_import
from __future__ import print_function
#
# test/commit.py
#
import os
from shutil import rmtree
from sys import argv
from optparse import OptionParser
from gittyup.client import GittyupClient
from gittyup.objects import *
from util import touch, change
parser = OptionParser()
parser.add_option("-c", "--cleanup", action="store_true", default=False)
(options, args) = parser.parse_args(argv)
DIR = "commit"
if options.cleanup:
rmtree(DIR, ignore_errors=True)
print("commit.py clean")
else:
if os.path.isdir(DIR):
raise SystemExit("This test script has already been run. Please call this script with --cleanup to start again")
os.mkdir(DIR)
g = GittyupClient()
g.initialize_repository(DIR)
touch(DIR + "/test1.txt")
touch(DIR + "/test2.txt")
g.stage([DIR+"/test1.txt", DIR+"/test2.txt"])
g.commit("First commit", commit_all=True)
change(DIR + "/test1.txt")
g.stage([DIR+"/test1.txt"])
g.commit("Second commit", author="Alex Plumb <alexplumb@gmail.com>")
print("commit.py pass")
| 24.577778 | 121 | 0.690778 |
73be1a6b1ae2b9a982565b281616990f0da42cfe | 369 | py | Python | ophiuchus/tests/test_core.py | adrn/ophiuchus | fe7e937bf421d506ec252165f044d514f571667b | [
"MIT"
] | 1 | 2015-09-25T10:12:52.000Z | 2015-09-25T10:12:52.000Z | ophiuchus/tests/test_core.py | adrn/ophiuchus | fe7e937bf421d506ec252165f044d514f571667b | [
"MIT"
] | null | null | null | ophiuchus/tests/test_core.py | adrn/ophiuchus | fe7e937bf421d506ec252165f044d514f571667b | [
"MIT"
] | null | null | null | # coding: utf-8
""" Test ... """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
import sys
import logging
# Third-party
from astropy import log as logger
import matplotlib.pyplot as pl
import numpy as np
# Project
from ..potential import WangZhaoBarPotential
def test_derp():
pass
| 15.375 | 47 | 0.747967 |
73be23166a3d00f6703b0179427bc1e6fb622532 | 8,817 | py | Python | tests/test_vrf_attr.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | tests/test_vrf_attr.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | tests/test_vrf_attr.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | import pytest
from test_vrf import (
g_vars,
setup_vrf,
host_facts,
cfg_facts,
gen_vrf_neigh_file,
partial_ptf_runner
)
from tests.ptf_runner import ptf_runner
pytestmark = [
pytest.mark.topology('any')
]
# tests
class TestVrfAttrSrcMac():
new_vrf1_router_mac = '00:12:34:56:78:9a'
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_attr_src_mac(self, duthost, ptfhost, host_facts):
# -------- Setup ----------
extra_vars = { 'router_mac': self.new_vrf1_router_mac }
duthost.options['variable_manager'].extra_vars.update(extra_vars)
duthost.template(src="vrf/vrf_attr_src_mac.j2", dest="/tmp/vrf_attr_src_mac.json")
duthost.shell("config load -y /tmp/vrf_attr_src_mac.json")
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
# -------- Testing ----------
yield
# -------- Teardown ----------
extra_vars = { 'router_mac': host_facts['ansible_Ethernet0']['macaddress'] }
duthost.host.options['variable_manager'].extra_vars.update(extra_vars)
duthost.template(src="vrf/vrf_attr_src_mac.j2", dest="/tmp/vrf_attr_src_mac.json")
duthost.shell("config load -y /tmp/vrf_attr_src_mac.json")
def test_vrf_src_mac_cfg(self, duthost):
# get vrf1 new router_mac from config_db
vrf1_mac = duthost.shell("redis-cli -n 4 hget 'VRF|Vrf1' 'src_mac'")['stdout']
assert vrf1_mac == self.new_vrf1_router_mac
def test_vrf1_neigh_with_default_router_mac(self, partial_ptf_runner):
# send packets with default router_mac
partial_ptf_runner(
testname='vrf_test.FwdTest',
pkt_action='drop',
fwd_info='/tmp/vrf1_neigh.txt',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf1_neigh_with_new_router_mac(self, ptfhost, host_facts, testbed):
# send packets with new router_mac
ptf_runner(ptfhost,
"ptftests",
"vrf_test.FwdTest",
platform_dir='ptftests',
params={'testbed_type': testbed['topo']['name'],
'router_mac': self.new_vrf1_router_mac,
'fwd_info': "/tmp/vrf1_neigh.txt",
'src_ports': g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']},
log_file="/tmp/vrf_attr_src_mac_test.FwdTest2.log")
def test_vrf2_neigh_with_default_router_mac(self, partial_ptf_runner):
# verify router_mac of Vrf2 keep to be default router_mac
partial_ptf_runner(
testname='vrf_test.FwdTest',
fwd_info='/tmp/vrf2_neigh.txt',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
class TestVrfAttrTTL():
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_attr_ttl(self, duthost, ptfhost):
# -------- Setup ----------
duthost.copy(src="vrf/vrf_attr_ttl_action.json", dest="/tmp")
duthost.copy(src="vrf/vrf_restore.json", dest="/tmp")
duthost.shell("config load -y /tmp/vrf_attr_ttl_action.json")
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
# -------- Testing ----------
yield
# -------- Teardown ----------
duthost.shell("config load -y /tmp/vrf_restore.json")
def test_vrf1_drop_pkts_with_ttl_1(self, partial_ptf_runner):
# verify packets in Vrf1 with ttl=1 should be drop
partial_ptf_runner(
testname='vrf_test.FwdTest',
pkt_action='drop',
fwd_info='/tmp/vrf1_neigh.txt',
ttl=1,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf1_fwd_pkts_with_ttl_2(self, partial_ptf_runner):
# verify packets in Vrf1 with ttl=2 should be forward
partial_ptf_runner(
testname='vrf_test.FwdTest',
fwd_info='/tmp/vrf1_neigh.txt',
ttl=2,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf2_fwd_pkts_with_ttl_1(self, partial_ptf_runner):
# verify packets in Vrf2 with ttl=1 should be forward
partial_ptf_runner(
testname='vrf_test.FwdTest',
fwd_info='/tmp/vrf2_neigh.txt',
ttl=1,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
class TestVrfAttrIpAction():
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_attr_ip_opt_action(self, duthost, ptfhost):
# -------- Setup ----------
duthost.copy(src="vrf/vrf_attr_ip_opt_action.json", dest="/tmp")
duthost.copy(src="vrf/vrf_restore.json", dest="/tmp")
duthost.shell("config load -y /tmp/vrf_attr_ip_opt_action.json")
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
# -------- Testing ----------
yield
# -------- Teardown ----------
duthost.shell("config load -y /tmp/vrf_restore.json")
def test_vrf1_drop_pkts_with_ip_opt(self, partial_ptf_runner):
# verify packets in Vrf1 with ip_option should be drop
partial_ptf_runner(
testname='vrf_test.FwdTest',
pkt_action='drop',
fwd_info='/tmp/vrf1_neigh.txt',
ip_option=True,
ipv4=True,
ipv6=False,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf1_fwd_pkts_without_ip_opt(self, partial_ptf_runner):
# verify packets in Vrf1 without ip_option should be forward
partial_ptf_runner(
testname='vrf_test.FwdTest',
fwd_info='/tmp/vrf1_neigh.txt',
ip_option=False,
ipv4=True,
ipv6=False,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf2_fwd_pkts_with_ip_opt(self, partial_ptf_runner):
# verify packets in Vrf2 with ip_option should be forward
partial_ptf_runner(
testname='vrf_test.FwdTest',
fwd_info='/tmp/vrf2_neigh.txt',
ip_option=True,
ipv4=True,
ipv6=False,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
class TestVrfAttrIpState():
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_attr_ip_state(self, duthost, ptfhost):
# -------- Setup ----------
duthost.copy(src="vrf/vrf_attr_ip_state.json", dest="/tmp")
duthost.copy(src="vrf/vrf_restore.json", dest="/tmp")
duthost.shell("config load -y /tmp/vrf_attr_ip_state.json")
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
# -------- Testing ----------
yield
# -------- Teardown ----------
duthost.shell("config load -y /tmp/vrf_restore.json")
def test_vrf1_drop_v4(self, partial_ptf_runner):
# verify ipv4 L3 traffic is dropped in vrf1
partial_ptf_runner(
testname='vrf_test.FwdTest',
fwd_info='/tmp/vrf1_neigh.txt',
pkt_action='drop',
ipv4=True,
ipv6=False,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf1_forward_v6(self, partial_ptf_runner):
# verify ipv6 L3 traffic is forwarded in vrf1
partial_ptf_runner(
testname='vrf_test.FwdTest',
fwd_info='/tmp/vrf1_neigh.txt',
ipv4=False,
ipv6=True,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf2_forward_v4(self, partial_ptf_runner):
# verify ipv4 L3 traffic is forwarded in vrf2
partial_ptf_runner(
testname='vrf_test.FwdTest',
fwd_info='/tmp/vrf2_neigh.txt',
ipv4=True,
ipv6=False,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
def test_vrf2_drop_v6(self, partial_ptf_runner):
# verify ipv6 L3 traffic is dropped in vrf2
partial_ptf_runner(
testname='vrf_test.FwdTest',
pkt_action='drop',
fwd_info='/tmp/vrf2_neigh.txt',
ipv4=False,
ipv6=True,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
| 36.585062 | 97 | 0.615515 |
73be31ef87b41cc4384ab010c54c8b771725ec85 | 353 | py | Python | Sec10_Math/q0470.py | OctoberChang/LeetCode-Solutions | bb7958194e7b196729611cbad19ee792ba41c429 | [
"MIT"
] | 2 | 2021-01-26T00:59:47.000Z | 2021-11-20T02:55:13.000Z | Sec10_Math/q0470.py | OctoberChang/LeetCode-Solutions | bb7958194e7b196729611cbad19ee792ba41c429 | [
"MIT"
] | null | null | null | Sec10_Math/q0470.py | OctoberChang/LeetCode-Solutions | bb7958194e7b196729611cbad19ee792ba41c429 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# The rand7() API is already defined for you.
# def rand7():
# @return a random integer in the range 1 to 7
class Solution:
def rand10(self):
"""
:rtype: int
"""
while True:
x = (rand7()-1)*7 + (rand7()-1)
if x < 40:
return x%10 + 1
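    # Editor's note on why this works: (rand7()-1)*7 + (rand7()-1) is uniform
    # over 0..48; rejecting x >= 40 keeps 40 equally likely outcomes, so
    # x % 10 + 1 is uniform over 1..10 (about 2.45 rand7() calls per rand10()).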
| 18.578947 | 46 | 0.492918 |
73be6c50f61d79c38fdc27e76a4283abd02d6bab | 40 | py | Python | carla/recourse_methods/catalog/clue/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 140 | 2021-08-03T21:53:32.000Z | 2022-03-20T08:52:02.000Z | carla/recourse_methods/catalog/clue/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 54 | 2021-03-07T18:22:16.000Z | 2021-08-03T12:06:31.000Z | carla/recourse_methods/catalog/clue/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 16 | 2021-08-23T12:14:58.000Z | 2022-03-01T00:52:58.000Z | # flake8: noqa
from .model import Clue
| 10 | 23 | 0.725 |
73be82fd4dcb6504fca544db2f17004fd2e5a7f6 | 1,835 | py | Python | google-cloud-sdk/lib/surface/logging/resource_descriptors/list.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/lib/surface/logging/resource_descriptors/list.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/.install/.backup/lib/surface/logging/resource_descriptors/list.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'logging resource-descriptors list' command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import base
class List(base.ListCommand):
"""Lists all available resource descriptors."""
@staticmethod
def Args(parser):
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
base.URI_FLAG.RemoveFromParser(parser)
def Collection(self):
return 'logging.resourceDescriptors'
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
The list of log entries.
"""
return list_pager.YieldFromList(
util.GetClient().monitoredResourceDescriptors,
util.GetMessages().LoggingMonitoredResourceDescriptorsListRequest(),
field='resourceDescriptors', limit=args.limit,
batch_size=args.limit, batch_size_attribute='pageSize')
List.detailed_help = {
'DESCRIPTION': """\
Lists all available resource descriptors that are used by Stackdriver
Logging. Each log entry must be associated with a valid resource
descriptor.
""",
}
| 31.637931 | 79 | 0.733515 |
73be8626daf9dd6692582ed6e174ad5c8c0e73dc | 1,510 | py | Python | pysamba/rpc/dcerpc.py | vnation/wmi-1.3.14 | 170c5af4501087ebf35833386ca1345fafed723b | [
"MIT"
] | null | null | null | pysamba/rpc/dcerpc.py | vnation/wmi-1.3.14 | 170c5af4501087ebf35833386ca1345fafed723b | [
"MIT"
] | null | null | null | pysamba/rpc/dcerpc.py | vnation/wmi-1.3.14 | 170c5af4501087ebf35833386ca1345fafed723b | [
"MIT"
] | null | null | null | ###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008-2010, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
__doc__ = "Define common structures used to perform DCE-RPC calls"
from pysamba.library import *
class GUID(Structure):
_fields_ = [
('time_low', uint32_t),
('time_mid', uint16_t),
('time_hi_and_version', uint16_t),
('clock_seq', uint8_t*2),
('node', uint8_t*6),
]
class policy_handle(Structure):
_fields_ = [
('handle_type', uint32_t),
('uuid', GUID),
]
class dcerpc_syntax_id(Structure):
_fields_ = [
('uuid', GUID),
('if_version', uint32_t),
]
class dcerpc_pipe(Structure):
_fields_ = [
('context_id', uint32_t),
('syntax', dcerpc_syntax_id),
('transfer_syntax', dcerpc_syntax_id),
('conn', c_void_p), # lie: struct dcerpc_connection *
('binding', c_void_p), # lie: struct dcerpc_binding *
('last_fault_code', uint32_t),
('request_timeout', uint32_t),
]
| 29.607843 | 75 | 0.566225 |
73be967ba364aab6438359a78d6b5826f540b80b | 1,858 | py | Python | peartree/convert.py | yiyange/peartree | eb4fdab7a8484a621a0ee78d7723858b5ca2ac27 | [
"MIT"
] | 136 | 2017-11-21T22:45:12.000Z | 2022-03-25T14:10:26.000Z | peartree/convert.py | yiyange/peartree | eb4fdab7a8484a621a0ee78d7723858b5ca2ac27 | [
"MIT"
] | 156 | 2017-11-30T21:15:47.000Z | 2021-10-31T17:22:55.000Z | peartree/convert.py | yiyange/peartree | eb4fdab7a8484a621a0ee78d7723858b5ca2ac27 | [
"MIT"
] | 14 | 2018-01-01T19:03:35.000Z | 2021-08-12T17:48:51.000Z | import collections
import networkx as nx
def convert_to_digraph(G_orig: nx.MultiDiGraph) -> nx.DiGraph:
# Prevent upstream impacts
G = G_orig.copy()
dupes_dict = {}
for node_id in G.nodes():
nodes_to = []
for fr, to in G.out_edges(node_id):
nodes_to.append(to)
to_collection = collections.Counter(nodes_to).items()
dupes = [item for item, count in to_collection if count > 1]
if len(dupes) > 0:
dupes_dict[node_id] = {}
for dupe in dupes:
in_consideration = []
# Get all the edge attributes for this node pair
dupe_count = G.number_of_edges(node_id, dupe)
for i in range(dupe_count):
e = G.edges[node_id, dupe, i]
in_consideration.append(e)
# From the results, we optimistically select the fastest
# edge value and all associated key/values from the list
fastest_e = min(in_consideration, key=lambda x: x['length'])
dupes_dict[node_id][dupe] = fastest_e
# Now that we have a list of issue duplicates, we can
# iterate through the list and remove and replace edges
for fr in dupes_dict.keys():
to_dict = dupes_dict[fr]
for to in to_dict.keys():
# Remove all the edges that exist, we are going
# to start with a fresh slate (also, NetworkX makes
# it really hard to control which edges you are
# removing, otherwise)
for i in range(G.number_of_edges(fr, to)):
G.remove_edge(fr, to)
# Now let's start fresh and add a new, single, edge
G.add_edge(fr, to, **to_dict[to])
# Now we should be safe to return a clean directed graph object
return nx.DiGraph(G)
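# Minimal usage sketch (editor's note; assumes an existing peartree
# MultiDiGraph, e.g. one produced by the feed loaders). Only the fastest
# ('length'-minimal) edge between each node pair is kept:
#
#   G_simple = convert_to_digraph(G_multi)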
| 35.730769 | 76 | 0.586652 |
73bea8245b2ac281112dd89df2134546c03b2a06 | 15,535 | py | Python | src/blobtools/lib/view.py | blobtoolkit/blobtools-add | 88717a92b81170b6244b5693eca03689464d2cea | [
"MIT"
] | 2 | 2022-03-11T16:30:17.000Z | 2022-03-23T15:18:55.000Z | src/blobtools/lib/view.py | blobtoolkit/blobtools-add | 88717a92b81170b6244b5693eca03689464d2cea | [
"MIT"
] | 9 | 2022-03-04T14:58:28.000Z | 2022-03-30T20:32:05.000Z | src/blobtools/lib/view.py | blobtoolkit/blobtools-add | 88717a92b81170b6244b5693eca03689464d2cea | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# pylint: disable=no-member, too-many-branches, too-many-locals, too-many-statements, broad-except
"""
Generate plots using BlobToolKit Viewer.
Usage:
blobtools view [--format STRING...] [--host STRING] [--interactive]
[--out PATH] [--param STRING...] [--ports RANGE] [--prefix STRING]
[--preview STRING...] [--driver STRING] [--driver-log PATH]
[--local] [--remote] [--timeout INT] [--view STRING...] DIRECTORY
Options:
--format STRING Image format (svg|png). [Default: png]
--host STRING Hostname. [Default: http://localhost]
--interactive Start interactive session (opens dataset in Firefox/Chromium). [Default: False]
--out PATH Directory for outfiles. [Default: .]
--param key=value Query string parameter.
--ports RANGE Port range for viewer and API. [Default: 8000-8099]
--prefix STRING URL prefix. [Default: view]
--preview STRING Field name.
--driver STRING Webdriver to use (chromium or firefox). [Default: firefox]
--driver-log PATH Path to driver logfile for debugging. [Default: /dev/null]
--local Start viewer for local session. [Default: False]
--remote Start viewer for remote session. [Default: False]
--timeout INT Time to wait for page load in seconds. Default (0) is no timeout. [Default: 0]
--view STRING Plot type (blob|cumulative|snail). [Default: blob]
"""
import os
import shlex
import signal
import sys
import time
from pathlib import Path
from subprocess import PIPE
from subprocess import Popen
from docopt import docopt
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tqdm import tqdm
from .host import test_port
from .version import __version__
def file_ready(file_path, timeout, callback):
"""Check if file is ready."""
start_time = time.time()
while not os.path.exists(file_path):
elapsed_time = time.time() - start_time
if timeout and elapsed_time > timeout:
callback("Timeout waiting for file")
return False
# flush nfs cache by chowning parent to current owner
parent = os.path.dirname(os.path.abspath(file_path))
os.chown(parent, os.stat(parent).st_uid, os.stat(parent).st_gid)
time.sleep(1)
if os.path.isfile(file_path):
return True
raise ValueError("%s isn't a file!" % file_path)
def test_loc(args):
"""See if dataset needs to be hosted and, if so, find an empty port."""
info = args["--host"].split(":")
dataset = Path(args["DIRECTORY"]).name
level = "dataset"
if len(info) >= 2 and info[1] != "//localhost":
loc = "%s/%s/%s/dataset/%s" % (
args["--host"],
args["--prefix"],
dataset,
dataset,
)
return loc, None, None, None, level
if len(info) == 1 and info[0] != "localhost":
# need to add test for http vs https
loc = "http://%s/%s/%s/dataset/%s" % (
args["--host"],
args["--prefix"],
dataset,
dataset,
)
return loc, None, None, None, level
if len(info) == 3:
port = info[2]
available = test_port(port, "test")
if available:
print("ERROR: No service running on port %s" % port)
print(" Unable to connect to %s" % args["--host"])
sys.exit(1)
else:
loc = "%s/%s/%s/dataset/%s" % (
args["--host"],
args["--prefix"],
dataset,
dataset,
)
return loc, None, None, None, level
if args["DIRECTORY"] == "_":
parent = "_"
level = "blobdir"
else:
if not Path(args["DIRECTORY"]).exists():
print(
"ERROR: DIRECTORY '%s' must be a valid path to begin hosting."
% args["DIRECTORY"]
)
sys.exit(1)
dataset = Path(args["DIRECTORY"]).name
if (
Path("%s/meta.json" % args["DIRECTORY"]).is_file()
or Path("%s/meta.json.gz" % args["DIRECTORY"]).is_file()
):
parent = Path(args["DIRECTORY"]).resolve().absolute().parent
else:
level = "blobdir"
parent = Path(args["DIRECTORY"]).resolve().absolute()
port_range = args["--ports"].split("-")
api_port = False
port = False
for i in range(int(port_range[0]), int(port_range[1])):
if test_port(i, "test"):
if not api_port:
api_port = i
continue
if not port:
port = i
break
# directory = Path(__file__).resolve().parent.parent
cmd = "blobtools host --port %d --api-port %d %s" % (
port,
api_port,
parent,
)
process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, encoding="ascii")
loc = "%s:%d/%s" % (args["--host"], port, args["--prefix"])
if level == "dataset":
loc += "/%s/dataset/%s" % (dataset, dataset)
else:
loc += "/all"
for i in tqdm(
range(0, 10),
unit="s",
ncols=75,
desc="Initializing viewer",
bar_format="{desc}",
):
poll = process.poll()
if poll is None:
if test_port(port, "test"):
break
time.sleep(1)
else:
print(process.stdout.read(), file=sys.stderr)
print(process.stderr.read(), file=sys.stderr)
print("ERROR: Viewer quit unexpectedly", file=sys.stderr)
print("Unable to run: %s" % cmd, file=sys.stderr)
sys.exit(1)
return loc, process, port, api_port, level
def firefox_driver(args):
"""Start firefox."""
import geckodriver_autoinstaller
geckodriver_autoinstaller.install()
outdir = os.path.abspath(args["--out"])
os.makedirs(Path(outdir), exist_ok=True)
profile = webdriver.FirefoxProfile()
profile.set_preference("browser.download.folderList", 2)
profile.set_preference("browser.download.manager.showWhenStarting", False)
profile.set_preference("browser.download.dir", outdir)
profile.set_preference("browser.download.lastDir", args["--out"])
profile.set_preference(
"browser.helperApps.neverAsk.saveToDisk",
"image/png, image/svg+xml, text/csv, text/plain, application/json",
)
options = Options()
options.headless = not args["--interactive"]
display = Display(visible=0, size=(800, 600))
display.start()
driver = webdriver.Firefox(
options=options,
firefox_profile=profile,
service_log_path=args["--driver-log"],
)
time.sleep(2)
return driver, display
def chromium_driver(args):
"""Start chromium browser."""
import chromedriver_binary
outdir = os.path.abspath(args["--out"])
os.makedirs(Path(outdir), exist_ok=True)
options = webdriver.ChromeOptions()
if not args["--interactive"]:
options.add_argument("headless")
prefs = {}
prefs["profile.default_content_settings.popups"] = 0
prefs["download.default_directory"] = outdir
options.add_experimental_option("prefs", prefs)
display = Display(visible=0, size=(800, 600))
display.start()
driver = webdriver.Chrome(
options=options,
# executable_path=add option to set binary location,
service_log_path=args["--driver-log"],
)
time.sleep(2)
return driver, display
def static_view(args, loc, viewer):
"""Generate static images."""
qstr = "staticThreshold=Infinity"
qstr += "&nohitThreshold=Infinity"
qstr += "&plotGraphics=svg"
file_stem = Path(args["DIRECTORY"]).name
if file_stem == "_":
file_stem = "FXWY01"
if args["--format"] == "svg":
qstr += "&svgThreshold=Infinity"
shape = "circle"
for param in args["--param"]:
qstr += "&%s" % str(param)
key, value = param.split("=")
if key == "plotShape":
shape = value
timeout = int(args["--timeout"])
outdir = os.path.abspath(args["--out"])
driver = None
def handle_error(err):
"""Release resources before quitting."""
if viewer is not None:
viewer.send_signal(signal.SIGINT)
if driver is not None:
driver.quit()
display.stop()
print(err)
sys.exit(1)
if args["--driver"] == "firefox":
"""View dataset in Firefox."""
driver, display = firefox_driver(args)
elif args["--driver"] == "chromium":
"""View dataset in Chromium browser."""
driver, display = chromium_driver(args)
else:
handle_error("%s is not a valid driver" % args["--driver"])
try:
view = args["--view"][0]
if args["--preview"]:
qstr += "#Filters"
url = "%s/%s?%s" % (loc, view, qstr)
print("Loading %s" % url)
try:
driver.get(url)
except Exception as err:
handle_error(err)
for next_view in args["--view"]:
if next_view != view:
view = next_view
url = "%s/%s?%s" % (loc, view, qstr)
print("Navigating to %s" % url)
try:
driver.get(url)
except Exception as err:
handle_error(err)
for fmt in args["--format"]:
file = "%s.%s" % (file_stem, view)
if view == "blob":
file += ".%s" % shape
elif view == "busco":
view = "all_%s" % view
if fmt not in ("csv", "json"):
fmt = "json"
file += ".%s" % fmt
print("Fetching %s" % file)
el_id = "%s_save_%s" % (view, fmt)
print("waiting for element %s" % el_id)
unstable = True
start_time = time.time()
while unstable:
elapsed_time = time.time() - start_time
if timeout and elapsed_time > timeout:
handle_error("Timeout waiting for file")
try:
element = WebDriverWait(driver, timeout).until(
EC.visibility_of_element_located((By.ID, el_id))
)
element.click()
unstable = False
file_name = "%s/%s" % (outdir, file)
print("waiting for file '%s'" % file_name)
file_ready(file_name, timeout, handle_error)
except Exception as err:
unstable = True
time.sleep(1)
for preview in args["--preview"]:
print("Creating %s preview" % preview)
for fmt in args["--format"]:
el_id = "%s_preview_save_%s" % (preview, fmt)
file = "%s.%s.preview.%s" % (Path(args["DIRECTORY"]).name, preview, fmt)
try:
element = WebDriverWait(driver, timeout).until(
EC.visibility_of_element_located((By.ID, el_id))
)
element.click()
file_name = "%s/%s" % (outdir, file)
print("waiting for file '%s'" % file_name)
                    file_ready(file_name, timeout, handle_error)
except Exception as err:
handle_error(err)
if viewer is not None:
viewer.send_signal(signal.SIGINT)
driver.quit()
display.stop()
except Exception as err:
handle_error(err)
# print(err)
# if viewer is not None:
# viewer.send_signal(signal.SIGINT)
# driver.quit()
# display.stop()
return True
def interactive_view(args, loc, viewer, level):
if args["--driver"] == "firefox":
"""View dataset in Firefox."""
driver, display = firefox_driver(args)
elif args["--driver"] == "chromium":
"""View dataset in Chromium browser."""
driver, display = chromium_driver(args)
qstr = ""
for param in args["--param"]:
qstr += "&%s" % str(param)
try:
view = args["--view"][0]
if args["--preview"]:
qstr += "#Filters"
if level == "dataset":
url = "%s/%s" % (loc, view)
if qstr:
url += "?%s" % qstr
else:
url = loc if loc.endswith("all") else "%s/all" % loc
print("Loading %s" % url)
try:
driver.get(url)
except Exception as err:
print(err)
poll = viewer.poll()
while poll is None:
time.sleep(5)
poll = viewer.poll()
driver.quit()
display.stop()
if viewer is not None:
viewer.send_signal(signal.SIGINT)
except Exception as err:
print(err)
driver.quit()
display.stop()
if viewer is not None:
viewer.send_signal(signal.SIGINT)
return True
def remote_view(args, loc, viewer, port, api_port, level, remote):
"""View dataset remotely."""
qstr = ""
for param in args["--param"]:
qstr += "&%s" % str(param)
try:
view = args["--view"][0]
if args["--preview"]:
qstr += "#Filters"
if level == "dataset":
url = "%s/%s" % (loc, view)
if qstr:
url += "?%s" % qstr
print("View dataset at %s" % url)
else:
print("View datasets at %s" % loc)
if remote:
print("For remote access use:")
print(
" ssh -L %d:127.0.0.1:%d -L %d:127.0.0.1:%d username@remote_host"
% (port, port, api_port, api_port)
)
while True:
time.sleep(5)
if viewer is not None:
viewer.send_signal(signal.SIGINT)
except Exception as err:
print("remote exception")
print(err)
if viewer is not None:
viewer.send_signal(signal.SIGINT)
return True
def main(args):
"""Entrypoint for blobtools view."""
loc, viewer, port, api_port, level = test_loc(args)
try:
if args["--interactive"]:
interactive_view(args, loc, viewer, level)
elif args["--remote"]:
remote_view(args, loc, viewer, port, api_port, level, True)
elif args["--local"]:
remote_view(args, loc, viewer, port, api_port, level, False)
else:
static_view(args, loc, viewer)
except KeyboardInterrupt:
pass
finally:
time.sleep(1)
if viewer is not None:
viewer.send_signal(signal.SIGINT)
time.sleep(1)
def cli():
"""Entry point."""
if len(sys.argv) == sys.argv.index(__name__.split(".")[-1]) + 1:
args = docopt(__doc__, argv=[])
else:
args = docopt(__doc__, version=__version__)
if not os.path.exists(os.environ["HOME"]):
os.mkdir(os.environ["HOME"])
main(args)
if __name__ == "__main__":
cli()
| 34.067982 | 109 | 0.538011 |
73bebce865dc8536e8489e6d641b29c48bde08da | 4,484 | py | Python | spark/cs110_autograder_register.py | BenjaminLiuPenrose/Berkeley-CS-100 | 84fe1633ddaa808ccbf0b5c9f55762e6766c1d3e | [
"BSD-2-Clause"
] | null | null | null | spark/cs110_autograder_register.py | BenjaminLiuPenrose/Berkeley-CS-100 | 84fe1633ddaa808ccbf0b5c9f55762e6766c1d3e | [
"BSD-2-Clause"
] | null | null | null | spark/cs110_autograder_register.py | BenjaminLiuPenrose/Berkeley-CS-100 | 84fe1633ddaa808ccbf0b5c9f55762e6766c1d3e | [
"BSD-2-Clause"
] | null | null | null | # Databricks notebook source exported at Mon, 11 Jul 2016 16:37:17 UTC
# MAGIC %md
# MAGIC <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"> <img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png"/> </a> <br/> This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"> Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License. </a>
# COMMAND ----------
# MAGIC %md
# MAGIC #![Spark Logo](http://spark-mooc.github.io/web-assets/images/ta_Spark-logo-small.png) + ![Python Logo](http://spark-mooc.github.io/web-assets/images/python-logo-master-v3-TM-flattened_small.png)
# MAGIC
# MAGIC # Registering for the Course Autograder
# MAGIC
# MAGIC This notebook registers you for the course autograder. You need to use the autograder to get a grade for each lab.
# MAGIC
# MAGIC **You will only need to use this notebook once.**
# MAGIC
# MAGIC This notebook will help you create an _autograder token_. You will use that token when you submit each lab for grading, but you'll submit each lab using the
# MAGIC [autograder notebook](https://raw.githubusercontent.com/spark-mooc/mooc-setup/master/cs110_autograder.dbc).
# MAGIC
# MAGIC If you're interested in more details on the autograder, see the [Complete Autograder notebook](https://raw.githubusercontent.com/spark-mooc/mooc-setup/master/cs110_autograder_complete.dbc).
# MAGIC
# MAGIC ## NOTE TO CS105x and CS120x STUDENTS
# MAGIC
# MAGIC If you took CS105x or CS120x, and you already have an autograder private token, **DO NOT REGISTER FOR ANOTHER ONE.** Re-use your private token from CS105x.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Step 1: Register for the course autograder
# MAGIC
# MAGIC Enter your email address in the next cell. Your email address must be a valid email address.
# COMMAND ----------
# Replace <FILL_IN> with your email address in quotes (e.g., "tester@test.com")
username = <FILL_IN>
# COMMAND ----------
# MAGIC %md
# MAGIC Run the following cell. If you see an **ImportError**, you should verify that you added the `spark_mooc_meta` library to your cluster.
# MAGIC
# MAGIC <img src="http://spark-mooc.github.io/web-assets/images/autograder_LibraryError.png" alt="Drawing" style="width: 600px;"/>
# COMMAND ----------
# Verify that the username is set
from autograder import autograder
signup = autograder()
try:
    print "Your username is " + username
except NameError:
    assert False, "Your username is not set. Please check that you set your username in the previous cell and you executed the cell using SHIFT-ENTER."

try:
    print "Your private token is: " + signup.signup(username)
except:
    print "autograder signup failed. please detach the cluster and re-run the notebook"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Step 2: Import the Autograder Notebook
# MAGIC
# MAGIC Import a copy of the autograder notebook:
# MAGIC
# MAGIC 1. Download [this file](https://raw.githubusercontent.com/spark-mooc/mooc-setup/master/cs110_autograder.dbc). You'll get a file called `cs110_autograder.dbc`.
# MAGIC 2. In your Databricks Community Edition account, go to your home folder, and right click on it. Select "Import", and import `cs110_autograder.dbc`.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Step 3: Save your private token
# MAGIC
# MAGIC You will receive an email from the course autograder with a private token. Here is a sample email.
# MAGIC
# MAGIC <img src="http://spark-mooc.github.io/web-assets/images/autograder_signup_samplemail.png" alt="Drawing" style="width: 600px;"/>
# MAGIC
# MAGIC Copy the private token to the clipboard. Then, go to the `cs110_autograder` notebook you uploaded in Step 2, and look for a Python cell containing:
# MAGIC
# MAGIC ```
# MAGIC # private_token = <FILL_IN>
# MAGIC ```
# MAGIC
# MAGIC Uncomment the cell, so you get:
# MAGIC
# MAGIC ```
# MAGIC private_token = <FILL_IN>
# MAGIC ```
# MAGIC
# MAGIC and replace `<FILL IN>` with the private token you just copied to the clipboard. (Be sure to surround it with quotes.)
# MAGIC
# MAGIC <img src="http://spark-mooc.github.io/web-assets/images/autograder_private_token.png" alt="Drawing" />
# COMMAND ----------
# MAGIC %md
# MAGIC ## You're ready to go.
# MAGIC
# MAGIC You'll use the `cs110_autograder` notebook throughout the course, to submit each of your lab notebooks for grading.
# COMMAND ----------
| 43.115385 | 409 | 0.727475 |
73bece33673c9b47a7879ee7eba9e999a090a637 | 2,959 | py | Python | tests/gui/test_formbuilder.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | 156 | 2020-05-01T18:43:43.000Z | 2022-03-25T10:31:18.000Z | tests/gui/test_formbuilder.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | 299 | 2020-04-20T16:37:52.000Z | 2022-03-31T23:54:48.000Z | tests/gui/test_formbuilder.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | 41 | 2020-05-14T15:25:21.000Z | 2022-03-25T12:44:54.000Z | import yaml
from sleap.gui.dialogs import formbuilder
def test_formbuilder_dialog(qtbot):
    dialog = formbuilder.FormBuilderModalDialog(form_name="labeled_clip_form")

    dialog.set_message("foo")
    assert dialog.message_fields[0].text() == "foo"

    dialog.set_message("bar")
    assert dialog.message_fields[0].text() == "bar"


def test_formbuilder(qtbot):
    form_yaml = """
- name: method
  label: Method
  type: stacked
  default: two
  options: one,two,three

  one:
    - name: per_video
      label: Samples Per Video
      type: int
      default: 20
      range: 1,3000
    - name: sampling_method
      label: Sampling method
      type: list
      options: random,stride
      default: stride

  two:
    - name: node
      label: Node
      type: list
    - name: foo
      label: Avogadro
      type: sci
      default: 6.022e23

  three:
    - name: node
      label: Node
      type: list
"""

    items_to_create = yaml.load(form_yaml, Loader=yaml.SafeLoader)
    field_options_lists = dict(node=("first option", "second option"))

    layout = formbuilder.FormBuilderLayout(
        items_to_create, field_options_lists=field_options_lists
    )

    form_data = layout.get_form_data()
    assert "node" in form_data
    assert form_data["node"] == "first option"

    layout.set_field_options("node", ("new option", "another new option"))
    form_data = layout.get_form_data()
    assert form_data["node"] == "new option"


def test_optional_spin_widget(qtbot):
    widget = formbuilder.OptionalSpinWidget()

    widget.setValue(3)
    assert widget.value() == 3

    widget.check_widget.setChecked(True)
    assert widget.value() is None

    widget.check_widget.setChecked(False)
    assert widget.value() == 3

    widget.setValue("none")
    assert widget.value() is None


def test_auto_double_widget(qtbot):
    widget = formbuilder.OptionalSpinWidget(type="double", none_string="auto")

    widget.setValue(3.2)
    assert widget.value() == 3.2

    widget.check_widget.setChecked(True)
    assert widget.value() is "auto"

    widget.check_widget.setChecked(False)
    assert widget.value() == 3.2

    widget.setValue("auto")
    assert widget.value() == "auto"

    widget.setValue(3.2)
    assert widget.value() == 3.2

    widget.setValue(None)
    assert widget.value() == "auto"


def test_text_or_list_widget(qtbot):
    widget = formbuilder.TextOrListWidget()

    widget.setValue("foo")
    assert widget.value() == "foo"
    assert widget.mode == "text"

    widget.set_options(["a", "b", "c"])
    assert widget.mode == "list"

    widget.setValue("b")
    assert widget.value() == "b"

    widget.setMode("text")
    assert widget.value() == "b"


def test_string_list_widget(qtbot):
    widget = formbuilder.StringListWidget()
    widget.setValue("foo bar")
    x = widget.getValue()
    print(x)
    assert x == ["foo", "bar"]

    widget.setValue(["zip", "cab"])
    assert widget.text() == "zip cab"
| 21.918519 | 78 | 0.656303 |
73bed724157fbe211ecdbf5f5cf0fcb148c64126 | 10,623 | py | Python | google/cloud/compute_v1/services/projects/transports/base.py | LaudateCorpus1/python-compute | a36c637f153c7b4ef49bb6a78c8b09f3746e7af1 | [
"Apache-2.0"
] | null | null | null | google/cloud/compute_v1/services/projects/transports/base.py | LaudateCorpus1/python-compute | a36c637f153c7b4ef49bb6a78c8b09f3746e7af1 | [
"Apache-2.0"
] | null | null | null | google/cloud/compute_v1/services/projects/transports/base.py | LaudateCorpus1/python-compute | a36c637f153c7b4ef49bb6a78c8b09f3746e7af1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class ProjectsTransport(abc.ABC):
    """Abstract transport class for Projects."""

    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/compute",
        "https://www.googleapis.com/auth/cloud-platform",
    )

    DEFAULT_HOST: str = "compute.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.disable_xpn_host: gapic_v1.method.wrap_method(
                self.disable_xpn_host, default_timeout=None, client_info=client_info,
            ),
            self.disable_xpn_resource: gapic_v1.method.wrap_method(
                self.disable_xpn_resource,
                default_timeout=None,
                client_info=client_info,
            ),
            self.enable_xpn_host: gapic_v1.method.wrap_method(
                self.enable_xpn_host, default_timeout=None, client_info=client_info,
            ),
            self.enable_xpn_resource: gapic_v1.method.wrap_method(
                self.enable_xpn_resource, default_timeout=None, client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get, default_timeout=None, client_info=client_info,
            ),
            self.get_xpn_host: gapic_v1.method.wrap_method(
                self.get_xpn_host, default_timeout=None, client_info=client_info,
            ),
            self.get_xpn_resources: gapic_v1.method.wrap_method(
                self.get_xpn_resources, default_timeout=None, client_info=client_info,
            ),
            self.list_xpn_hosts: gapic_v1.method.wrap_method(
                self.list_xpn_hosts, default_timeout=None, client_info=client_info,
            ),
            self.move_disk: gapic_v1.method.wrap_method(
                self.move_disk, default_timeout=None, client_info=client_info,
            ),
            self.move_instance: gapic_v1.method.wrap_method(
                self.move_instance, default_timeout=None, client_info=client_info,
            ),
            self.set_common_instance_metadata: gapic_v1.method.wrap_method(
                self.set_common_instance_metadata,
                default_timeout=None,
                client_info=client_info,
            ),
            self.set_default_network_tier: gapic_v1.method.wrap_method(
                self.set_default_network_tier,
                default_timeout=None,
                client_info=client_info,
            ),
            self.set_usage_export_bucket: gapic_v1.method.wrap_method(
                self.set_usage_export_bucket,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def disable_xpn_host(
        self,
    ) -> Callable[
        [compute.DisableXpnHostProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def disable_xpn_resource(
        self,
    ) -> Callable[
        [compute.DisableXpnResourceProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def enable_xpn_host(
        self,
    ) -> Callable[
        [compute.EnableXpnHostProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def enable_xpn_resource(
        self,
    ) -> Callable[
        [compute.EnableXpnResourceProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def get(
        self,
    ) -> Callable[
        [compute.GetProjectRequest], Union[compute.Project, Awaitable[compute.Project]]
    ]:
        raise NotImplementedError()

    @property
    def get_xpn_host(
        self,
    ) -> Callable[
        [compute.GetXpnHostProjectRequest],
        Union[compute.Project, Awaitable[compute.Project]],
    ]:
        raise NotImplementedError()

    @property
    def get_xpn_resources(
        self,
    ) -> Callable[
        [compute.GetXpnResourcesProjectsRequest],
        Union[
            compute.ProjectsGetXpnResources, Awaitable[compute.ProjectsGetXpnResources]
        ],
    ]:
        raise NotImplementedError()

    @property
    def list_xpn_hosts(
        self,
    ) -> Callable[
        [compute.ListXpnHostsProjectsRequest],
        Union[compute.XpnHostList, Awaitable[compute.XpnHostList]],
    ]:
        raise NotImplementedError()

    @property
    def move_disk(
        self,
    ) -> Callable[
        [compute.MoveDiskProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def move_instance(
        self,
    ) -> Callable[
        [compute.MoveInstanceProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_common_instance_metadata(
        self,
    ) -> Callable[
        [compute.SetCommonInstanceMetadataProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_default_network_tier(
        self,
    ) -> Callable[
        [compute.SetDefaultNetworkTierProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_usage_export_bucket(
        self,
    ) -> Callable[
        [compute.SetUsageExportBucketProjectRequest],
        Union[compute.Operation, Awaitable[compute.Operation]],
    ]:
        raise NotImplementedError()
__all__ = ("ProjectsTransport",)
| 35.175497 | 101 | 0.642756 |
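The abstract transport above only declares the call surface; concrete transports supply the actual callables. The following is a hypothetical, in-memory sketch for illustration (the subclass name and its recording behaviour are invented; only `ProjectsTransport`, `ga_credentials` and the `compute` types come from the file above):

# Illustrative sketch only -- real gRPC/REST transports are generated separately.
class InMemoryProjectsTransport(ProjectsTransport):
    """Fake transport that records calls instead of talking to the API."""

    def __init__(self, **kwargs):
        # AnonymousCredentials keeps the base __init__ from probing the environment.
        super().__init__(credentials=ga_credentials.AnonymousCredentials(), **kwargs)
        self.calls = []

    def close(self):
        self.calls.clear()

    @property
    def get(self):
        def _get(request):
            self.calls.append(("get", request))
            return compute.Project()
        return _get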
73bef83387c599ada0bb4726a4d47f62e55a7d32 | 1,078 | py | Python | messaging/load-sms.py | jj0hns0n/mednet | efb6681292e7ac8f870ee5967a5a2b352853ae35 | [
"BSD-3-Clause"
] | 2 | 2016-02-18T01:06:04.000Z | 2016-02-18T03:53:37.000Z | messaging/load-sms.py | jj0hns0n/mednet | efb6681292e7ac8f870ee5967a5a2b352853ae35 | [
"BSD-3-Clause"
] | null | null | null | messaging/load-sms.py | jj0hns0n/mednet | efb6681292e7ac8f870ee5967a5a2b352853ae35 | [
"BSD-3-Clause"
] | null | null | null | import sys, os
from time import *
import urllib2
import feedparser
import rfc822
sys.path.append('/var/projects')
os.environ['DJANGO_SETTINGS_MODULE'] ='mednet.settings'
from mednet import settings
from mednet.messaging.models import *
feedurl = ('http://%s:%s@%s') % (settings.GEOCHAT_USER, settings.GEOCHAT_PASS, settings.GEOCHAT_FEED)
d = feedparser.parse(feedurl)
length = len(d.entries)
for i in range(1, length):
    guid = d.entries[i].guid
    try:
        existing_sms = IncomingSmsMessage.objects.get(guid=guid)
    except:
        print d.entries[i]
        sender = d.entries[i].author.replace('sms://', '')
        message = smart_unicode(d.entries[i].title, encoding='utf-8', strings_only=False, errors='strict')
        date_sent = rfc822.parsedate(d.entries[i].updated)
        date_sent = strftime("%Y-%m-%d %H:%M:%S", date_sent)

        sms = IncomingSmsMessage()
        sms.sender = sender
        sms.message = message
        sms.guid = guid.replace('http://eis.instedd.org:3000/', '')
        sms.date_sent = date_sent
        sms.status = 'NW'
        sms.status_changed_date = date_sent
        sms.save()
        print sms
| 26.95 | 101 | 0.703154 |
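The try/except around `objects.get()` above implements create-if-missing deduplication by GUID. A hypothetical equivalent using Django's `get_or_create` (sketch only; it assumes the same model and that `sender`, `message` and `date_sent` have already been built as in the loop above):

# Hypothetical alternative to the dedupe-by-guid loop above, not part of the original script.
sms, created = IncomingSmsMessage.objects.get_or_create(
    guid=guid,
    defaults={'sender': sender, 'message': message, 'date_sent': date_sent,
              'status': 'NW', 'status_changed_date': date_sent},
)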
73bf008213ef5a0b49b559bd4418ec981218b459 | 2,219 | py | Python | vapor_manager/dashboard/views.py | codescribblr/project-manager-django3 | d0dc79e992811eee3e35666acdb16bcafa16d98d | [
"MIT"
] | null | null | null | vapor_manager/dashboard/views.py | codescribblr/project-manager-django3 | d0dc79e992811eee3e35666acdb16bcafa16d98d | [
"MIT"
] | null | null | null | vapor_manager/dashboard/views.py | codescribblr/project-manager-django3 | d0dc79e992811eee3e35666acdb16bcafa16d98d | [
"MIT"
] | null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
from django.db.models.aggregates import Sum
from django.utils import timezone
from django.views.generic import TemplateView
from vapor_manager.projects.models import Project
from vapor_manager.clients.models import Client
from vapor_manager.tasks.models import Task
from vapor_manager.servers.models import Server
class DashboardView(LoginRequiredMixin, TemplateView):
    template_name = 'dashboard/index.html'

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        data['accounts'] = self.request.user.accounts.all()
        data['active_projects'] = Project.objects.active().by_account(self.request.account)[:5]
        data['active_clients'] = Client.objects.active().by_account(self.request.account)[:5]
        data['active_tasks'] = Task.objects.open().by_account(self.request.account)[:5]
        data['active_servers'] = Server.objects.active().by_account(self.request.account)[:5]

        overdue_tasks_percent = 0
        if Task.objects.open().by_account(self.request.account).filter(due_date__lt=timezone.now()).count() > 0:
            overdue_tasks = Task.objects.open().by_account(self.request.account).filter(
                due_date__lt=timezone.now()).count()
            total_tasks = Task.objects.open().by_account(self.request.account).all().count()
            overdue_tasks_percent = overdue_tasks / total_tasks * 100

        total_monthly_server_cost = 0
        if Server.objects.active().by_account(self.request.account).filter(cost__gt=0).count() > 0:
            servers = Server.objects.active().by_account(self.request.account).filter(cost__gt=0)
            total_monthly_server_cost = servers.aggregate(total_monthly_server_cost=Sum('cost'))

        data['totals'] = {
            'total_projects': Project.objects.active().by_account(self.request.account).all().count(),
            'total_clients': Client.objects.active().by_account(self.request.account).all().count(),
            'overdue_tasks_percent': '{:0.2f}'.format(overdue_tasks_percent),
            'total_monthly_server_cost': total_monthly_server_cost,
        }
        return data
| 51.604651 | 112 | 0.711131 |
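One detail worth noting in the view above: `QuerySet.aggregate()` returns a dict keyed by the alias, not a bare number, so `total_monthly_server_cost` ends up holding a mapping when any servers match. A small illustration of the shape (the literal value is invented; `servers` stands in for the queryset built in the view):

# Illustrative only -- shows what aggregate() hands back in the code above.
totals = servers.aggregate(total_monthly_server_cost=Sum('cost'))
# totals looks like {'total_monthly_server_cost': Decimal('123.45')}
value_only = totals['total_monthly_server_cost']  # extract the number if that is what the template expects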
73bf88c562afb0ade72d8f796e4ae2f83d1dff1d | 1,092 | py | Python | rllab/rllab/envs/proxy_env.py | SurvivorT/SRTP | 1ddc0c4ec31d61daf9f4292c533722e61818eb51 | [
"MIT"
] | null | null | null | rllab/rllab/envs/proxy_env.py | SurvivorT/SRTP | 1ddc0c4ec31d61daf9f4292c533722e61818eb51 | [
"MIT"
] | null | null | null | rllab/rllab/envs/proxy_env.py | SurvivorT/SRTP | 1ddc0c4ec31d61daf9f4292c533722e61818eb51 | [
"MIT"
] | null | null | null | from .base import Env
class ProxyEnv(Env):
    def __init__(self, wrapped_env):
        self._wrapped_env = wrapped_env

    @property
    def wrapped_env(self):
        return self._wrapped_env

    def reset(self):
        return self._wrapped_env.reset()

    @property
    def action_space(self):
        return self._wrapped_env.action_space

    @property
    def observation_space(self):
        return self._wrapped_env.observation_space

    def step(self, action):
        return self._wrapped_env.step(action)

    def render(self, *args, **kwargs):
        return self._wrapped_env.render(*args, **kwargs)

    def log_diagnostics(self, paths):
        self._wrapped_env.log_diagnostics(paths)

    @property
    def horizon(self):
        return self._wrapped_env.horizon

    def terminate(self):
        self._wrapped_env.terminate()

    def get_param_values(self):
        if hasattr(self._wrapped_env, 'get_param_values'):
            return self._wrapped_env.get_param_values()

    def set_param_values(self, params):
        self._wrapped_env.set_param_values(params)
| 23.73913 | 58 | 0.67674 |
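`ProxyEnv` above is a pure pass-through wrapper; subclasses typically override only the methods they care about and delegate the rest. A minimal, hypothetical sketch (the `ClippedActionEnv` subclass, the scalar-action assumption and `SomeEnv` are invented for illustration and are not part of the file above):

# Hypothetical usage sketch for the wrapper above.
class ClippedActionEnv(ProxyEnv):
    """Forward everything, but clip actions before delegating."""

    def step(self, action):
        clipped = min(max(action, -1.0), 1.0)  # assumes a scalar action space for the sketch
        return self._wrapped_env.step(clipped)

# wrapped = ClippedActionEnv(SomeEnv())        # SomeEnv stands in for any concrete rllab Env
# obs = wrapped.reset()
# result = wrapped.step(2.5)                   # 2.5 is clipped to 1.0 before reaching SomeEnv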
73bfa977dd79c1f5f5906a14d804b0be73bab71a | 3,690 | py | Python | intersight/models/server_result_handler_task_ref.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | intersight/models/server_result_handler_task_ref.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | intersight/models/server_result_handler_task_ref.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ServerResultHandlerTaskRef(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'moid': 'str',
        'object_type': 'str'
    }

    attribute_map = {
        'moid': 'Moid',
        'object_type': 'ObjectType'
    }

    def __init__(self, moid=None, object_type=None):
        """
        ServerResultHandlerTaskRef - a model defined in Swagger
        """
        self._moid = None
        self._object_type = None

        if moid is not None:
            self.moid = moid
        if object_type is not None:
            self.object_type = object_type

    @property
    def moid(self):
        """
        Gets the moid of this ServerResultHandlerTaskRef.

        :return: The moid of this ServerResultHandlerTaskRef.
        :rtype: str
        """
        return self._moid

    @moid.setter
    def moid(self, moid):
        """
        Sets the moid of this ServerResultHandlerTaskRef.

        :param moid: The moid of this ServerResultHandlerTaskRef.
        :type: str
        """
        self._moid = moid

    @property
    def object_type(self):
        """
        Gets the object_type of this ServerResultHandlerTaskRef.

        :return: The object_type of this ServerResultHandlerTaskRef.
        :rtype: str
        """
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        """
        Sets the object_type of this ServerResultHandlerTaskRef.

        :param object_type: The object_type of this ServerResultHandlerTaskRef.
        :type: str
        """
        self._object_type = object_type

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, ServerResultHandlerTaskRef):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 24.6 | 79 | 0.551491 |
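The generated model above is boilerplate around two attributes. A short, hypothetical usage sketch (the moid and object-type values are invented for illustration):

# Illustrative usage of the generated Swagger model above.
ref = ServerResultHandlerTaskRef(moid="12345", object_type="server.ResultHandlerTask")
print(ref.to_dict())  # {'moid': '12345', 'object_type': 'server.ResultHandlerTask'}
print(ref == ServerResultHandlerTaskRef(moid="12345", object_type="server.ResultHandlerTask"))  # True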
73bfcb8d4b957ca74e296f93f1466d5f3ac32aa2 | 6,171 | py | Python | blitzortung/files.py | wuan/bo-python | 86c90e437ff456092bf7c9eff8c85daffdd220f0 | [
"Apache-2.0"
] | 3 | 2015-04-09T22:33:59.000Z | 2019-02-12T12:52:16.000Z | blitzortung/files.py | wuan/bo-python | 86c90e437ff456092bf7c9eff8c85daffdd220f0 | [
"Apache-2.0"
] | 7 | 2015-05-23T13:38:14.000Z | 2019-12-13T20:43:12.000Z | blitzortung/files.py | wuan/bo-python | 86c90e437ff456092bf7c9eff8c85daffdd220f0 | [
"Apache-2.0"
] | 4 | 2015-12-13T12:40:40.000Z | 2021-07-09T10:48:16.000Z | # -*- coding: utf8 -*-
"""
Copyright 2014-2016 Andreas Würl
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import glob
import json
import os
import subprocess
from . import builder
from blitzortung.data import Timestamp
class Raw(object):
BO_DATA_EXECUTABLE = 'bo-data'
def __init__(self, file_path):
self.file_path = file_path
def get_file_path(self):
return self.file_path
def get_file_name(self):
return os.path.basename(self.file_path)
def get_data(self, start_time=None, end_time=None):
return [builder.RawEvent().from_json(element).build()
for element in self.__execute(start_time, end_time)]
def get_waveform_data(self, start_time=None, end_time=None):
return [builder.RawWaveformEvent().from_json(element).build()
for element in self.__execute(start_time, end_time, '--long-data')]
def get_info(self, start_time=None, end_time=None):
return self.__execute(start_time, end_time, '--mode', 'info')
def get_histogram(self, start_time=None, end_time=None):
return self.__execute(start_time, end_time, '--mode', 'histogram')
def __repr__(self):
return "files.Raw(%s)" % (os.path.basename(self.file_path))
def __execute(self, start_time, end_time, *additional_args):
args = [self.BO_DATA_EXECUTABLE, '-j', '-i', self.file_path]
if start_time:
args += ['-s', start_time]
if end_time:
args += ['-e', end_time]
data_pipe = subprocess.Popen(args + list(additional_args), stdout=subprocess.PIPE)
(output, _) = data_pipe.communicate()
return json.loads(output)
class RawFile(object):
def __init__(self, config):
raw_file_names = glob.glob(os.path.join(config.get_raw_path(), '*.bor'))
raw_file_names.sort()
self.raw_files = {}
for raw_file_name in raw_file_names:
try:
date = datetime.datetime.strptime(raw_file_name[-12:-4], '%Y%m%d').date()
except ValueError:
continue
if not date in self.raw_files:
self.raw_files[date] = raw_file_name
else:
raise Exception("ERROR: double date! " + raw_file_name + " vs. " + self.raw_files[date])
def get(self, date):
if date in self.raw_files:
return self.raw_files[date]
else:
raise Exception("no file for date " + date.strftime('%Y-%m-%d'))
def get_dates(self):
dates = self.raw_files.keys()
dates.sort()
return dates
class Archive(object):
def __init__(self, config):
self.dates_filecount = {}
self.root_path = config.get_archive_path()
root_depth = self.__get_path_depth(self.root_path)
for current_path, dirs, files in os.walk(self.root_path):
depth = self.__get_path_depth(current_path) - root_depth
if depth == 3:
date_string = "-".join(self.__split_path_into_components(current_path)[-depth:])
self.dates_filecount[Timestamp(date_string)] = len(files)
def get_dates_filecount(self):
return self.dates_filecount
def get_files_for_date(self, date_string):
result = []
date = Timestamp(date_string)
if date in self.dates_filecount:
for file_path in glob.glob(os.path.join(self.__get_path_for_date(date), '*')):
result.append(Raw(file_path))
return result
def __get_path_for_date(self, date):
path = self.root_path
for format_string in ['%Y', '%m', '%d']:
path = os.path.join(path, date.strftime(format_string))
return path
def __get_path_depth(self, path):
return len(self.__split_path_into_components(path))
def __split_path_into_components(self, path):
(rest, last) = os.path.split(path)
if last == "":
return []
else:
components = self.__split_path_into_components(rest)
components.append(last)
return components
class Data(object):
def __init__(self, raw_file_path, time):
self.raw_file_path = raw_file_path
self.time = time
self.error = False
def get(self, long_format=False):
start = self.time.get_start_time()
start_time = start.strftime("%H%M")
end = self.time.get_end_minute()
end_time = end.strftime("%H%M")
self.error = False
raw_file = self.raw_file_path.get_paths(start.date())
if long_format:
return self.get_output(raw_file, start_time, end_time, True)
else:
return self.get_data(raw_file, start_time, end_time)
@staticmethod
def get_output(raw_file, start_time, end_time, long_format=False):
cmd = ['bo-data', '-i', raw_file, '-s', start_time, '-e', end_time]
if long_format:
cmd.append('--long-data')
data_pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
(output, _) = data_pipe.communicate()
return output.splitlines(keepends=False)
def get_data(self, raw_file, start_time, end_time):
raw_events = []
for line in self.get_output(raw_file, start_time, end_time):
raw_event_builder = builder.RawEvent()
raw_event_builder.from_string(line)
raw_events.append(raw_event_builder.build())
return raw_events
def list(self):
for event in self.get():
print(event)
def list_long(self):
for line in self.get(True):
print(line)
| 31.166667 | 104 | 0.632474 |
73bfd504d347806b05122b9f71d89df291a95b13 | 2,070 | py | Python | bflib/tables/experience.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | [
"MIT"
] | 3 | 2017-10-28T11:28:38.000Z | 2018-09-12T09:47:00.000Z | bflib/tables/experience.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | [
"MIT"
] | null | null | null | bflib/tables/experience.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | [
"MIT"
] | null | null | null | class ExperienceRow(object):
__slots__ = ["hit_dice", "xp_value", "special_ability_bonus"]
def __init__(self, hit_dice, xp_value, special_ability_bonus):
self.hit_dice = hit_dice
self.xp_value = xp_value
self.special_ability_bonus = special_ability_bonus
class ExperienceTable(object):
inner_table = {
row.hit_dice: row for row in
(
ExperienceRow(0, 10, 3),
ExperienceRow(1, 25, 12),
ExperienceRow(2, 75, 25),
ExperienceRow(3, 145, 30),
ExperienceRow(4, 240, 40),
ExperienceRow(5, 360, 45),
ExperienceRow(6, 500, 55),
ExperienceRow(7, 670, 65),
ExperienceRow(8, 875, 70),
ExperienceRow(9, 1075, 75),
ExperienceRow(10, 1300, 90),
ExperienceRow(11, 1575, 95),
ExperienceRow(12, 1875, 100),
ExperienceRow(13, 2175, 110),
ExperienceRow(14, 2500, 115),
ExperienceRow(15, 2850, 125),
ExperienceRow(16, 3250, 135),
ExperienceRow(17, 3600, 145),
ExperienceRow(18, 4000, 160),
ExperienceRow(19, 4500, 175),
ExperienceRow(20, 5250, 200),
ExperienceRow(21, 6000, 225),
ExperienceRow(22, 6750, 250),
ExperienceRow(23, 7500, 275),
ExperienceRow(24, 8250, 300),
ExperienceRow(25, 9000, 325)
)
}
@classmethod
def get(cls, hit_die_value, include_bonus=0):
if hit_die_value < 1:
row = cls.inner_table[0]
elif hit_die_value > 25:
last_row = cls.inner_table[-1]
additional_dice = hit_die_value - 25
row = ExperienceRow(
hit_die_value,
last_row.xp_value + (750 * additional_dice),
last_row.special_ability_bonus + (25 * additional_dice)
)
else:
row = cls.inner_table[hit_die_value]
return row.xp_value + (row.special_ability_bonus * include_bonus)
| 34.5 | 73 | 0.557488 |
73bff9ed64ad83ef89122066bac51d2fb6ecf0d2 | 1,535 | py | Python | mps/mwt.py | Smarties89/mps | c3885329ea9ea52c1d60559658df55ba01b3b612 | [
"MIT"
] | 2 | 2015-10-08T22:06:18.000Z | 2017-04-30T18:03:41.000Z | mps/mwt.py | Smarties89/mps | c3885329ea9ea52c1d60559658df55ba01b3b612 | [
"MIT"
] | null | null | null | mps/mwt.py | Smarties89/mps | c3885329ea9ea52c1d60559658df55ba01b3b612 | [
"MIT"
] | null | null | null | import time
import logging
log = logging.getLogger(__name__)
class MemoizeWithTimeout(object):
"""Memoize With Timeout
The implementation builds on Leslie Polzer code from
http://code.activestate.com/recipes/325905-memoize-decorator-with-timeout
The code have been modified to support log and flushing.
"""
_caches = {}
_timeouts = {}
def __init__(self, timeout=2):
self.timeout = timeout
def collect(self):
"""Clear cache of results which have timed out"""
for func in self._caches:
cache = {}
for key in self._caches[func]:
if (time.time() - self._caches[func][key][1]) < self._timeouts[func]:
cache[key] = self._caches[func][key]
self._caches[func] = cache
def __call__(self, f):
self.cache = self._caches[f] = {}
self._timeouts[f] = self.timeout
def func(*args, **kwargs):
kw = sorted(kwargs.items())
key = (args, tuple(kw))
try:
v = self.cache[key]
log.info("cache")
if (time.time() - v[1]) > self.timeout or kwargs.get('mwt_flush', False):
raise KeyError
except KeyError:
log.info("new")
if 'mwt_flush' in kwargs:
del kwargs['mwt_flush']
v = self.cache[key] = f(*args,**kwargs),time.time()
return v[0]
func.func_name = f.__name__
return func
| 27.909091 | 89 | 0.541368 |
73c00ef1997fb8703a15d814a4fb8df40d609f9e | 2,803 | py | Python | mongo/tests/conftest.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | null | null | null | mongo/tests/conftest.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:50:17.000Z | 2018-08-15T05:50:17.000Z | mongo/tests/conftest.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:45:42.000Z | 2018-08-15T05:45:42.000Z | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import subprocess
import os
import logging
import time
import pymongo
import pytest
from datadog_checks.mongo import MongoDb
from . import common
log = logging.getLogger('conftest')
@pytest.fixture
def check():
check = MongoDb('mongo', {}, {})
return check
@pytest.fixture(scope="session")
def set_up_mongo():
cli = pymongo.mongo_client.MongoClient(
common.MONGODB_SERVER,
socketTimeoutMS=30000,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,)
foos = []
for _ in range(70):
foos.append({'1': []})
foos.append({'1': []})
foos.append({})
bars = []
for _ in range(50):
bars.append({'1': []})
bars.append({})
db = cli['test']
db.foo.insert_many(foos)
db.bar.insert_many(bars)
authDB = cli['authDB']
authDB.command("createUser", 'testUser', pwd='testPass', roles=[{'role': 'read', 'db': 'test'}])
db.command("createUser", 'testUser2', pwd='testPass2', roles=[{'role': 'read', 'db': 'test'}])
yield
tear_down_mongo()
def tear_down_mongo():
cli = pymongo.mongo_client.MongoClient(
common.MONGODB_SERVER,
socketTimeoutMS=30000,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,)
db = cli['test']
db.drop_collection("foo")
db.drop_collection("bar")
@pytest.fixture(scope="session")
def spin_up_mongo():
"""
Start a cluster with one master, one replica and one unhealthy replica and
stop it after the tests are done.
If there's any problem executing docker-compose, let the exception bubble
up.
"""
env = os.environ
compose_file = os.path.join(common.HERE, 'compose', 'docker-compose.yml')
env['DOCKER_COMPOSE_FILE'] = compose_file
args = [
"docker-compose",
"-f", compose_file
]
try:
subprocess.check_call(args + ["up", "-d"], env=env)
setup_sharding(env=env)
except Exception:
cleanup_mongo(args, env)
raise
yield
cleanup_mongo(args, env)
def setup_sharding(env=None):
curdir = os.getcwd()
compose_dir = os.path.join(common.HERE, 'compose')
os.chdir(compose_dir)
for i in xrange(5):
try:
subprocess.check_call(['bash', 'init.sh'], env=env)
os.chdir(curdir)
return
except Exception as e:
log.info(e)
time.sleep(5)
os.chdir(curdir)
raise e
def cleanup_mongo(args, env):
subprocess.check_call(args + ["down"], env=env)
# it creates a lot of volumes, this is necessary
try:
subprocess.check_call(['docker', 'volume', 'prune', '-f'])
except Exception:
pass
| 22.97541 | 100 | 0.622904 |
73c029e78ec379beb99e10e4a45b52e588698a41 | 8,003 | py | Python | nova/tests/unit/api/openstack/compute/test_scheduler_hints.py | badock/nova-tidb | 4c4591f2cd887fdc22828e12f0c297c051bbd912 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_scheduler_hints.py | badock/nova-tidb | 4c4591f2cd887fdc22828e12f0c297c051bbd912 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_scheduler_hints.py | badock/nova-tidb | 4c4591f2cd887fdc22828e12f0c297c051bbd912 | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.api.openstack import compute
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute import servers as servers_v21
import nova.compute.api
from nova.compute import flavors
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
UUID = fakes.FAKE_UUID
CONF = cfg.CONF
class SchedulerHintsTestCaseV21(test.TestCase):
def setUp(self):
super(SchedulerHintsTestCaseV21, self).setUp()
self.fake_instance = fakes.stub_instance(1, uuid=UUID)
self._set_up_router()
def _set_up_router(self):
self.app = compute.APIRouterV21(init_only=('servers',
'os-scheduler-hints'))
def _get_request(self):
return fakes.HTTPRequest.blank('/fake/servers')
def test_create_server_without_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {})
return ([self.fake_instance], '')
self.stub_out('nova.compute.api.API.create', fake_create)
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
}}
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def _test_create_server_with_hint(self, hint):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], hint)
return ([self.fake_instance], '')
self.stub_out('nova.compute.api.API.create', fake_create)
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
},
'os:scheduler_hints': hint,
}
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_with_group_hint(self):
self._test_create_server_with_hint({'group': UUID})
def test_create_server_with_non_uuid_group_hint(self):
self._create_server_with_scheduler_hints_bad_request(
{'group': 'non-uuid'})
def test_create_server_with_different_host_hint(self):
self._test_create_server_with_hint(
{'different_host': '9c47bf55-e9d8-42da-94ab-7f9e80cd1857'})
self._test_create_server_with_hint(
{'different_host': ['9c47bf55-e9d8-42da-94ab-7f9e80cd1857',
'82412fa6-0365-43a9-95e4-d8b20e00c0de']})
def _create_server_with_scheduler_hints_bad_request(self, param):
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
},
'os:scheduler_hints': param,
}
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
def test_create_server_bad_hints_non_dict(self):
self._create_server_with_scheduler_hints_bad_request('non-dict')
def test_create_server_bad_hints_long_group(self):
param = {'group': 'a' * 256}
self._create_server_with_scheduler_hints_bad_request(param)
def test_create_server_with_bad_different_host_hint(self):
param = {'different_host': 'non-server-id'}
self._create_server_with_scheduler_hints_bad_request(param)
param = {'different_host': ['non-server-id01', 'non-server-id02']}
self._create_server_with_scheduler_hints_bad_request(param)
class ServersControllerCreateTestV21(test.TestCase):
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestV21, self).setUp()
self.instance_cache_num = 0
self._set_up_controller()
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': fakes.FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
return instance
fake.stub_out_image_service(self)
self.stub_out('nova.db.instance_create', instance_create)
def _set_up_controller(self):
ext_info = extension_info.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
'osapi_v21')
self.no_scheduler_hints_controller = servers_v21.ServersController(
extension_info=ext_info)
def _verify_availability_zone(self, **kwargs):
self.assertNotIn('scheduler_hints', kwargs)
def _get_request(self):
return fakes.HTTPRequest.blank('/servers')
def _test_create_extra(self, params):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
body = dict(server=server)
body.update(params)
req = self._get_request()
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
server = self.no_scheduler_hints_controller.create(
req, body=body).obj['server']
def test_create_instance_with_scheduler_hints_disabled(self):
hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'}
params = {'OS-SCH-HNT:scheduler_hints': hints}
old_create = nova.compute.api.API.create
def create(*args, **kwargs):
self._verify_availability_zone(**kwargs)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_extra(params)
| 36.711009 | 78 | 0.631638 |
73c02a61047d66ea23b93ce605258f5490e1a9a7 | 8,937 | py | Python | tests/io/test_hdf_dataset.py | PedroAbreuQB/kedro | a38552a0266d4ad7b823f1640e98aefa6175fd33 | [
"Apache-2.0"
] | null | null | null | tests/io/test_hdf_dataset.py | PedroAbreuQB/kedro | a38552a0266d4ad7b823f1640e98aefa6175fd33 | [
"Apache-2.0"
] | null | null | null | tests/io/test_hdf_dataset.py | PedroAbreuQB/kedro | a38552a0266d4ad7b823f1640e98aefa6175fd33 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import PurePosixPath
import pandas as pd
import pytest
from fsspec.implementations.http import HTTPFileSystem
from fsspec.implementations.local import LocalFileSystem
from gcsfs import GCSFileSystem
from pandas.testing import assert_frame_equal
from s3fs.core import S3FileSystem
from kedro.io import DataSetError, HDFDataSet
from kedro.io.core import Version
HDF_KEY = "data"
@pytest.fixture
def filepath_hdf(tmp_path):
return str(tmp_path / "test.h5")
@pytest.fixture
def hdf_data_set(filepath_hdf, load_args, save_args):
return HDFDataSet(
filepath=filepath_hdf, key=HDF_KEY, load_args=load_args, save_args=save_args
)
@pytest.fixture
def versioned_hdf_data_set(filepath_hdf, load_version, save_version):
return HDFDataSet(
filepath=filepath_hdf, key=HDF_KEY, version=Version(load_version, save_version)
)
@pytest.fixture
def dummy_dataframe():
return pd.DataFrame({"col1": [1, 2], "col2": [4, 5], "col3": [5, 6]})
class TestHDFDataSet:
def test_save_and_load(self, hdf_data_set, dummy_dataframe):
"""Test saving and reloading the data set."""
hdf_data_set.save(dummy_dataframe)
reloaded = hdf_data_set.load()
assert_frame_equal(dummy_dataframe, reloaded)
def test_exists(self, hdf_data_set, dummy_dataframe):
"""Test `exists` method invocation for both existing and
nonexistent data set."""
assert not hdf_data_set.exists()
hdf_data_set.save(dummy_dataframe)
assert hdf_data_set.exists()
@pytest.mark.parametrize(
"load_args", [{"k1": "v1", "index": "value"}], indirect=True
)
def test_load_extra_params(self, hdf_data_set, load_args):
"""Test overriding the default load arguments."""
for key, value in load_args.items():
assert hdf_data_set._load_args[key] == value
@pytest.mark.parametrize(
"save_args", [{"k1": "v1", "index": "value"}], indirect=True
)
def test_save_extra_params(self, hdf_data_set, save_args):
"""Test overriding the default save arguments."""
for key, value in save_args.items():
assert hdf_data_set._save_args[key] == value
def test_load_missing_file(self, hdf_data_set):
"""Check the error when trying to load missing file."""
pattern = r"Failed while loading data from data set HDFDataSet\(.*\)"
with pytest.raises(DataSetError, match=pattern):
hdf_data_set.load()
@pytest.mark.parametrize(
"filepath,instance_type",
[
("s3://bucket/file.h5", S3FileSystem),
("file:///tmp/test.h5", LocalFileSystem),
("/tmp/test.h5", LocalFileSystem),
("gcs://bucket/file.h5", GCSFileSystem),
("https://example.com/file.h5", HTTPFileSystem),
],
)
def test_protocol_usage(self, filepath, instance_type):
data_set = HDFDataSet(filepath=filepath, key=HDF_KEY)
assert isinstance(data_set._fs, instance_type)
# _strip_protocol() doesn't strip http(s) protocol
if data_set._protocol == "https":
path = filepath.split("://")[-1]
else:
path = data_set._fs._strip_protocol(filepath)
assert str(data_set._filepath) == path
assert isinstance(data_set._filepath, PurePosixPath)
def test_catalog_release(self, mocker):
fs_mock = mocker.patch("fsspec.filesystem").return_value
filepath = "test.h5"
data_set = HDFDataSet(filepath=filepath, key=HDF_KEY)
data_set.release()
fs_mock.invalidate_cache.assert_called_once_with(filepath)
def test_save_and_load_df_with_categorical_variables(self, hdf_data_set):
"""Test saving and reloading the data set with categorical variables."""
df = pd.DataFrame(
{"A": [1, 2, 3], "B": pd.Series(list("aab")).astype("category")}
)
hdf_data_set.save(df)
reloaded = hdf_data_set.load()
assert_frame_equal(df, reloaded)
class TestHDFDataSetVersioned:
def test_version_str_repr(self, load_version, save_version):
"""Test that version is in string representation of the class instance
when applicable."""
filepath = "test.h5"
ds = HDFDataSet(filepath=filepath, key=HDF_KEY)
ds_versioned = HDFDataSet(
filepath=filepath, key=HDF_KEY, version=Version(load_version, save_version)
)
assert filepath in str(ds)
assert "version" not in str(ds)
assert filepath in str(ds_versioned)
ver_str = "version=Version(load={}, save='{}')".format(
load_version, save_version
)
assert ver_str in str(ds_versioned)
assert "HDFDataSet" in str(ds_versioned)
assert "HDFDataSet" in str(ds)
assert "protocol" in str(ds_versioned)
assert "protocol" in str(ds)
assert "key" in str(ds_versioned)
assert "key" in str(ds)
def test_save_and_load(self, versioned_hdf_data_set, dummy_dataframe):
"""Test that saved and reloaded data matches the original one for
the versioned data set."""
versioned_hdf_data_set.save(dummy_dataframe)
reloaded_df = versioned_hdf_data_set.load()
assert_frame_equal(dummy_dataframe, reloaded_df)
def test_no_versions(self, versioned_hdf_data_set):
"""Check the error if no versions are available for load."""
pattern = r"Did not find any versions for HDFDataSet\(.+\)"
with pytest.raises(DataSetError, match=pattern):
versioned_hdf_data_set.load()
def test_exists(self, versioned_hdf_data_set, dummy_dataframe):
"""Test `exists` method invocation for versioned data set."""
assert not versioned_hdf_data_set.exists()
versioned_hdf_data_set.save(dummy_dataframe)
assert versioned_hdf_data_set.exists()
def test_prevent_overwrite(self, versioned_hdf_data_set, dummy_dataframe):
"""Check the error when attempting to override the data set if the
corresponding hdf file for a given save version already exists."""
versioned_hdf_data_set.save(dummy_dataframe)
pattern = (
r"Save path \`.+\` for HDFDataSet\(.+\) must "
r"not exist if versioning is enabled\."
)
with pytest.raises(DataSetError, match=pattern):
versioned_hdf_data_set.save(dummy_dataframe)
@pytest.mark.parametrize(
"load_version", ["2019-01-01T23.59.59.999Z"], indirect=True
)
@pytest.mark.parametrize(
"save_version", ["2019-01-02T00.00.00.000Z"], indirect=True
)
def test_save_version_warning(
self, versioned_hdf_data_set, load_version, save_version, dummy_dataframe
):
"""Check the warning when saving to the path that differs from
the subsequent load path."""
pattern = (
r"Save version `{0}` did not match load version `{1}` "
r"for HDFDataSet\(.+\)".format(save_version, load_version)
)
with pytest.warns(UserWarning, match=pattern):
versioned_hdf_data_set.save(dummy_dataframe)
def test_http_filesystem_no_versioning(self):
pattern = r"HTTP\(s\) DataSet doesn't support versioning\."
with pytest.raises(DataSetError, match=pattern):
HDFDataSet(
filepath="https://example.com/file.h5",
key=HDF_KEY,
version=Version(None, None),
)
| 39.544248 | 87 | 0.681996 |
73c036c48b68af9b0b3f13d570e99e3c027c8247 | 4,499 | py | Python | services/web/server/tests/unit/login/conftest.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | services/web/server/tests/unit/login/conftest.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | 2 | 2018-05-13T09:10:57.000Z | 2019-03-06T08:10:40.000Z | services/web/server/tests/unit/login/conftest.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | """ Tests functionality that requires login users
"""
# pylint:disable=wildcard-import
# pylint:disable=unused-import
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import os
import sys
from pathlib import Path
import pytest
import sqlalchemy as sa
import trafaret_config
import yaml
import simcore_service_webserver.utils
from simcore_service_webserver.application import create_application
from simcore_service_webserver.db import DSN
from simcore_service_webserver.db_models import confirmations, metadata, users
from simcore_service_webserver.application_config import app_schema as app_schema
sys.path.append(str(Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent.parent.parent / 'helpers'))
@pytest.fixture(scope="session")
def here():
return Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
@pytest.fixture(scope="session")
def mock_dir(here):
return here / "mock"
@pytest.fixture(scope='session')
def osparc_simcore_root_dir(here):
root_dir = here.parent.parent.parent.parent.parent.parent.resolve()
assert root_dir.exists(), "Is this service within osparc-simcore repo?"
assert any(root_dir.glob("services/web/server")), "%s not look like rootdir" % root_dir
return root_dir
@pytest.fixture(scope="session")
def app_cfg(here, osparc_simcore_root_dir):
cfg_path = here / "config.yaml"
assert cfg_path.exists()
variables = dict(os.environ)
variables.update({
'OSPARC_SIMCORE_REPO_ROOTDIR': str(osparc_simcore_root_dir),
})
# validates and fills all defaults/optional entries that normal load would not do
cfg_dict = trafaret_config.read_and_validate(cfg_path, app_schema, vars=variables)
return cfg_dict
@pytest.fixture(scope='session')
def docker_compose_file(here, app_cfg):
""" Overrides pytest-docker fixture
"""
old = os.environ.copy()
cfg = app_cfg["db"]["postgres"]
# docker-compose reads these environs
os.environ['TEST_POSTGRES_DB']=cfg['database']
os.environ['TEST_POSTGRES_USER']=cfg['user']
os.environ['TEST_POSTGRES_PASSWORD']=cfg['password']
dc_path = here / 'docker-compose.yml'
assert dc_path.exists()
yield str(dc_path)
os.environ = old
@pytest.fixture(scope='session')
def postgres_service(docker_services, docker_ip, app_cfg):
cfg = app_cfg["db"]["postgres"]
cfg['host'] = docker_ip
cfg['port'] = docker_services.port_for('postgres', 5432)
url = DSN.format(**cfg)
# Wait until service is responsive.
docker_services.wait_until_responsive(
check=lambda: is_postgres_responsive(url),
timeout=30.0,
pause=0.1,
)
return url
@pytest.fixture
def postgres_db(app_cfg, postgres_service): # NOTE: if postgres_services started manually, comment
"""
For debugging, postgres_service can be started manually as
docker-compose -f docker-compose.debug.yml up
In that case, comment postgres_service)
"""
cfg = app_cfg["db"]["postgres"]
url = DSN.format(**cfg)
# NOTE: Comment this to avoid postgres_service
url = postgres_service
# Configures db and initializes tables
# Uses syncrounous engine for that
engine = sa.create_engine(url, isolation_level="AUTOCOMMIT")
metadata.create_all(bind=engine, tables=[users, confirmations], checkfirst=True)
yield engine
metadata.drop_all(engine)
engine.dispose()
@pytest.fixture
def server(loop, aiohttp_server, app_cfg, monkeypatch, aiohttp_unused_port, postgres_db): #pylint: disable=R0913
port = app_cfg["main"]["port"] = aiohttp_unused_port()
app = create_application(app_cfg)
path_mail(monkeypatch)
server = loop.run_until_complete( aiohttp_server(app, port=port) )
return server
@pytest.fixture
def client(loop, aiohttp_client, server):
client = loop.run_until_complete(aiohttp_client(server))
return client
# helpers ---------------
def path_mail(monkeypatch):
async def send_mail(*args):
print('=== EMAIL TO: {}\n=== SUBJECT: {}\n=== BODY:\n{}'.format(*args))
monkeypatch.setattr(simcore_service_webserver.login.utils, 'send_mail', send_mail)
def is_postgres_responsive(url):
"""Check if something responds to ``url`` """
try:
engine = sa.create_engine(url)
conn = engine.connect()
conn.close()
except sa.exc.OperationalError:
return False
return True
| 28.656051 | 122 | 0.717937 |
73c05a7fad317a4113f7ce51002efea3d8cbcdf7 | 1,036 | py | Python | characterCrowdSrc/exceptions.py | campbellwmorgan/charactercrowd | 968f53ea23c347d57e7e45d46206ab4dc8fb39ca | [
"Apache-2.0"
] | 1 | 2016-08-04T11:49:12.000Z | 2016-08-04T11:49:12.000Z | characterCrowdSrc/exceptions.py | campbellwmorgan/charactercrowd | 968f53ea23c347d57e7e45d46206ab4dc8fb39ca | [
"Apache-2.0"
] | null | null | null | characterCrowdSrc/exceptions.py | campbellwmorgan/charactercrowd | 968f53ea23c347d57e7e45d46206ab4dc8fb39ca | [
"Apache-2.0"
] | null | null | null |
import sys, traceback
import pymel.core as pm
class CCCoreException(Exception):
pass
class CCGuiException(Exception):
pass
def wrapper(fn):
"""
Wraps exception handler
around common functions
to write trace and popup gui dialogs
"""
def innerWrapper(*args, **kwargs):
try:
result = fn(*args, **kwargs)
return result
except CCGuiException as e:
pm.confirmDialog(
title="CharacterCrowd Error",
message=str(e),
button=["OK"]
)
except CCCoreException as e:
print("CharacterCrowd:")
print(str(e))
pm.confirmDialog(
title="CharacterCrowd Error",
message=str(e),
button=["OK"],
)
except:
print("CharacterCrowd Exception:")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return innerWrapper
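# --- Illustrative usage (added, not part of the original module) -------------
# Any GUI-facing callable can be wrapped so that CCGuiException/CCCoreException
# errors surface as Maya confirm dialogs instead of raw tracebacks; the function
# name below is hypothetical and exists only to demonstrate the decorator.
@wrapper
def _example_gui_action(selection=None):
    if not selection:
        raise CCGuiException("Nothing is selected")
    return selection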
| 25.9 | 49 | 0.513514 |
73c07d8e24b6b3b1fa57e57c81b4f31d5c99ae49 | 1,558 | py | Python | atom/nucleus/python/test/test_document_api.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_document_api.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_document_api.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.api.document_api import DocumentApi # noqa: E501
from nucleus_api.rest import ApiException
class TestDocumentApi(unittest.TestCase):
"""DocumentApi unit test stubs"""
def setUp(self):
self.api = nucleus_api.api.document_api.DocumentApi() # noqa: E501
def tearDown(self):
pass
def test_create_document_using_post(self):
"""Test case for create_document_using_post
Create a Document # noqa: E501
"""
pass
def test_delete_document_using_delete(self):
"""Test case for delete_document_using_delete
Delete a tenant document by Id # noqa: E501
"""
pass
def test_get_document_all_using_get(self):
"""Test case for get_document_all_using_get
List all Documents # noqa: E501
"""
pass
def test_get_document_using_get(self):
"""Test case for get_document_using_get
Retrieve a Tenant Document by Document Id # noqa: E501
"""
pass
def test_update_document_using_put(self):
"""Test case for update_document_using_put
Update a Tenant Document # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 22.257143 | 75 | 0.662388 |
73c08af3383168b40bff87e52c13c9c64a5fb3ed | 1,403 | py | Python | tests/test_post_init.py | cnheider/warg | 4bcc5d3e86dd843773ca6cbca21bcab3b8ae84eb | [
"Apache-2.0"
] | 1 | 2018-11-30T09:14:28.000Z | 2018-11-30T09:14:28.000Z | tests/test_post_init.py | aivclab/warg | 4bcc5d3e86dd843773ca6cbca21bcab3b8ae84eb | [
"Apache-2.0"
] | 49 | 2019-04-02T12:06:10.000Z | 2019-08-31T14:30:12.000Z | tests/test_post_init.py | cnHeider/warg | 4bcc5d3e86dd843773ca6cbca21bcab3b8ae84eb | [
"Apache-2.0"
] | 1 | 2019-03-16T13:56:03.000Z | 2019-03-16T13:56:03.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from warg import drop_unused_kws
from warg.metas.post_init import PostInit
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
"""
def test_post_init_class():
class MyTestingClass(metaclass=PostInit):
"""
class with the metaclass passed as an argument"""
@drop_unused_kws
def __init__(self, *args, **kwargs):
print(kwargs)
def __post_init__(self, *args, **kwargs):
print(args, kwargs)
def __call__(self, *args, **kwargs):
print("a")
a = MyTestingClass("asdc", kas=2)
a()
def test_post_init_no_kws_class():
class MyTestingClass(metaclass=PostInit):
"""
class with the metaclass passed as an argument"""
@drop_unused_kws
def __init__(self, *args):
print("Init class")
@drop_unused_kws
def __post_init__(self, *args):
print(args)
def __call__(self, *args, **kwargs):
print("a")
a = MyTestingClass("asdc", kas=2)
a()
def test_no_post_init_class():
class MyTestingClass(metaclass=PostInit):
"""
class with the metaclass passed as an argument"""
def __init__(self):
print("Init class")
def __call__(self, *args, **kwargs):
print("a")
a = MyTestingClass()
a()
| 21.257576 | 57 | 0.579473 |
73c0fa87360537bf7f25bbc39ea14903bf7ff91b | 5,596 | py | Python | lines/cif2xy.py | stefsmeets/lines | 6993183517fbf99e2feda288d992b742af8816d1 | [
"MIT"
] | 3 | 2019-10-06T18:45:51.000Z | 2022-03-14T20:22:51.000Z | lines/cif2xy.py | stefsmeets/lines | 6993183517fbf99e2feda288d992b742af8816d1 | [
"MIT"
] | 2 | 2016-05-02T15:12:25.000Z | 2018-10-14T13:06:46.000Z | lines/cif2xy.py | stefsmeets/lines | 6993183517fbf99e2feda288d992b742af8816d1 | [
"MIT"
] | 3 | 2016-05-02T16:51:37.000Z | 2021-06-18T16:01:43.000Z |
#!/usr/bin/env python
# Lines - a python plotting program
# Copyright (C) 2015 Stef Smeets
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import subprocess as sp
import sys
import os
import argparse
import numpy as np
from xcore.formats import read_cif
__version__ = "2015-10-01"
planck_constant = 6.62606957E-34
elementary_charge = 1.60217656E-19
speed_of_light = 2.99792458E8
def energy2wavelength(E):
"""Takes wavelength in keV, returns energy in Angstrom"""
# 1E3 from ev to kev, divide by 1E10 from angstrom to meter
return 1E10*planck_constant*speed_of_light/(E*1E3*elementary_charge)
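# Worked example (added note): lambda[Angstrom] ~= 12.398 / E[keV], so
# energy2wavelength(12.398) ~= 1.000 A and energy2wavelength(17.48) ~= 0.709 A,
# close to the Mo Ka1 value listed in parse_wl further below.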
def replace_extension(fn, new=".xy"):
"""replaces cif extension by xy extension"""
root, ext = os.path.splitext(fn)
basename = os.path.basename(root)
xy_out = basename + "." + new
return xy_out
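# (added note) e.g. replace_extension("zeolites/ABC.cif", new="xy") -> "ABC.xy";
# the default new=".xy" would give "ABC..xy" because a dot is always inserted.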
def cif2xy(cif, wl=1.0):
print "Reading CIF:", cif
cell, atoms = read_cif(cif)
a, b, c, al, be, ga = cell.parameters
spgr = cell.spgr_name
title = 'lines'
xy_out = replace_extension(cif, new="xy")
focus_inp = open("focus.inp", 'w')
focus_out = "focus.out"
print >> focus_inp, """
Title {title}
SpaceGroup {spgr}
UnitCell {a} {b} {c} {al} {be} {ga}
Lambda {wl}
ProfileStartEndStep 2 49.99 0.002
ProfilePOLRA 1.0
ProfileFWHM UVW 0.001 0.0 0.0
#ProfileAsym a(i) -0.005 0.003 0
ProfilePeakShape PseudoVoigt
PseudoVoigtPeakRange 25
PseudoVoigtFracLorentz 0.5
ProfileBackground 0
#ProfileReferenceRefl
ProfileReferenceMax 50000
""".format(title=title,
spgr=spgr,
a=a,
b=b,
c=c,
al=al,
be=be,
ga=ga,
wl=wl)
for i, atom in atoms.iterrows():
label = atom.label
element = atom.symbol
if element == 'T':
element = 'Si'
x, y, z = atom.x, atom.y, atom.z
occ = atom.occ
u_iso = atom.biso / (8*np.pi**2)
print >> focus_inp, '{label:8} {element:4} {x:8.5f} {y:8.5f} {z:8.5f} {occ:.4f} {u_iso:.4f}'.format(label=label,
element=element,
x=x, y=y, z=z,
occ=occ,
u_iso=u_iso)
print >> focus_inp, "End"
focus_inp.close()
print "Generating powder pattern... (wl = {} A)".format(wl)
sp.call(
"focus -PowderStepScan {} > {}".format(focus_inp.name, focus_out), shell=True)
begin_switch = ">Begin stepscan"
end_switch = "&"
focus_stepscan = open(focus_out, 'r')
xye = open(xy_out, 'w')
do_print = 0
for line in focus_stepscan:
if line.startswith(end_switch):
break
elif do_print:
print >> xye, line,
elif line.startswith(begin_switch):
do_print = 1
focus_stepscan.next()
focus_stepscan.close()
xye.close()
return xy_out
def main():
description = """"""
epilog = 'Updated: {}'.format(__version__)
parser = argparse.ArgumentParser(description=description,
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
version=__version__)
def parse_wl(string):
wavelengths = {"cra1": 2.28970, "cra2": 2.29361, "cr": 2.2909,
"fea1": 1.93604, "fea2": 1.93998, "fe": 1.9373,
"cua1": 1.54056, "cua2": 1.54439, "cu": 1.5418,
"moa1": 0.70930, "moa2": 0.71359, "mo": 0.7107,
"aga1": 0.55941, "aga2": 0.56380, "ag": 0.5608, "sls": 1.0000}
if string.lower().endswith('kev'):
return energy2wavelength(float(string.lower().replace('kev', "")))
elif string.lower() in wavelengths:
return wavelengths[string.lower()]
else:
return float(string)
parser.add_argument("args",
type=str, metavar="FILE", nargs='*',
help="Paths to cif files.")
parser.add_argument("--wavelength",
action="store", type=parse_wl, dest='wavelength',
help="Specify the wavelength to use for the powder pattern generation. Default = 1.0 Angstrom")
parser.set_defaults(wavelength=1.0)
options = parser.parse_args()
args = options.args
for arg in args:
out = cif2xy(arg, wl=options.wavelength)
print "Printed powder pattern to", out
print
if __name__ == '__main__':
main()
| 30.747253 | 124 | 0.557362 |
73c102834152775b71d76c2763791ef131b1648e | 16,756 | py | Python | visual_automata/fa/dfa.py | ManuelALH/ProyectoTeoriaComputacionDFA | f989c71934f2d31f25c31f60ed1aab9e8e0a971a | [
"MIT"
] | 45 | 2021-03-19T14:14:16.000Z | 2022-03-29T11:20:53.000Z | visual_automata/fa/dfa.py | ManuelALH/ProyectoTeoriaComputacionDFA | f989c71934f2d31f25c31f60ed1aab9e8e0a971a | [
"MIT"
] | 2 | 2021-04-16T15:11:08.000Z | 2021-04-16T15:23:31.000Z | visual_automata/fa/dfa.py | ManuelALH/ProyectoTeoriaComputacionDFA | f989c71934f2d31f25c31f60ed1aab9e8e0a971a | [
"MIT"
] | 2 | 2021-03-24T11:01:54.000Z | 2021-04-24T03:10:49.000Z |
"""Classes and methods for working with visual deterministic finite automata."""
from typing import Union
import os
import pandas as pd
from pandas import DataFrame
from automata.fa.dfa import DFA
from colormath.color_objects import sRGBColor
from graphviz import Digraph
from IPython.display import display
from visual_automata.colors import (
create_palette,
hex_to_rgb_color,
list_cycler,
)
class VisualDFA:
"""A wrapper for an automata-lib deterministic finite automaton."""
def __init__(
self,
dfa: DFA = None,
*,
states: set = None,
input_symbols: set = None,
transitions: dict = None,
initial_state: str = None,
final_states: set = None
):
if dfa:
self.dfa = dfa
else:
self.dfa = DFA(
states=states,
input_symbols=input_symbols,
transitions=transitions,
initial_state=initial_state,
final_states=final_states,
)
# -------------------------------------------------------------------------
# Mimic behavior of automata-lib DFA.
@property
def states(self):
"""Pass on .states from the DFA"""
return self.dfa.states
@states.setter
def states(self, states: set):
"""Set .states on the DFA"""
self.dfa.states = states
@property
def input_symbols(self):
"""Pass on .input_symbols from the DFA"""
return self.dfa.input_symbols
@input_symbols.setter
def input_symbols(self, input_symbols: set):
"""Set .input_symbols on the DFA"""
self.dfa.input_symbols = input_symbols
@property
def transitions(self):
"""Pass on .transitions from the DFA"""
return self.dfa.transitions
@transitions.setter
def transitions(self, transitions: dict):
"""Set .transitions on the DFA"""
self.dfa.transitions = transitions
@property
def initial_state(self):
"""Pass on .initial_state from the DFA"""
return self.dfa.initial_state
@initial_state.setter
def initial_state(self, initial_state: str):
"""Set .initial_state on the DFA"""
self.dfa.initial_state = initial_state
@property
def final_states(self):
"""Pass on .final_states from the DFA"""
return self.dfa.final_states
@final_states.setter
def final_states(self, final_states: set):
"""Set .final_states on the DFA"""
self.dfa.final_states = final_states
def copy(self) -> DFA:
"""Create a deep copy of the automaton."""
return self.__class__(**vars(self))
def minify(self) -> DFA:
"""
Create a minimal DFA which accepts the same inputs as this DFA.
First, non-reachable states are removed.
Then, similar states are merged using Hopcroft's Algorithm.
retain_names: If True, merged states retain names.
If False, new states will be named 0, ..., n-1.
Returns:
DFA: A new minimal VisualDFA, if applicable.
"""
new_dfa = self.dfa.copy()
new_dfa = new_dfa.minify()
new_dfa = VisualDFA(new_dfa)
return new_dfa
# -------------------------------------------------------------------------
# Define new attributes.
@property
def table(self) -> DataFrame:
"""
Generates a transition table of the given VisualDFA.
Returns:
DataFrame: A transition table of the VisualDFA.
"""
initial_state = self.initial_state
final_states = [str(x) for x in self.final_states]
transitions = self.__transition_sort(self.transitions)
table: dict = {}
for state, transition in transitions.items():
if state == initial_state and state in final_states:
state = "→*" + state
elif state == initial_state:
state = "→" + state
elif state in final_states:
state = "*" + state
row: dict = {}
for input_symbol, next_state in transition.items():
if next_state in final_states:
row[input_symbol] = "*" + next_state
else:
row[input_symbol] = next_state
table[state] = row
table = pd.DataFrame.from_dict(table).T
return table
def __str__(self) -> str:
return self.table.to_string()
def __repr__(self) -> str:
return self.table.to_string()
# -------------------------------------------------------------------------
# Adapt behavior of automata-lib DFA.
# Works like DFA._get_next_current_state, without raising exceptions.
# Defined as a an internal/private method, prefixed with "__" instead of "_".
def __get_next_current_state(
self, current_state: str, input_symbol: str
) -> str:
"""
Follow the transition for the given input symbol on the current state.
Args:
current_state (str): Current state.
input_symbol (str): Input symbol.
Returns:
str: The next current state after entering input symbol.
"""
if input_symbol in self.dfa.transitions[current_state]:
return self.dfa.transitions[current_state][input_symbol]
# -------------------------------------------------------------------------
# Define helper methods.
@staticmethod
def __transition_sort(transitions: dict) -> dict:
"""
Sorts the transitions dictionary.
Args:
transitions (dict): Unsorted transitions.
Returns:
dict: Sorted transitions.
"""
transitions = dict(
sorted(
transitions.items(),
key=lambda k: k[0].replace("{", "").replace("}", ""),
)
)
for state, transition in transitions.items():
transitions[state] = dict(sorted(transition.items()))
return transitions
@staticmethod
def __transitions_pairs(transitions: dict) -> list:
"""
        Generates a list of all possible transition pairs for all input symbols.
        Args:
            transitions (dict): DFA transitions.
Returns:
list: All possible transitions for all the given input symbols.
"""
transition_possibilities: list = []
for state, transitions in transitions.items():
for symbol, transition in transitions.items():
transition_possibilities.append((state, transition, symbol))
return transition_possibilities
@staticmethod
def __transition_steps(
initial_state, final_states, input_str: str, transitions_taken: list, status: bool
) -> DataFrame:
"""
Generates a table of taken transitions based on the input string and it's result.
Args:
initial_state (str): The DFA's initial state.
final_states (set): The DFA's final states.
input_str (str): The input string to run on the DFA.
transitions_taken (list): Transitions taken from the input string.
status (bool): The result of the input string.
Returns:
DataFrame: Table of taken transitions based on the input string and it's result.
"""
current_states = transitions_taken.copy()
for i, state in enumerate(current_states):
if (
state == initial_state and state in
final_states
):
current_states[i] = "→*" + state
elif state == initial_state:
current_states[i] = "→" + state
elif state in final_states:
current_states[i] = "*" + state
new_states = current_states.copy()
del current_states[-1]
del new_states[0]
inputs = [str(x) for x in input_str]
transition_steps: dict = {
"Current state:": current_states,
"Input symbol:": inputs,
"New state:": new_states,
}
transition_steps = pd.DataFrame.from_dict(
transition_steps
)
transition_steps.index += 1
transition_steps = pd.DataFrame.from_dict(
transition_steps
).rename_axis("Step:", axis=1)
if status:
transition_steps.columns = pd.MultiIndex.from_product(
[["[Accepted]"], transition_steps.columns]
)
return transition_steps
else:
transition_steps.columns = pd.MultiIndex.from_product(
[["[Rejected]"], transition_steps.columns]
)
return transition_steps
# -------------------------------------------------------------------------
# Define new features.
def input_check(
self, input_str: str, return_result=False
) -> Union[bool, list, list]:
"""
Checks if string of input symbols results in final state.
Args:
input_str (str): The input string to run on the DFA.
return_result (bool, optional): Returns results to the show_diagram method. Defaults to False.
Raises:
TypeError: To let the user know a string has to be entered.
Returns:
Union[bool, list, list]: If the last state is the final state, transition pairs, and steps taken.
"""
if not isinstance(input_str, str):
raise TypeError(f"input_str should be a string. {input_str} is {type(input_str)}, not a string.")
current_state = self.dfa.initial_state
transitions_taken = [current_state]
symbol_sequence: list = []
status: bool = True
for symbol in input_str:
symbol_sequence.append(symbol)
current_state = self.__get_next_current_state(
current_state, symbol
)
transitions_taken.append(current_state)
if transitions_taken[-1] not in self.dfa.final_states:
status = False
else:
status = True
taken_transitions_pairs = [
(a, b, c)
for a, b, c in zip(
transitions_taken, transitions_taken[1:], symbol_sequence
)
]
taken_steps = self.__transition_steps(
initial_state=self.dfa.initial_state,
final_states=self.dfa.final_states,
input_str=input_str,
transitions_taken=transitions_taken,
status=status,
)
if return_result:
return status, taken_transitions_pairs, taken_steps
else:
return taken_steps # .to_string(index=False)
def show_diagram(
self,
input_str: str = None,
filename: str = None,
format_type: str = "png",
path: str = None,
*,
view=False,
cleanup: bool = True,
horizontal: bool = True,
reverse_orientation: bool = False,
fig_size: tuple = (8, 8),
font_size: float = 14.0,
arrow_size: float = 0.85,
state_seperation: float = 0.5,
) -> Digraph:
"""
Generates the graph associated with the given DFA.
Args:
dfa (DFA): Deterministic Finite Automata to graph.
input_str (str, optional): String list of input symbols. Defaults to None.
filename (str, optional): Name of output file. Defaults to None.
format_type (str, optional): File format [svg/png/...]. Defaults to "png".
path (str, optional): Folder path for output file. Defaults to None.
view (bool, optional): Storing and displaying the graph as a pdf. Defaults to False.
cleanup (bool, optional): Garbage collection. Defaults to True.
horizontal (bool, optional): Direction of node layout. Defaults to True.
reverse_orientation (bool, optional): Reverse direction of node layout. Defaults to False.
fig_size (tuple, optional): Figure size. Defaults to (8, 8).
font_size (float, optional): Font size. Defaults to 14.0.
arrow_size (float, optional): Arrow head size. Defaults to 0.85.
state_seperation (float, optional): Node distance. Defaults to 0.5.
Returns:
Digraph: The graph in dot format.
"""
# Converting to graphviz preferred input type,
# keeping the conventional input styles; i.e fig_size(8,8)
fig_size = ", ".join(map(str, fig_size))
font_size = str(font_size)
arrow_size = str(arrow_size)
state_seperation = str(state_seperation)
# Defining the graph.
graph = Digraph(strict=False)
graph.attr(
size=fig_size,
ranksep=state_seperation,
)
if horizontal:
graph.attr(rankdir="LR")
if reverse_orientation:
if horizontal:
graph.attr(rankdir="RL")
else:
graph.attr(rankdir="BT")
# Defining arrow to indicate the initial state.
graph.node("Initial", label="", shape="point", fontsize=font_size)
# Defining all states.
for state in sorted(self.dfa.states):
if (
state in self.dfa.initial_state and state in
self.dfa.final_states
):
graph.node(state, shape="doublecircle", fontsize=font_size)
elif state in self.dfa.initial_state:
graph.node(state, shape="circle", fontsize=font_size)
elif state in self.dfa.final_states:
graph.node(state, shape="doublecircle", fontsize=font_size)
else:
graph.node(state, shape="circle", fontsize=font_size)
# Point initial arrow to the initial state.
graph.edge("Initial", self.dfa.initial_state, arrowsize=arrow_size)
# Define all tansitions in the finite state machine.
all_transitions_pairs = self.__transitions_pairs(self.dfa.transitions)
if input_str is None:
for pair in all_transitions_pairs:
graph.edge(
pair[0],
pair[1],
label=" {} ".format(pair[2]),
arrowsize=arrow_size,
fontsize=font_size,
)
status = None
else:
status, taken_transitions_pairs, taken_steps = self.input_check(
input_str=input_str, return_result=True
)
remaining_transitions_pairs = [
x
for x in all_transitions_pairs
if x not in taken_transitions_pairs
]
# Define color palette for transitions
if status:
start_color = hex_to_rgb_color("#FFFF00")
end_color = hex_to_rgb_color("#00FF00")
else:
start_color = hex_to_rgb_color("#FFFF00")
end_color = hex_to_rgb_color("#FF0000")
number_of_colors = len(input_str)
palette = create_palette(
start_color, end_color, number_of_colors, sRGBColor
)
color_gen = list_cycler(palette)
# Define all tansitions in the finite state machine with traversal.
counter = 0
for pair in taken_transitions_pairs:
counter += 1
edge_color = next(color_gen)
graph.edge(
pair[0],
pair[1],
label=" [{}]\n{} ".format(counter, pair[2]),
arrowsize=arrow_size,
fontsize=font_size,
color=edge_color,
penwidth="2.5",
)
for pair in remaining_transitions_pairs:
graph.edge(
pair[0],
pair[1],
label=" {} ".format(pair[2]),
arrowsize=arrow_size,
fontsize=font_size,
)
# Write diagram to file. PNG, SVG, etc.
if filename:
graph.render(
filename=filename,
format=format_type,
directory=path,
cleanup=cleanup,
)
if view:
graph.render(view=True)
if input_str:
display(taken_steps)
return graph
else:
return graph
# -------------------------------------------------------------------------
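if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): a two-state DFA over {0, 1} that accepts strings ending in "1".
    _dfa = VisualDFA(
        states={"q0", "q1"},
        input_symbols={"0", "1"},
        transitions={
            "q0": {"0": "q0", "1": "q1"},
            "q1": {"0": "q0", "1": "q1"},
        },
        initial_state="q0",
        final_states={"q1"},
    )
    print(_dfa.table)                 # transition table as a pandas DataFrame
    print(_dfa.input_check("1011"))   # step-by-step trace ending in [Accepted]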
| 33.850505 | 109 | 0.554428 |
73c11c466365722c81ec3c0ea297381e5117af6c | 20,159 | py | Python | SimpleGP/simplega.py | mgraffg/simplegp | 7e4639e3ac76571a4e67669cad6e8e775b3fc345 | [
"Apache-2.0"
] | 2 | 2015-03-18T17:26:20.000Z | 2019-03-18T17:28:16.000Z | SimpleGP/simplega.py | mgraffg/simplegp | 7e4639e3ac76571a4e67669cad6e8e775b3fc345 | [
"Apache-2.0"
] | 1 | 2015-04-19T17:02:49.000Z | 2015-07-21T18:48:34.000Z | SimpleGP/simplega.py | mgraffg/simplegp | 7e4639e3ac76571a4e67669cad6e8e775b3fc345 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import types
import os
import signal
import inspect
class BestNotFound(Exception):
"""
This exception is raised when the run exit with an non-viable individual.
"""
pass
class SimpleGA(object):
"""
SimpleGA is a steady state genetic algorithm with tournament selection,
uniform crossover and mutation.
>>> import numpy as np
>>> from SimpleGP import SimpleGA
First let us create a simple regression problem
>>> _ = np.random.RandomState(0)
>>> x = np.linspace(0, 1, 100)
>>> pol = np.array([0.2, -0.3, 0.2])
>>> X = np.vstack((x**2, x, np.ones(x.shape[0]))).T
>>> f = (X * pol).sum(axis=1)
The objective is to find the coefficients 0.2, -0.3, and 0.2
>>> s = SimpleGA.init_cl().train(X, f)
>>> s.run()
True
The coefficients are:
>>> print s._p[s.best]
[ 0.10430681 -0.18460194 0.17084382]
"""
def __init__(self, popsize=1000, ppm=0.1, chromosome_length=3,
tournament_size=2, generations=50, seed=None, verbose=False,
pxo=0.9, pm=0.2, stats=False, fname_best=None,
test_set=None, test_set_y=None,
save_only_best=False,
walltime=None,
dtype=np.float,
ind_dtype=np.int):
self._popsize = popsize
self._ppm = 1 - ppm
self._tournament_size = tournament_size
self._generations = generations
self._pxo = pxo
self._pm = pm
self._verbose = verbose
self._chromosome_length = chromosome_length
self.gens_ind = popsize
self._dtype = dtype
self._ind_dtype = ind_dtype
self._timeout = False
self._stats = stats
if stats:
self.fit_per_gen = np.zeros(self._generations)
self.seed = seed
self._best_fit = None
self._best = None
self._fname_best = fname_best
self._run = True
self._last_call_to_stats = 0
self._p = None
if test_set is not None:
self.set_test(test_set, y=test_set_y)
else:
self._test_set = test_set
self._test_set_y = test_set_y
self._early_stopping = None
self._save_only_best = save_only_best
self._walltime = walltime
signal.signal(signal.SIGTERM, self.on_exit)
if self._walltime is not None:
signal.signal(signal.SIGALRM, self.walltime)
signal.alarm(self._walltime)
def early_stopping_save(self, k, fit_k=None):
"""
Storing the best so far on the validation set.
        This function is called from early_stopping
"""
assert fit_k is not None
self._early_stopping = [fit_k,
self.population[k].copy()]
@property
def early_stopping(self):
"""
Stores the best individual on the test_set
"""
return self._early_stopping
def fitness_validation(self, k):
"""
Fitness function used in the validation set.
        In this case it is the same one used during the evolution
"""
cnt = self._test_set_y.shape[0]
fit_k = -self.distance(self._test_set_y,
self._pr_test_set[:cnt])
return fit_k
@early_stopping.setter
def early_stopping(self, k):
if self._test_set_y is None:
return
fit_k = self.fitness_validation(k)
if self._early_stopping is None or fit_k > self._early_stopping[0]:
self.early_stopping_save(k, fit_k=fit_k)
self._early_stopping_gens_ind = self.gens_ind
@property
def seed(self):
"""
Seed used
"""
return self._seed
@seed.setter
def seed(self, v):
self._seed = v
if v is not None:
np.random.seed(self._seed)
@property
def population(self):
"""
Population
"""
return self._p
@property
def popsize(self):
"""Population size"""
return self._popsize
@popsize.setter
def popsize(self, popsize):
"""
Set the population size, it handles the case where the new
population size is smaller or larger than the current one
"""
if self._popsize == popsize:
return
if self._popsize > popsize:
index = self._fitness.argsort()[::-1][:popsize]
self._p = self._p[index]
self._fitness = self._fitness[index]
else:
d = popsize - self._popsize
cl = self._chromosome_length
self._p.resize((popsize, cl))
self._p[self._popsize:] = self.random_ind(size=((d, cl)))
self._popsize = popsize
@property
def generations(self):
"""Number of generations"""
return self._generations
@generations.setter
def generations(self, v):
self._generations = v
@classmethod
def _get_param_names(cls):
"""Get parameter names for the class"""
args = inspect.getargspec(cls.__init__)[0]
args.pop(0)
return args
@classmethod
def _bases(cls):
"""Get class' hierarchy"""
lst = list(cls.__bases__)
l = []
while len(lst):
k = lst.pop()
if k == object:
continue
lst += k.__bases__
l.append(k)
return l
def get_params(self, deep=True):
"""
Parameters and their values.
"""
params = {}
keys = set()
if deep:
for ins in self._bases():
keys.update(ins._get_param_names())
keys.update(self._get_param_names())
for key in keys:
params[key] = getattr(self, "_" + key)
return params
def init(self):
"""
        Setting some variables to their default values
"""
self.gens_ind = self.popsize
self._run = True
self._last_call_to_stats = 0
self._best_fit = None
self._best = None
def walltime(self, *args, **kwargs):
"""
This method is called when the maximum number of seconds is reached.
"""
self.on_exit(*args, **kwargs)
self._timeout = True
def on_exit(self, *args, **kwargs):
"""
Method called at the end of the evolutionary process or when a
signal is received
"""
self.save()
self._run = False
def set_test(self, x, y=None):
"""
        x is the test set; this is used to test, during the evolution, that
the best individual does not produce nan or inf
"""
self._test_set = x.astype(self._dtype, copy=False, order='C')
if y is not None:
self._test_set_y = y.astype(self._dtype, copy=False, order='C')
def fit(self, x, f, test=None, test_y=None, **kwargs):
"""
Fitting the model with x as inputs and f as outputs.
"""
self.train(x, f, **kwargs)
if test is not None:
self.set_test(test, y=test_y)
self.run()
return self
def train(self, x, f):
"""
This is to set the training set.
        x and f are copied only if their dtype differs from the expected dtype
"""
self._x = x.astype(self._dtype, copy=False, order='C')
self._f = f.astype(self._dtype, copy=False, order='C')
return self
def crossover(self, father1, father2):
"""
crossover performs an uniform crossover
"""
mask = np.random.binomial(1, 0.5, self._p.shape[1]).astype(np.bool)
return father1 * mask + father2 * ~mask
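    # Worked example (added note): with father1 = [1, 2, 3], father2 = [9, 8, 7]
    # and mask = [True, False, True], the offspring is [1, 8, 3]; each gene comes
    # from father1 where the mask is True and from father2 otherwise.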
def random_ind(self, size=None):
"""
Create a random individual
"""
size = size if size is not None else self._p.shape[1]
return np.random.uniform(-1, 1, size)
def mutation(self, father1):
"""
Mutation performs an uniform mutation with point mutation probability
set by ppm
"""
father2 = self.random_ind()
mask = np.random.binomial(1, self._ppm,
self._p.shape[1]).astype(np.bool)
return father1 * mask + father2 * ~mask
def selection(self, *args, **kwargs):
"""
Select a individual from the population.
"""
return self.tournament(*args)
def tournament(self, neg=False):
"""
Tournament selection, it also performs negative tournament selection if
neg=True
"""
if not neg:
func_cmp = lambda x, y: x < y
else:
func_cmp = lambda x, y: x > y
best = np.random.randint(self._popsize) if self._popsize > 2 else 0
for i in range(self._tournament_size-1):
comp = np.random.randint(self._popsize) if self._popsize > 2 else 1
while comp == best:
comp = np.random.randint(self._popsize)
if func_cmp(self.fitness(best), self.fitness(comp)):
best = comp
return best
def load_extras(self, fpt):
pass
def load_prev_run(self):
"""
Method used to load a previous run. It returns False if fails
"""
import gzip
        def load(fpt):
self._p = np.load(fpt)
self._fitness = np.load(fpt)
self.gens_ind = np.load(fpt)
a = self._p
m = np.all(np.isfinite(a), axis=1)
if (~m).sum():
tmp = self.random_ind(size=((~m).sum(),
self._chromosome_length))
a[~m] = tmp
self._fitness[~m] = -np.inf
self.best = self._fitness.argmax()
if self._stats:
self.fit_per_gen = np.load(fpt)
self.load_extras(fpt)
try:
if self._fname_best.count('.gz'):
with gzip.open(self._fname_best, 'rb') as fpt:
load(fpt)
else:
with open(self._fname_best, 'rb') as fpt:
load(fpt)
if self._p.ndim == 2 and self._p.shape[0] == self._popsize \
and self._p.shape[1] == self._chromosome_length:
return True
except IOError:
pass
return False
def create_population(self):
"""
        Create the initial population. It first calls load_prev_run; if
        that method returns False, then it creates the population.
create_population returns True if the population was created and
False if it was loaded from a previous run
"""
if self._fname_best is not None \
and os.path.isfile(self._fname_best) \
and self.load_prev_run():
return False
if self._p is not None:
return False
self._p = self.random_ind(size=(self._popsize,
self._chromosome_length))
self._fitness = np.zeros(self._popsize)
self._fitness[:] = -np.inf
return True
def eval(self, ind):
"""
Evaluate a individual it receives the actual individual, i.e., the
chromosomes
"""
return (self._x * ind).sum(axis=1)
def predict_test_set(self, ind):
"""Predicting the test set"""
return self.predict(self._test_set, ind)
def predict(self, X, ind=None):
"""
Outputs the evaluation of the (ind)-th individual when the
features are X
"""
if ind is None:
ind = self.best
return (X * self._p[ind]).sum(axis=1)
def distance(self, y, hy):
"""
        Mean of the squared errors between y and hy
"""
return ((y - hy)**2).mean()
def fitness(self, ind):
"""
Computes the fitness of ind. If ind is an integer, then it
computes the fitness of the (ind)-th individual only if it has
not been previously computed.
"""
k = ind
if isinstance(ind, types.IntType):
if self._fitness[k] > -np.inf:
return self._fitness[k]
ind = self._p[ind]
f = self.eval(ind)
f = - self.distance(self._f, f)
if np.isnan(f):
f = -np.inf
if isinstance(k, types.IntType):
self._fitness[k] = f
self.new_best(k)
f = self._fitness[k]
return f
@property
def best(self):
"""
Get the position of the best individual
"""
if self._best is None:
raise BestNotFound()
return self._best
def get_best(self):
return self.best
def test_f(self, x):
"""This method test whether the prediction is valid. It is called from
new_best. Returns True when x is a valid prediction
"""
return ((not np.any(np.isnan(x))) and
(not np.any(np.isinf(x))))
@best.setter
def best(self, k):
"""Returns the best so far (the position in the population). It raises
an exception if the best has not been set
"""
return self.new_best(k)
def new_best_comparison(self, k):
"""
This function is called from new_best
"""
f = self._fitness[k]
return self._best_fit is None or self._best_fit < f
def new_best(self, k):
"""
This method is called to test whether the best so far is beaten by k.
Here is verified that the best individual is capable of
predicting the test set, in the case it is given.
"""
if self.new_best_comparison(k):
if self._test_set is not None:
# x = self._test_set
r = self.predict_test_set(k)
if not self.test_f(r):
self._fitness[k] = -np.inf
return False
self._pr_test_set = r
self.early_stopping = k
self._best_fit = self._fitness[k]
self._best = k
return True
return False
def pre_crossover(self, father1, father2):
"""
        This function is called before crossover; the idea
        is to test that the fathers are different.
        It returns True when the fathers are different.
"""
return not (father1 == father2)
def genetic_operators(self):
"""
Perform the genetic operations.
"""
son = None
if np.random.rand() < self._pxo:
father1 = self.tournament()
father2 = self.tournament()
while not self.pre_crossover(father1, father2):
father2 = self.tournament()
son = self.crossover(self._p[father1], self._p[father2])
if np.random.rand() < self._pm:
son = son if son is not None else self._p[self.tournament()]
son = self.mutation(son)
son = son if son is not None else self.random_ind()
return son
def kill_ind(self, kill, son):
"""
Replace the (kill)-th individual with son
"""
if self._best == kill:
raise BestNotFound("Killing the best so far!")
self._p[kill] = son
self._fitness[kill] = -np.inf
def stats(self):
"""This function is call every time an offspring is created. The
original idea is to print only statistics of the evolutionary process;
however, at this stage is also used to verify the memory in GPPDE.
This function is executed at the end of each generation and it returns
False if this is not the case, otherwise returns True.
"""
i = self.gens_ind
if i - self._last_call_to_stats < self._popsize:
return False
self._last_call_to_stats = i
if self._best is not None:
if self._stats:
self.fit_per_gen[i/self._popsize] = self._fitness[self.best]
if self._verbose:
print "Gen: " + str(i) + "/" + str(self._generations * self._popsize)\
+ " " + "%0.4f" % self._fitness[self.best]
return True
def run(self, exit_call=True):
"""
Steady state genetic algorithm. Returns True if the evolution
ended because the number of evaluations is reached. It returns False
if it receives a signal or finds a perfect solution.
The flag self._run is used to stop the evolution.
"""
self.create_population()
while (not self._timeout and
self.gens_ind < self._generations*self._popsize and self._run):
try:
self.stats()
son = self.genetic_operators()
kill = self.tournament(neg=True)
while kill == self._best:
kill = self.tournament(neg=True)
self._kill_ind = kill
self.kill_ind(kill, son)
self.gens_ind += 1
except KeyboardInterrupt:
if exit_call:
self.on_exit()
return False
self.stats()
flag = True
if not self._run:
flag = False
if exit_call:
self.on_exit()
return flag
def clear_population_except_best(self):
bs = self.best
mask = np.ones(self.popsize, dtype=np.bool)
mask[bs] = False
self.population[mask] = None
self._fitness[mask] = -np.inf
return mask
def save_extras(self, fpt):
pass
def save(self, fname=None):
"""
Save the population to fname if fname is None the save in
self._fname_best. If both are None then it does nothing.
"""
import gzip
def save_inner(fpt):
np.save(fpt, self._p)
np.save(fpt, self._fitness)
np.save(fpt, self.gens_ind)
if self._stats:
np.save(fpt, self.fit_per_gen)
self.save_extras(fpt)
fname = fname if fname is not None else self._fname_best
if fname is None:
return False
if self._save_only_best:
self.clear_population_except_best()
if fname.count('.gz'):
raise NotImplementedError('There is a bug here')
with gzip.open(fname, 'wb') as fpt:
save_inner(fpt)
else:
with open(fname, 'wb') as fpt:
save_inner(fpt)
return True
@classmethod
def init_cl(cls, generations=10000,
popsize=3, pm=0.1, pxo=0.9, seed=0,
**kwargs):
"""
Create a new instance of the class.
"""
ins = cls(generations=generations,
popsize=popsize,
seed=seed,
pxo=pxo,
pm=pm,
**kwargs)
return ins
@classmethod
def run_cl(cls, x, f, test=None,
**kwargs):
"""
Returns a trained system that does not output nan or inf neither
in the training set (i.e., x) or test set (i.e., test).
"""
ins = cls.init_cl(**kwargs).train(x, f)
if test is not None:
ins.set_test(test)
ins.run()
if ins._best is None:
raise BestNotFound()
return ins
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31.205882 | 86 | 0.550226 |
73c13404cbd29a6a00af8835804620358d33b49e | 4,788 | py | Python | test/test_task_short_read_assembler.py | michaelbarton/nucleotides-cli | 04c94773a9186dc67a887e91e3cdc9ba4a41d3fc | [
"BSD-3-Clause-LBNL"
] | null | null | null | test/test_task_short_read_assembler.py | michaelbarton/nucleotides-cli | 04c94773a9186dc67a887e91e3cdc9ba4a41d3fc | [
"BSD-3-Clause-LBNL"
] | null | null | null | test/test_task_short_read_assembler.py | michaelbarton/nucleotides-cli | 04c94773a9186dc67a887e91e3cdc9ba4a41d3fc | [
"BSD-3-Clause-LBNL"
] | null | null | null |
import os.path, docker, funcy
import nose.tools as nose
import biobox.util as docker
import biobox.container as container
import helper.application as app_helper
import helper.file as file_helper
import helper.image as image_helper
import nucleotides.filesystem as fs
import nucleotides.command.run_image as run
import nucleotides.command.post_data as post
from nucleotides.task.short_read_assembler import ShortReadAssemblerTask as task
from nose.plugins.attrib import attr
def test_create_container():
app = app_helper.setup_app_state('sra', 'inputs')
cnt = run.create_container(app)
assert "Id" in cnt
image_helper.clean_up_container(cnt["Id"])
def test_run_container():
app = app_helper.setup_app_state('sra', 'inputs')
id_ = run.create_container(app)['Id']
docker.client().start(id_)
docker.client().wait(id_)
nose.assert_equal(container.did_exit_succcessfully(id_), True)
image_helper.clean_up_container(id_)
def test_output_file_paths():
app = app_helper.setup_app_state('sra', 'intermediates')
paths = task().output_file_paths(app)
for (_, f) in paths.items():
location = fs.get_task_file_path(app, "tmp/" + f)
nose.assert_true(os.path.isfile(location))
def test_copy_output_files():
app = app_helper.setup_app_state('sra', 'intermediates')
run.copy_output_files(app)
file_helper.assert_is_file(fs.get_task_file_path(app, 'outputs/container_log/e0e8af3790'))
file_helper.assert_is_file(fs.get_task_file_path(app, 'outputs/contig_fasta/de3d9f6d31'))
def test_complete_run_through():
app = app_helper.setup_app_state('sra', 'inputs')
image_helper.execute_image(app)
file_helper.assert_is_file(fs.get_task_file_path(app, 'outputs/contig_fasta/01eb7cec61'))
file_helper.assert_is_file(fs.get_task_file_path(app, 'outputs/container_runtime_metrics/metrics.json.gz'))
file_helper.assert_is_file(fs.get_task_file_path(app, 'outputs/container_log/1099992390'))
############################################
#
# Posting results
#
############################################
def test_short_read_assembler_successful_event_with_cgroup_data():
app = app_helper.setup_app_state('sra', 'outputs')
outputs = [{
"type" : "contig_fasta",
"location" : "/local/path",
"sha256" : "digest_1",
"url" : "s3://url/dir/file"}]
event = post.create_event_request(app, outputs)
nose.assert_equal({
"task" : 4,
"success" : True,
"metrics" : {
"total_cpu_usage_in_seconds" : 53.546,
"total_cpu_usage_in_seconds_in_kernelmode" : 1.75,
"total_cpu_usage_in_seconds_in_usermode" : 11.11,
"total_memory_usage_in_mibibytes" : 175.348,
"total_rss_in_mibibytes" : 80.543,
"total_read_io_in_mibibytes" : 38.641,
"total_write_io_in_mibibytes" : 0.0,
"total_wall_clock_time_in_seconds" : 0.0},
"files" : [
{"url" : "s3://url/dir/file",
"sha256" : "digest_1",
"type" : "contig_fasta"}]}, event)
def test_short_read_assembler_successful_event_with_incomplete_cgroup_data():
app = app_helper.setup_app_state('sra', 'incomplete_cgroup')
outputs = [{
"type" : "contig_fasta",
"location" : "/local/path",
"sha256" : "digest_1",
"url" : "s3://url/dir/file"}]
event = post.create_event_request(app, outputs)
nose.assert_equal(event['metrics']['total_rss_in_mibibytes'], 0.0)
def test_short_read_assembler_successful_event_without_cgroup_data():
"""
It is possible that an assembler could finish before the first set of cgroup
metrics are collected. In this case, we would not want the task to be considered
failed as long as contig files have been produced.
"""
app = app_helper.setup_app_state('sra', 'missing_cgroup')
outputs = [{
"type" : "contig_fasta",
"location" : "/local/path",
"sha256" : "digest_1",
"url" : "s3://url/dir/file"}]
event = post.create_event_request(app, outputs)
nose.assert_equal({
"task" : 4,
"success" : True,
"metrics" : {},
"files" : [
{"url" : "s3://url/dir/file",
"sha256" : "digest_1",
"type" : "contig_fasta"}]}, event)
def test_short_read_assembler_unsuccessful_event():
app = app_helper.setup_app_state('sra', 'task')
outputs = []
event = post.create_event_request(app, outputs)
nose.assert_equal(event, {"task" : 4, "success" : False, "files" : [], "metrics" : {}})
| 37.700787 | 111 | 0.637845 |
73c192667eb0fcd644028cec3f9ff001cfb9e111 | 125 | py | Python | states/state_balance.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | states/state_balance.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | states/state_balance.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null |
from aiogram.dispatcher.filters.state import StatesGroup, State
class Balancestate(StatesGroup):
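    # Illustrative note (added): under aiogram 2.x FSM conventions the state below
    # is typically entered from a handler via `await Balancestate.balances_menu.set()`
    # and cleared through the dispatcher's FSMContext; the exact wiring is assumed
    # here and depends on the rest of this bot.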
    balances_menu = State()
| 31.25 | 63 | 0.808 |
73c1a62a25d281b0236a02d52580d0a848e3d1ec | 8,927 | py | Python | tests/test_pyscript.py | rpavani1998/cmd2 | 77d9015986bca909aae9181e2d72d0d835aeaa09 | [
"MIT"
] | null | null | null | tests/test_pyscript.py | rpavani1998/cmd2 | 77d9015986bca909aae9181e2d72d0d835aeaa09 | [
"MIT"
] | null | null | null | tests/test_pyscript.py | rpavani1998/cmd2 | 77d9015986bca909aae9181e2d72d0d835aeaa09 | [
"MIT"
] | null | null | null |
"""
Unit/functional testing for argparse completer in cmd2
Copyright 2018 Eric Lin <anselor@gmail.com>
Released under MIT license, see LICENSE file
"""
import os
import pytest
from cmd2.cmd2 import Cmd, with_argparser
from cmd2 import argparse_completer
from .conftest import run_cmd, StdOut
from cmd2.utils import namedtuple_with_defaults
class PyscriptExample(Cmd):
ratings_types = ['G', 'PG', 'PG-13', 'R', 'NC-17']
def _do_media_movies(self, args) -> None:
if not args.command:
self.do_help('media movies')
else:
print('media movies ' + str(args.__dict__))
def _do_media_shows(self, args) -> None:
        if not args.command:
            self.do_help('media shows')
else:
print('media shows ' + str(args.__dict__))
media_parser = argparse_completer.ACArgumentParser(prog='media')
media_types_subparsers = media_parser.add_subparsers(title='Media Types', dest='type')
movies_parser = media_types_subparsers.add_parser('movies')
movies_parser.set_defaults(func=_do_media_movies)
movies_commands_subparsers = movies_parser.add_subparsers(title='Commands', dest='command')
movies_list_parser = movies_commands_subparsers.add_parser('list')
movies_list_parser.add_argument('-t', '--title', help='Title Filter')
movies_list_parser.add_argument('-r', '--rating', help='Rating Filter', nargs='+',
choices=ratings_types)
movies_list_parser.add_argument('-d', '--director', help='Director Filter')
movies_list_parser.add_argument('-a', '--actor', help='Actor Filter', action='append')
movies_add_parser = movies_commands_subparsers.add_parser('add')
movies_add_parser.add_argument('title', help='Movie Title')
movies_add_parser.add_argument('rating', help='Movie Rating', choices=ratings_types)
movies_add_parser.add_argument('-d', '--director', help='Director', nargs=(1, 2), required=True)
movies_add_parser.add_argument('actor', help='Actors', nargs='*')
movies_delete_parser = movies_commands_subparsers.add_parser('delete')
shows_parser = media_types_subparsers.add_parser('shows')
shows_parser.set_defaults(func=_do_media_shows)
shows_commands_subparsers = shows_parser.add_subparsers(title='Commands', dest='command')
shows_list_parser = shows_commands_subparsers.add_parser('list')
@with_argparser(media_parser)
def do_media(self, args):
"""Media management command demonstrates multiple layers of subcommands being handled by AutoCompleter"""
func = getattr(args, 'func', None)
if func is not None:
# Call whatever subcommand function was selected
func(self, args)
else:
# No subcommand was provided, so call help
self.do_help('media')
foo_parser = argparse_completer.ACArgumentParser(prog='foo')
foo_parser.add_argument('-c', dest='counter', action='count')
foo_parser.add_argument('-t', dest='trueval', action='store_true')
foo_parser.add_argument('-n', dest='constval', action='store_const', const=42)
foo_parser.add_argument('variable', nargs=(2, 3))
foo_parser.add_argument('optional', nargs='?')
foo_parser.add_argument('zeroormore', nargs='*')
@with_argparser(foo_parser)
def do_foo(self, args):
print('foo ' + str(args.__dict__))
if self._in_py:
FooResult = namedtuple_with_defaults('FooResult',
['counter', 'trueval', 'constval',
'variable', 'optional', 'zeroormore'])
self._last_result = FooResult(**{'counter': args.counter,
'trueval': args.trueval,
'constval': args.constval,
'variable': args.variable,
'optional': args.optional,
'zeroormore': args.zeroormore})
bar_parser = argparse_completer.ACArgumentParser(prog='bar')
bar_parser.add_argument('first')
bar_parser.add_argument('oneormore', nargs='+')
bar_parser.add_argument('-a', dest='aaa')
@with_argparser(bar_parser)
def do_bar(self, args):
out = 'bar '
arg_dict = args.__dict__
keys = list(arg_dict.keys())
keys.sort()
out += '{'
for key in keys:
out += "'{}':'{}'".format(key, arg_dict[key])
print(out)
@pytest.fixture
def ps_app():
c = PyscriptExample()
c.stdout = StdOut()
return c
class PyscriptCustomNameExample(Cmd):
def __init__(self):
super().__init__()
self.pyscript_name = 'custom'
def do_echo(self, out):
print(out)
@pytest.fixture
def ps_echo():
c = PyscriptCustomNameExample()
c.stdout = StdOut()
return c
@pytest.mark.parametrize('command, pyscript_file', [
('help', 'help.py'),
('help media', 'help_media.py'),
])
def test_pyscript_help(ps_app, capsys, request, command, pyscript_file):
test_dir = os.path.dirname(request.module.__file__)
python_script = os.path.join(test_dir, 'pyscript', pyscript_file)
expected = run_cmd(ps_app, command)
assert len(expected) > 0
assert len(expected[0]) > 0
out = run_cmd(ps_app, 'pyscript {}'.format(python_script))
assert len(out) > 0
assert out == expected
@pytest.mark.parametrize('command, pyscript_file', [
('media movies list', 'media_movies_list1.py'),
('media movies list', 'media_movies_list2.py'),
('media movies list', 'media_movies_list3.py'),
('media movies list -a "Mark Hamill"', 'media_movies_list4.py'),
('media movies list -a "Mark Hamill" -a "Carrie Fisher"', 'media_movies_list5.py'),
('media movies list -r PG', 'media_movies_list6.py'),
('media movies list -r PG PG-13', 'media_movies_list7.py'),
('media movies add "My Movie" PG-13 --director "George Lucas" "J. J. Abrams"',
'media_movies_add1.py'),
('media movies add "My Movie" PG-13 --director "George Lucas" "J. J. Abrams" "Mark Hamill"',
'media_movies_add2.py'),
('foo aaa bbb -ccc -t -n', 'foo1.py'),
('foo 11 22 33 44 -ccc -t -n', 'foo2.py'),
('foo 11 22 33 44 55 66 -ccc', 'foo3.py'),
('bar 11 22', 'bar1.py'),
])
def test_pyscript_out(ps_app, capsys, request, command, pyscript_file):
test_dir = os.path.dirname(request.module.__file__)
python_script = os.path.join(test_dir, 'pyscript', pyscript_file)
run_cmd(ps_app, command)
expected, _ = capsys.readouterr()
assert len(expected) > 0
run_cmd(ps_app, 'pyscript {}'.format(python_script))
out, _ = capsys.readouterr()
assert len(out) > 0
assert out == expected
@pytest.mark.parametrize('command, error', [
('app.noncommand', 'AttributeError'),
('app.media.noncommand', 'AttributeError'),
('app.media.movies.list(artist="Invalid Keyword")', 'TypeError'),
('app.foo(counter="a")', 'TypeError'),
('app.foo("aaa")', 'ValueError'),
])
def test_pyscript_errors(ps_app, capsys, command, error):
run_cmd(ps_app, 'py {}'.format(command))
_, err = capsys.readouterr()
assert len(err) > 0
assert 'Traceback' in err
assert error in err
@pytest.mark.parametrize('pyscript_file, exp_out', [
('foo4.py', 'Success'),
])
def test_pyscript_results(ps_app, capsys, request, pyscript_file, exp_out):
test_dir = os.path.dirname(request.module.__file__)
python_script = os.path.join(test_dir, 'pyscript', pyscript_file)
run_cmd(ps_app, 'pyscript {}'.format(python_script))
expected, _ = capsys.readouterr()
assert len(expected) > 0
assert exp_out in expected
@pytest.mark.parametrize('expected, pyscript_file', [
("['_relative_load', 'alias', 'bar', 'cmd_echo', 'edit', 'eof', 'eos', 'foo', 'help', 'history', 'load', 'media', 'py', 'pyscript', 'quit', 'set', 'shell', 'shortcuts', 'unalias']",
'pyscript_dir1.py'),
("['movies', 'shows']", 'pyscript_dir2.py')
])
def test_pyscript_dir(ps_app, capsys, request, expected, pyscript_file):
test_dir = os.path.dirname(request.module.__file__)
python_script = os.path.join(test_dir, 'pyscript', pyscript_file)
run_cmd(ps_app, 'pyscript {}'.format(python_script))
out, _ = capsys.readouterr()
out = out.strip()
assert len(out) > 0
assert out == expected
def test_pyscript_custom_name(ps_echo, capsys, request):
message = 'blah!'
test_dir = os.path.dirname(request.module.__file__)
python_script = os.path.join(test_dir, 'pyscript', 'custom_echo.py')
run_cmd(ps_echo, 'pyscript {}'.format(python_script))
expected, _ = capsys.readouterr()
assert len(expected) > 0
expected = expected.splitlines()
assert message == expected[0]
| 37.041494 | 185 | 0.646802 |
73c1aacf6cb41161e7ed44dd3ac9484b85dc4770 | 17,767 | py | Python | arkane/logs/terachem.py | yubioinfo/RMG-Py | bfffc5b650934fe7ced117939d6b3b219f2317e5 | [
"MIT"
] | 1 | 2020-03-17T13:16:51.000Z | 2020-03-17T13:16:51.000Z | arkane/logs/terachem.py | yubioinfo/RMG-Py | bfffc5b650934fe7ced117939d6b3b219f2317e5 | [
"MIT"
] | null | null | null | arkane/logs/terachem.py | yubioinfo/RMG-Py | bfffc5b650934fe7ced117939d6b3b219f2317e5 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
Arkane TeraChem module
Used to parse TeraChem output files
"""
import logging
import math
import os.path
import numpy as np
import rmgpy.constants as constants
from rmgpy.statmech import HarmonicOscillator, Conformer
from arkane.common import check_conformer_energy, get_element_mass, symbol_by_number
from arkane.exceptions import LogError
from arkane.logs.log import Log
################################################################################
class TeraChemLog(Log):
"""
Represent a log file from TeraChem. The attribute `path` refers to the
location on disk of the TeraChem log file of interest. Methods are provided
to extract a variety of information into Arkane classes and/or NumPy arrays.
"""
def __init__(self, path):
super(TeraChemLog, self).__init__(path)
def get_number_of_atoms(self):
"""
Return the number of atoms in the molecular configuration used in the TeraChem output file.
Accepted output files: TeraChem's log file, xyz format file, TeraChem's output.geometry file.
"""
n_atoms = 0
with open(self.path, 'r') as f:
file_extension = os.path.splitext(self.path)[1]
if file_extension == '.xyz':
n_atoms = int(f.readline())
else:
line = f.readline()
while line and n_atoms == 0:
if 'Total atoms:' in line:
n_atoms = int(line.split()[-1])
elif '****** QM coordinates ******' in line \
or 'Type X Y Z Mass' in line:
line = f.readline()
while line != '\n':
n_atoms += 1
line = f.readline()
line = f.readline()
return n_atoms
def load_force_constant_matrix(self):
"""
Return the force constant matrix (in Cartesian coordinates) from the
TeraChem log file. If multiple such matrices are identified,
only the last is returned. The units of the returned force constants
are J/m^2. If no force constant matrix can be found in the log file, ``None`` is returned.
"""
force = None
n_atoms = self.get_number_of_atoms()
n_rows = n_atoms * 3
with open(self.path, 'r') as f:
line = f.readline()
while line != '':
# Read force constant matrix
if '*** Hessian Matrix (Hartree/Bohr^2) ***' in line:
force = np.zeros((n_rows, n_rows), np.float64)
for i in range(int(math.ceil(n_rows / 6.0))):
# Matrix element rows
for j in range(n_rows):
line = f.readline()
while len(line.split()) not in [4, 7]:
# This is a header row
line = f.readline()
data = line.split()
for k in range(len(data) - 1):
force[j, i * 6 + k] = float(data[k + 1])
# Convert from atomic units (Hartree/Bohr^2) to SI (J/m^2)
force *= 4.35974417e-18 / 5.291772108e-11 ** 2
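                    # (added note) numerically this factor is
                    # 4.35974417e-18 / (5.291772108e-11)**2 ~= 1556.9,
                    # i.e. about 1.56e3 J m^-2 per Hartree/Bohr^2.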
line = f.readline()
return force
def load_geometry(self):
"""
Return the optimum geometry of the molecular configuration from the
TeraChem log file. If multiple such geometries are identified, only the
last is returned.
"""
coords, numbers, masses = list(), list(), list()
with open(self.path) as f:
lines = f.readlines()
num_of_atoms = None # used to verify the result
if os.path.splitext(self.path)[1] == '.xyz':
skip_line = False
for line in lines:
if not skip_line and line.rstrip():
if len(line.split()) == 1 and line[0].isdigit():
num_of_atoms = int(line.rstrip())
skip_line = True # the next line is just a comment, skip it
continue
splits = line.split()
coords.append([float(c) for c in splits[1:]])
mass, num = get_element_mass(splits[0])
masses.append(mass)
numbers.append(num)
if skip_line:
skip_line = False
coords, numbers, masses = list(), list(), list()
else:
for i, line in enumerate(lines):
if 'Type X Y Z Mass' in line:
# this is an output.geometry file
j = i + 1
while lines[j].strip():
# example: ' C 0.6640965100 0.0039526500 0.0710079300 12.0000000000'
# or: ' C 0.512276 -0.516064 0.779232'
splits = lines[j].split()
coords.append([float(c) for c in splits[1:-1]])
masses.append(float(splits[-1]))
numbers.append(list(symbol_by_number.keys())[list(symbol_by_number.values()).index(splits[0])])
j += 1
break
if '*** Reference Geometry ***' in line:
# this is an output.out file, e.g., from a freq run
j = i + 2
while lines[j].strip():
# example: ' C 0.512276 -0.516064 0.779232'
splits = lines[j].split()
coords.append([float(c) for c in splits[1:]])
mass, num = get_element_mass(splits[0])
masses.append(mass)
numbers.append(num)
j += 1
break
coords = np.array(coords, np.float64)
        numbers = np.array(numbers, int)
masses = np.array(masses, np.float64)
if len(coords) == 0 or len(numbers) == 0 or len(masses) == 0 \
or ((len(coords) != num_of_atoms or len(numbers) != num_of_atoms or len(masses) != num_of_atoms)
and num_of_atoms is not None):
raise LogError(f'Unable to read atoms from TeraChem geometry output file {self.path}. '
f'If this is a TeraChem optimization log file, try using either the '
f'frequencies calculation log file (important if torsion modes exist) or '
f'the "output.geometry" or a ".xyz" file instead.')
return coords, numbers, masses
def load_conformer(self, symmetry=None, spin_multiplicity=0, optical_isomers=None, label=''):
"""
Load the molecular degree of freedom data from an output file created as the result of a
TeraChem "Freq" calculation. As TeraChem's guess of the external symmetry number might not always correct,
you can use the `symmetry` parameter to substitute your own value;
if not provided, the value in the TeraChem output file will be adopted.
"""
modes, unscaled_freqs = list(), list()
converged = False
if optical_isomers is None:
_optical_isomers = self.get_symmetry_properties()[0]
if optical_isomers is None:
optical_isomers = _optical_isomers
with open(self.path, 'r') as f:
line = f.readline()
while line != '':
# Read spin multiplicity if not explicitly given
if 'Spin multiplicity' in line and spin_multiplicity == 0 and len(line.split()) == 3:
spin_multiplicity = int(float(line.split()[-1]))
logging.debug(f'Conformer {label} is assigned a spin multiplicity of {spin_multiplicity}')
# Read vibrational modes
elif 'Mode Eigenvalue(AU) Frequency(cm-1)' in line:
line = f.readline()
while line != '\n':
# example:
# 'Mode Eigenvalue(AU) Frequency(cm-1) Intensity(km/mol) Vib.Temp(K) ZPE(AU) ...'
# ' 1 0.0331810528 170.5666870932 52.2294230772 245.3982965841 0.0003885795 ...'
if 'i' not in line.split()[2]:
# only consider non-imaginary frequencies in this function
unscaled_freqs.append(float(line.split()[2]))
line = f.readline()
if 'Vibrational Frequencies/Thermochemical Analysis' in line:
converged = True
line = f.readline()
if not len(unscaled_freqs):
raise LogError(f'Could not read frequencies from TeraChem log file {self.path}')
if not converged:
raise LogError(f'TeraChem job {self.path} did not converge.')
modes.append(HarmonicOscillator(frequencies=(unscaled_freqs, "cm^-1")))
return Conformer(E0=(0.0, "kJ/mol"), modes=modes, spin_multiplicity=spin_multiplicity,
optical_isomers=optical_isomers), unscaled_freqs
def load_energy(self, zpe_scale_factor=1.):
"""
Load the energy in J/mol from a TeraChem log file. Only the last energy
in the file is returned, unless the log file represents a frequencies calculation,
in which case the first energy is returned. The zero-point energy is *not* included
in the returned value.
"""
e_elect, return_first = None, False
with open(self.path, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if 'FREQUENCY ANALYSIS' in line:
return_first = True
if 'Ground state energy (a.u.):' in line:
e_elect = float(lines[i + 1].strip())
if return_first:
break
if 'FINAL ENERGY:' in line:
# example: 'FINAL ENERGY: -114.5008455547 a.u.'
e_elect = float(line.split()[2])
if return_first:
break
if e_elect is None:
raise LogError(f'Unable to find energy in TeraChem output file {self.path}.')
return e_elect * constants.E_h * constants.Na
def load_zero_point_energy(self):
"""
Load the unscaled zero-point energy in J/mol from a TeraChem log file.
"""
zpe = None
with open(self.path, 'r') as f:
for line in f:
if 'Vibrational zero-point energy (ZPE)' in line:
# example:
# 'Vibrational zero-point energy (ZPE) = 243113.467652369843563065 J/mol = 0.09259703 AU'
zpe = float(line.split('J/mol')[0].split()[-1])
logging.debug(f'ZPE is {zpe}')
if zpe is not None:
return zpe
else:
raise LogError(f'Unable to find zero-point energy in TeraChem output file {self.path}.')
def load_scan_energies(self):
"""
Extract the optimized energies in J/mol from a TeraChem torsional scan log file.
"""
v_list = list()
with open(self.path, 'r') as f:
lines = f.readlines()
v_index, expected_num_of_points = 0, 0
for line in lines:
if 'Scan Cycle' in line:
# example: '-=#=- Scan Cycle 5/37 -=#=-'
v_index += 1
if not expected_num_of_points:
expected_num_of_points = int(line.split()[3].split('/')[1])
if 'Optimized Energy:' in line:
# example: '-=#=- Optimized Energy: -155.0315243910 a.u.'
v = float(line.split()[3])
if len(v_list) == v_index - 1:
# append this point, it is in order
v_list.append(v)
elif len(v_list) < v_index - 1:
# seems like points in this scan are missing... add None's instead,
# later they'll be removed along with the corresponding angles
                    v_list.extend([None] * (v_index - 1 - len(v_list)))
                    v_list.append(v)  # record the current point after padding the missing ones
else:
                    # we added more points than we should have, something is wrong with the log file or this method
raise LogError(f'Could not parse scan energies from {self.path}')
logging.info(' Assuming {0} is the output from a TeraChem PES scan...'.format(os.path.basename(self.path)))
        # remove the None placeholders (missing scan points) and the corresponding angles before
        # any numerical processing, since None entries cannot be treated as floats
        angles = np.arange(0.0, 2 * math.pi + 0.00001, 2 * math.pi / (len(v_list) - 1), np.float64)
        valid_indices = [i for i, entry in enumerate(v_list) if entry is not None]
        angles = angles[valid_indices]
        v_list = np.array([v_list[i] for i in valid_indices], np.float64)
        # check to see if the scanlog indicates that one of the reacting species may not be the lowest energy conformer
        check_conformer_energy(v_list, self.path)
        # Adjust energies to be relative to minimum energy conformer
        # Also convert units from Hartree/particle to J/mol
        v_list -= np.min(v_list)
        v_list *= constants.E_h * constants.Na
if v_index != expected_num_of_points:
raise LogError(f'Expected to find {expected_num_of_points} scan points in TeraChem scan log file '
f'{self.path}, but found: {v_index}')
return v_list, angles
def load_negative_frequency(self):
"""
Return the imaginary frequency from a transition state frequency
calculation in cm^-1.
"""
frequency = None
with open(self.path, 'r') as f:
line = f.readline()
while line != '':
# Read vibrational modes
if 'Mode Eigenvalue(AU) Frequency(cm-1)' in line:
line = f.readline()
# example:
# 'Mode Eigenvalue(AU) Frequency(cm-1) Intensity(km/mol) Vib.Temp(K) ZPE(AU) ...'
# ' 1 0.0331810528 170.5666870932i 52.2294230772 245.3982965841 0.0003885795 ...'
frequency = -1 * float(line.split()[2][:-1]) # remove 'i'
break
                line = f.readline()
if frequency is None:
raise LogError(f'Unable to find imaginary frequency in TeraChem output file {self.path}.')
return frequency
def load_scan_pivot_atoms(self):
"""Not implemented for TeraChem"""
raise NotImplementedError('The load_scan_pivot_atoms method is not implemented for TeraChem Logs')
def load_scan_frozen_atoms(self):
"""Not implemented for TeraChem"""
raise NotImplementedError('The load_scan_frozen_atoms method is not implemented for TeraChem Logs')
def get_D1_diagnostic(self):
"""Not implemented for TeraChem"""
raise NotImplementedError('The get_D1_diagnostic method is not implemented for TeraChem Logs')
def get_T1_diagnostic(self):
"""Not implemented for TeraChem"""
raise NotImplementedError('The get_T1_diagnostic method is not implemented for TeraChem Logs')
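# A minimal usage sketch (illustrative only; the file names are hypothetical and the calls assume
# completed TeraChem jobs whose output matches the formats parsed above):
#
#     freq_log = TeraChemLog('output.out')
#     conformer, unscaled_freqs = freq_log.load_conformer(label='species1')
#     e_elect = freq_log.load_energy()                    # J/mol, without ZPE
#     zpe = freq_log.load_zero_point_energy()             # J/mol, unscaled
#     neg_freq = TeraChemLog('ts_output.out').load_negative_frequency()  # cm^-1, for a TS freq job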
| 49.216066 | 119 | 0.522542 |
73c1b03c5cb3262cea1378c78d355827e450160b | 2,215 | py | Python | python/sdk/merlin/validation.py | Omrisnyk/merlin | cc2dbeabe52ac6e413db7f7647ed54c7edb7695f | [
"Apache-2.0"
] | 97 | 2020-10-15T08:03:56.000Z | 2022-03-31T22:30:59.000Z | python/sdk/merlin/validation.py | babywyrm/merlin | 29f669ab613d6808d0186067b948496b508caa96 | [
"Apache-2.0"
] | 91 | 2020-10-26T03:15:27.000Z | 2022-03-31T10:19:55.000Z | python/sdk/merlin/validation.py | babywyrm/merlin | 29f669ab613d6808d0186067b948496b508caa96 | [
"Apache-2.0"
] | 26 | 2020-10-21T03:53:36.000Z | 2022-03-16T06:43:15.000Z | # Copyright 2020 The Merlin Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import listdir
from os.path import isdir
def validate_model_dir(input_model_type, target_model_type, model_dir):
"""
Validates user-provided model directory based on file structure.
For tensorflow models, checking is only done on the subdirectory
with the largest version number.
:param input_model_type: type of given model
:param target_model_type: type of supposed model, dependent on log_<model type>(...)
:param model_dir: directory containing serialised model file
"""
from merlin.model import ModelType
if target_model_type == None and input_model_type == ModelType.TENSORFLOW:
path_isdir = [isdir(f'{model_dir}/{path}') for path in listdir(model_dir)]
if len(listdir(model_dir)) > 0 and all(path_isdir):
model_dir = f'{model_dir}/{sorted(listdir(model_dir))[-1]}'
if input_model_type != ModelType.PYFUNC and input_model_type != ModelType.PYFUNC_V2 and input_model_type != ModelType.CUSTOM:
file_structure_reqs_map = {
ModelType.XGBOOST: ['model.bst'],
ModelType.TENSORFLOW: ['saved_model.pb', 'variables'],
ModelType.SKLEARN: ['model.joblib'],
ModelType.PYTORCH: ['model.pt'],
ModelType.ONNX: ['model.onnx']
}
input_structure = listdir(model_dir)
file_structure_req = file_structure_reqs_map[input_model_type]
if not all([req in input_structure for req in file_structure_req]):
raise Exception(
f"Provided {input_model_type.name} model directory should contain all of the following: {file_structure_req}")
| 45.204082 | 129 | 0.710609 |
73c1d5a6aab4fb18848c8b22cfcc25cb949f0429 | 351 | py | Python | pfstratsim/triggers/trigger_interface.py | aarondorffeld/portfolio-strategy-simulation | 8c4771df24e3c45865c7df2a68e51ef018f7be1b | [
"MIT"
] | null | null | null | pfstratsim/triggers/trigger_interface.py | aarondorffeld/portfolio-strategy-simulation | 8c4771df24e3c45865c7df2a68e51ef018f7be1b | [
"MIT"
] | 42 | 2021-11-06T15:19:49.000Z | 2022-01-23T16:38:21.000Z | pfstratsim/triggers/trigger_interface.py | aarondorffeld/portfolio-strategy-simulation | 8c4771df24e3c45865c7df2a68e51ef018f7be1b | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
class TriggerInterface(metaclass=ABCMeta):
"""Trigger algorithm interface to be derived.
This is the strategy class on the strategy pattern for trigger algorithms.
"""
@abstractmethod
def __init__(self):
pass
@abstractmethod
def assess(self):
pass
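# A minimal sketch of a concrete trigger strategy (hypothetical; the time-based rule and the
# `interval_sec` parameter are illustrative assumptions, not part of this package):
#
#     import time
#
#     class PeriodicTrigger(TriggerInterface):
#         def __init__(self, interval_sec):
#             self._interval_sec = interval_sec
#             self._last_time = None
#
#         def assess(self):
#             # Fire once at least `interval_sec` seconds have elapsed since the last trigger.
#             now = time.time()
#             if self._last_time is None or now - self._last_time >= self._interval_sec:
#                 self._last_time = now
#                 return True
#             return False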
| 21.9375 | 79 | 0.663818 |
73c21a16ffaa4c557b919f11ba0c830e8e523c89 | 528 | py | Python | microcosm_resourcesync/formatters/json_formatter.py | globality-corp/microcosm-resourcesync | a7c8f9e2f60e27ac7368e00e5c646d74006afcfb | [
"Apache-2.0"
] | null | null | null | microcosm_resourcesync/formatters/json_formatter.py | globality-corp/microcosm-resourcesync | a7c8f9e2f60e27ac7368e00e5c646d74006afcfb | [
"Apache-2.0"
] | 8 | 2017-02-02T20:01:53.000Z | 2019-11-08T11:42:34.000Z | microcosm_resourcesync/formatters/json_formatter.py | globality-corp/microcosm-resourcesync | a7c8f9e2f60e27ac7368e00e5c646d74006afcfb | [
"Apache-2.0"
] | 1 | 2019-03-17T03:46:33.000Z | 2019-03-17T03:46:33.000Z | """
JSON Formatter
"""
from json import dumps, loads
from microcosm_resourcesync.formatters.base import Formatter
class JSONFormatter(Formatter):
def load(self, data):
return loads(data)
def dump(self, dct):
# ensure deterministic output order for easier diffs
return dumps(dct, sort_keys=True) + "\n"
@property
def extension(self):
return ".json"
@property
def mime_types(self):
return [
"application/json",
"text/json",
]
| 18.206897 | 60 | 0.611742 |
73c22ca8f32fd947c6aacc4d4ad6fe7cf6954597 | 1,673 | py | Python | datasets/CLS/constructor/base_interface.py | zhangzhengde0225/SwinTrack | 526be17f8ef266cb924c6939bd8dda23e9b73249 | [
"MIT"
] | 143 | 2021-12-03T02:33:36.000Z | 2022-03-29T00:01:48.000Z | datasets/CLS/constructor/base_interface.py | zhangzhengde0225/SwinTrack | 526be17f8ef266cb924c6939bd8dda23e9b73249 | [
"MIT"
] | 33 | 2021-12-03T10:32:05.000Z | 2022-03-31T02:13:55.000Z | datasets/CLS/constructor/base_interface.py | zhangzhengde0225/SwinTrack | 526be17f8ef266cb924c6939bd8dda23e9b73249 | [
"MIT"
] | 24 | 2021-12-04T06:46:42.000Z | 2022-03-30T07:57:47.000Z | from datasets.base.common.constructor import BaseDatasetConstructorGenerator, BaseImageDatasetConstructor, \
BaseDatasetImageConstructorGenerator, BaseDatasetImageConstructor
class ImageClassificationImageConstructorGenerator(BaseDatasetImageConstructorGenerator):
def __init__(self, image: dict, root_path: str, category_id_name_map: dict, context):
super(ImageClassificationImageConstructorGenerator, self).__init__(context)
self.image = image
self.root_path = root_path
self.category_id_name_map = category_id_name_map
def __enter__(self):
return BaseDatasetImageConstructor(self.image, self.root_path, self.context, self.category_id_name_map)
class ImageClassificationDatasetConstructor(BaseImageDatasetConstructor):
def __init__(self, dataset: dict, root_path: str, version: int, context):
super(ImageClassificationDatasetConstructor, self).__init__(dataset, root_path, version, context)
def new_image(self):
image = {}
self.dataset['images'].append(image)
assert 'category_id_name_map' in self.dataset
category_id_name_map = self.dataset['category_id_name_map']
return ImageClassificationImageConstructorGenerator(image, self.root_path, category_id_name_map, self.context)
class ImageClassificationDatasetConstructorGenerator(BaseDatasetConstructorGenerator):
def __init__(self, dataset: dict, root_path: str, version: int):
super(ImageClassificationDatasetConstructorGenerator, self).__init__(dataset, root_path, version,
ImageClassificationDatasetConstructor)
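# A rough usage sketch (illustrative; the dataset dict layout shown here follows new_image() above,
# but the image-level setter calls depend on the Base* classes imported at the top and are
# assumptions, not a documented API):
#
#     dataset = {'images': [], 'category_id_name_map': {0: 'cat', 1: 'dog'}}
#     with ImageClassificationDatasetConstructorGenerator(dataset, '/data/images', version=1) as constructor:
#         with constructor.new_image() as image:
#             ...  # populate the image entry via the BaseDatasetImageConstructor interface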
| 52.28125 | 118 | 0.756127 |
73c2416b16b81c823a23c835bb234380f572dc50 | 9,301 | py | Python | python/ray/rllib/optimizers/multi_gpu_optimizer.py | cumttang/ray | eb1e5fa2cf26233701ccbda3eb8a301ecd418d8c | [
"Apache-2.0"
] | 2 | 2019-10-08T13:31:08.000Z | 2019-10-22T18:34:52.000Z | python/ray/rllib/optimizers/multi_gpu_optimizer.py | cumttang/ray | eb1e5fa2cf26233701ccbda3eb8a301ecd418d8c | [
"Apache-2.0"
] | 1 | 2018-12-26T01:09:50.000Z | 2018-12-26T01:09:50.000Z | python/ray/rllib/optimizers/multi_gpu_optimizer.py | cumttang/ray | eb1e5fa2cf26233701ccbda3eb8a301ecd418d8c | [
"Apache-2.0"
] | 6 | 2019-03-12T05:37:35.000Z | 2020-03-09T12:25:17.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import numpy as np
from collections import defaultdict
import tensorflow as tf
import ray
from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.optimizers.multi_gpu_impl import LocalSyncParallelOptimizer
from ray.rllib.optimizers.rollout import collect_samples, \
collect_samples_straggler_mitigation
from ray.rllib.utils.annotations import override
from ray.rllib.utils.timer import TimerStat
from ray.rllib.evaluation.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
logger = logging.getLogger(__name__)
class LocalMultiGPUOptimizer(PolicyOptimizer):
"""A synchronous optimizer that uses multiple local GPUs.
Samples are pulled synchronously from multiple remote evaluators,
concatenated, and then split across the memory of multiple local GPUs.
A number of SGD passes are then taken over the in-memory data. For more
details, see `multi_gpu_impl.LocalSyncParallelOptimizer`.
    This optimizer is TensorFlow-specific and requires the underlying
    PolicyGraph to be a TFPolicyGraph instance that supports `.copy()`.
Note that all replicas of the TFPolicyGraph will merge their
extra_compute_grad and apply_grad feed_dicts and fetches. This
may result in unexpected behavior.
"""
@override(PolicyOptimizer)
def _init(self,
sgd_batch_size=128,
num_sgd_iter=10,
sample_batch_size=200,
num_envs_per_worker=1,
train_batch_size=1024,
num_gpus=0,
standardize_fields=[],
straggler_mitigation=False):
self.batch_size = sgd_batch_size
self.num_sgd_iter = num_sgd_iter
self.num_envs_per_worker = num_envs_per_worker
self.sample_batch_size = sample_batch_size
self.train_batch_size = train_batch_size
self.straggler_mitigation = straggler_mitigation
if not num_gpus:
self.devices = ["/cpu:0"]
else:
self.devices = [
"/gpu:{}".format(i) for i in range(int(math.ceil(num_gpus)))
]
self.batch_size = int(sgd_batch_size / len(self.devices)) * len(
self.devices)
assert self.batch_size % len(self.devices) == 0
assert self.batch_size >= len(self.devices), "batch size too small"
self.per_device_batch_size = int(self.batch_size / len(self.devices))
self.sample_timer = TimerStat()
self.load_timer = TimerStat()
self.grad_timer = TimerStat()
self.update_weights_timer = TimerStat()
self.standardize_fields = standardize_fields
logger.info("LocalMultiGPUOptimizer devices {}".format(self.devices))
self.policies = dict(
self.local_evaluator.foreach_trainable_policy(lambda p, i: (i, p)))
logger.debug("Policies to train: {}".format(self.policies))
for policy_id, policy in self.policies.items():
if not isinstance(policy, TFPolicyGraph):
raise ValueError(
"Only TF policies are supported with multi-GPU. Try using "
"the simple optimizer instead.")
# per-GPU graph copies created below must share vars with the policy
# reuse is set to AUTO_REUSE because Adam nodes are created after
# all of the device copies are created.
self.optimizers = {}
with self.local_evaluator.tf_sess.graph.as_default():
with self.local_evaluator.tf_sess.as_default():
for policy_id, policy in self.policies.items():
with tf.variable_scope(policy_id, reuse=tf.AUTO_REUSE):
if policy._state_inputs:
rnn_inputs = policy._state_inputs + [
policy._seq_lens
]
else:
rnn_inputs = []
self.optimizers[policy_id] = (
LocalSyncParallelOptimizer(
policy._optimizer, self.devices,
[v
for _, v in policy._loss_inputs], rnn_inputs,
self.per_device_batch_size, policy.copy))
self.sess = self.local_evaluator.tf_sess
self.sess.run(tf.global_variables_initializer())
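    # Worked example of the batch splitting above (illustrative numbers): with num_gpus=3 and
    # sgd_batch_size=500, devices == ['/gpu:0', '/gpu:1', '/gpu:2'], so batch_size is rounded down
    # to int(500 / 3) * 3 == 498 and per_device_batch_size == 166; each SGD minibatch of 498
    # samples is then loaded 166-per-device by LocalSyncParallelOptimizer.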
@override(PolicyOptimizer)
def step(self):
with self.update_weights_timer:
if self.remote_evaluators:
weights = ray.put(self.local_evaluator.get_weights())
for e in self.remote_evaluators:
e.set_weights.remote(weights)
with self.sample_timer:
if self.remote_evaluators:
if self.straggler_mitigation:
samples = collect_samples_straggler_mitigation(
self.remote_evaluators, self.train_batch_size)
else:
samples = collect_samples(
self.remote_evaluators, self.sample_batch_size,
self.num_envs_per_worker, self.train_batch_size)
if samples.count > self.train_batch_size * 2:
logger.info(
"Collected more training samples than expected "
"(actual={}, train_batch_size={}). ".format(
samples.count, self.train_batch_size) +
"This may be because you have many workers or "
"long episodes in 'complete_episodes' batch mode.")
else:
samples = self.local_evaluator.sample()
# Handle everything as if multiagent
if isinstance(samples, SampleBatch):
samples = MultiAgentBatch({
DEFAULT_POLICY_ID: samples
}, samples.count)
for policy_id, policy in self.policies.items():
if policy_id not in samples.policy_batches:
continue
batch = samples.policy_batches[policy_id]
for field in self.standardize_fields:
value = batch[field]
standardized = (value - value.mean()) / max(1e-4, value.std())
batch[field] = standardized
# Important: don't shuffle RNN sequence elements
if not policy._state_inputs:
batch.shuffle()
num_loaded_tuples = {}
with self.load_timer:
for policy_id, batch in samples.policy_batches.items():
if policy_id not in self.policies:
continue
policy = self.policies[policy_id]
tuples = policy._get_loss_inputs_dict(batch)
data_keys = [ph for _, ph in policy._loss_inputs]
if policy._state_inputs:
state_keys = policy._state_inputs + [policy._seq_lens]
else:
state_keys = []
num_loaded_tuples[policy_id] = (
self.optimizers[policy_id].load_data(
self.sess, [tuples[k] for k in data_keys],
[tuples[k] for k in state_keys]))
fetches = {}
with self.grad_timer:
for policy_id, tuples_per_device in num_loaded_tuples.items():
optimizer = self.optimizers[policy_id]
num_batches = (
int(tuples_per_device) // int(self.per_device_batch_size))
logger.debug("== sgd epochs for {} ==".format(policy_id))
for i in range(self.num_sgd_iter):
iter_extra_fetches = defaultdict(list)
permutation = np.random.permutation(num_batches)
for batch_index in range(num_batches):
batch_fetches = optimizer.optimize(
self.sess, permutation[batch_index] *
self.per_device_batch_size)
for k, v in batch_fetches.items():
iter_extra_fetches[k].append(v)
logger.debug("{} {}".format(i,
_averaged(iter_extra_fetches)))
fetches[policy_id] = _averaged(iter_extra_fetches)
self.num_steps_sampled += samples.count
self.num_steps_trained += samples.count
return fetches
@override(PolicyOptimizer)
def stats(self):
return dict(
PolicyOptimizer.stats(self), **{
"sample_time_ms": round(1000 * self.sample_timer.mean, 3),
"load_time_ms": round(1000 * self.load_timer.mean, 3),
"grad_time_ms": round(1000 * self.grad_timer.mean, 3),
"update_time_ms": round(1000 * self.update_weights_timer.mean,
3),
})
def _averaged(kv):
out = {}
for k, v in kv.items():
if v[0] is not None:
out[k] = np.mean(v)
return out
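# Worked example (illustrative): per-key averaging over the minibatch fetches collected above,
# skipping keys whose first recorded value is None.
#
#     _averaged({'policy_loss': [0.5, 0.3], 'kl': [None, None]})  # -> {'policy_loss': 0.4}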
| 43.260465 | 79 | 0.586819 |
73c25655490a686948dde74c36e80ed36b635f0a | 1,482 | py | Python | floodsystem/flood.py | ryrolio/IA_Lent_Project | 9023dfb199b5db7676fef61f0fca46ab69707461 | [
"MIT"
] | null | null | null | floodsystem/flood.py | ryrolio/IA_Lent_Project | 9023dfb199b5db7676fef61f0fca46ab69707461 | [
"MIT"
] | null | null | null | floodsystem/flood.py | ryrolio/IA_Lent_Project | 9023dfb199b5db7676fef61f0fca46ab69707461 | [
"MIT"
] | 1 | 2022-01-28T11:46:05.000Z | 2022-01-28T11:46:05.000Z | """This module contains a collection of functions related to flood data."""
from .stationdata import *
from .station import MonitoringStation
from .utils import sorted_by_key
### TASK 2B
def stations_level_over_threshold(stations, tol):
"""Returns a list of stations that have a relative water level over the threshold"""
# Initialise a List
list_of_stations = []
# Run through the list of stations and extract stations that satisfy the requirement
for station in stations:
# Obtain the relative level for the station
relative_level = station.relative_water_level()
# Handle consistent and inconsistent water levels
if relative_level != None:
if relative_level > tol:
list_of_stations.append((station, relative_level))
list_of_stations = sorted_by_key(list_of_stations,1,reverse=True)
return list_of_stations
### TASK 2C
def stations_highest_rel_level(stations, N):
"""Returns a list of the N stations with the highest relative levels"""
# Produce a list that ranks all the stations by relative level
list_of_stations = []
for station in stations:
relative_level = station.relative_water_level()
if relative_level != None:
list_of_stations.append((station, relative_level))
list_of_stations = sorted_by_key(list_of_stations,1,reverse=True)
# Extract the first N entries
return list_of_stations[0:N] | 33.681818 | 88 | 0.706478 |
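# A short usage sketch (illustrative; it assumes build_station_list() and update_water_levels()
# are provided by the stationdata module imported above):
#
#     stations = build_station_list()
#     update_water_levels(stations)
#     at_risk = stations_level_over_threshold(stations, tol=0.8)   # [(station, relative_level), ...]
#     top_ten = stations_highest_rel_level(stations, N=10)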
73c258701545989dee208eefad8aa47b62e69c5f | 3,951 | py | Python | website/project/__init__.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | website/project/__init__.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | 4 | 2016-05-13T14:24:16.000Z | 2017-03-30T15:28:31.000Z | website/project/__init__.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import uuid
from django.apps import apps
from .model import PrivateLink
from framework.mongo.utils import from_mongo
from modularodm import Q
from modularodm.exceptions import ValidationValueError
from website.exceptions import NodeStateError
from website.util.sanitize import strip_html
def show_diff(seqm):
"""Unify operations between two compared strings
seqm is a difflib.SequenceMatcher instance whose a & b are strings"""
output = []
insert_el = '<span style="background:#4AA02C; font-size:1.5em; ">'
ins_el_close = '</span>'
del_el = '<span style="background:#D16587; font-size:1.5em;">'
del_el_close = '</span>'
for opcode, a0, a1, b0, b1 in seqm.get_opcodes():
content_a = strip_html(seqm.a[a0:a1])
content_b = strip_html(seqm.b[b0:b1])
if opcode == 'equal':
output.append(content_a)
elif opcode == 'insert':
output.append(insert_el + content_b + ins_el_close)
elif opcode == 'delete':
output.append(del_el + content_a + del_el_close)
elif opcode == 'replace':
output.append(del_el + content_a + del_el_close + insert_el + content_b + ins_el_close)
else:
raise RuntimeError('unexpected opcode')
return ''.join(output)
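# Example (illustrative): rendering the difference between two strings.
#
#     import difflib
#     show_diff(difflib.SequenceMatcher(None, 'old wiki text', 'new wiki text'))
#     # deleted/replaced text is wrapped in the pink span, inserted text in the green span,
#     # and unchanged text is passed through after strip_html().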
# TODO: This should be a class method of Node
def new_node(category, title, user, description='', parent=None):
"""Create a new project or component.
:param str category: Node category
:param str title: Node title
:param User user: User object
:param str description: Node description
    :param Node parent: Optional parent object
:return Node: Created node
"""
# We use apps.get_model rather than import .model.Node
# because we want the concrete Node class, not AbstractNode
Node = apps.get_model('osf.Node')
category = category
title = strip_html(title.strip())
if description:
description = strip_html(description.strip())
node = Node(
title=title,
category=category,
creator=user,
description=description,
parent=parent
)
node.save()
return node
def new_bookmark_collection(user):
"""Create a new bookmark collection project.
:param User user: User object
:return Node: Created node
"""
Collection = apps.get_model('osf.Collection')
existing_bookmark_collection = Collection.find(
Q('is_bookmark_collection', 'eq', True) &
Q('creator', 'eq', user) &
Q('is_deleted', 'eq', False)
)
if existing_bookmark_collection.count() > 0:
raise NodeStateError('Users may only have one bookmark collection')
collection = Collection(
title='Bookmarks',
creator=user,
is_bookmark_collection=True
)
collection.save()
return collection
def new_private_link(name, user, nodes, anonymous):
"""Create a new private link.
:param str name: private link name
:param User user: User object
:param list Node node: a list of node object
:param bool anonymous: make link anonymous or not
:return PrivateLink: Created private link
"""
key = str(uuid.uuid4()).replace('-', '')
if name:
name = strip_html(name)
if name is None or not name.strip():
raise ValidationValueError('Invalid link name.')
else:
name = 'Shared project link'
private_link = PrivateLink(
key=key,
name=name,
creator=user,
anonymous=anonymous
)
private_link.save()
private_link.nodes.add(*nodes)
private_link.save()
return private_link
template_name_replacements = {
('.txt', ''),
('_', ' '),
}
def clean_template_name(template_name):
template_name = from_mongo(template_name)
for replacement in template_name_replacements:
template_name = template_name.replace(*replacement)
return template_name
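# For example (ignoring any decoding applied by from_mongo):
# clean_template_name('My_Template.txt') -> 'My Template'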
| 28.221429 | 99 | 0.657302 |
73c2621289bb6fdfecf7d5b7e7dd8268cb427967 | 117,047 | py | Python | tests/conftest.py | nbalacha/ocs-ci | 9c5a5474d62777e868b80894d6b0f3567a7b605d | [
"MIT"
] | null | null | null | tests/conftest.py | nbalacha/ocs-ci | 9c5a5474d62777e868b80894d6b0f3567a7b605d | [
"MIT"
] | null | null | null | tests/conftest.py | nbalacha/ocs-ci | 9c5a5474d62777e868b80894d6b0f3567a7b605d | [
"MIT"
] | null | null | null | import logging
import os
import random
import time
import tempfile
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
from math import floor
from shutil import copyfile
from functools import partial
from botocore.exceptions import ClientError
import pytest
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment,
ignore_leftovers,
tier_marks,
ignore_leftover_label,
)
from ocs_ci.ocs import constants, defaults, fio_artefacts, node, ocp, platform_nodes
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.exceptions import (
CommandFailed,
TimeoutExpiredError,
CephHealthException,
ResourceWrongStatusException,
UnsupportedPlatformError,
)
from ocs_ci.ocs.mcg_workload import mcg_job_factory as mcg_job_factory_implementation
from ocs_ci.ocs.node import get_node_objs, schedule_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.utils import setup_ceph_toolbox, collect_ocs_logs
from ocs_ci.ocs.resources.backingstore import (
backingstore_factory as backingstore_factory_implementation,
)
from ocs_ci.ocs.resources.bucketclass import (
bucket_class_factory as bucketclass_factory_implementation,
)
from ocs_ci.ocs.resources.cloud_manager import CloudManager
from ocs_ci.ocs.resources.cloud_uls import (
cloud_uls_factory as cloud_uls_factory_implementation,
)
from ocs_ci.ocs.node import check_nodes_specs
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.objectbucket import BUCKET_MAP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import (
get_rgw_pods,
delete_deploymentconfig_pods,
get_pods_having_label,
Pod,
)
from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.ocs.cluster_load import ClusterLoad, wrap_msg
from ocs_ci.utility import aws
from ocs_ci.utility import deployment_openshift_logging as ocp_logging_obj
from ocs_ci.utility import templating
from ocs_ci.utility import users
from ocs_ci.utility.environment_check import (
get_status_before_execution,
get_status_after_execution,
)
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility.utils import (
ceph_health_check,
ceph_health_check_base,
get_running_ocp_version,
get_openshift_client,
get_system_architecture,
get_testrun_name,
ocsci_log_path,
skipif_ocp_version,
skipif_ocs_version,
TimeoutSampler,
skipif_upgraded_from,
update_container_with_mirrored_image,
)
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import create_unique_resource_name
from ocs_ci.ocs.bucket_utils import get_rgw_restart_counts
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.ocs.resources.rgw import RGW
from ocs_ci.ocs.jenkins import Jenkins
from ocs_ci.ocs.couchbase import CouchBase
from ocs_ci.ocs.amq import AMQ
from ocs_ci.ocs.elasticsearch import ElasticSearch
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
def __init__(self):
fmt = (
"%(asctime)s - %(threadName)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
"- %(message)s"
)
super(OCSLogFormatter, self).__init__(fmt)
def pytest_logger_config(logger_config):
logger_config.add_loggers([""], stdout_level="info")
logger_config.set_log_option_default("")
logger_config.split_by_outcome()
logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, items):
"""
A pytest hook to filter out skipped tests satisfying
skipif_ocs_version or skipif_upgraded_from
Args:
session: pytest session
items: list of collected tests
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
if not (teardown or deploy):
for item in items[:]:
skipif_ocp_version_marker = item.get_closest_marker("skipif_ocp_version")
skipif_ocs_version_marker = item.get_closest_marker("skipif_ocs_version")
skipif_upgraded_from_marker = item.get_closest_marker(
"skipif_upgraded_from"
)
if skipif_ocp_version_marker:
skip_condition = skipif_ocp_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocp_version(skip_condition[0]):
log.info(
f"Test: {item} will be skipped due to OCP {skip_condition}"
)
items.remove(item)
continue
if skipif_ocs_version_marker:
skip_condition = skipif_ocs_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocs_version(skip_condition[0]):
log.info(f"Test: {item} will be skipped due to {skip_condition}")
items.remove(item)
continue
if skipif_upgraded_from_marker:
skip_args = skipif_upgraded_from_marker.args
if skipif_upgraded_from(skip_args[0]):
log.info(
f"Test: {item} will be skipped because the OCS cluster is"
f" upgraded from one of these versions: {skip_args[0]}"
)
items.remove(item)
@pytest.fixture()
def supported_configuration():
"""
Check that cluster nodes have enough CPU and Memory as described in:
https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs
This fixture is intended as a prerequisite for tests or fixtures that
    are flaky on configurations that don't meet minimal requirements.
Minimum requirements for each starting node (OSD+MON):
16 CPUs
64 GB memory
Last documentation check: 2020-02-21
"""
min_cpu = constants.MIN_NODE_CPU
min_memory = constants.MIN_NODE_MEMORY
log.info("Checking if system meets minimal requirements")
if not check_nodes_specs(min_memory=min_memory, min_cpu=min_cpu):
err_msg = (
f"At least one of the worker nodes doesn't meet the "
f"required minimum specs of {min_cpu} vCPUs and {min_memory} RAM"
)
pytest.xfail(err_msg)
@pytest.fixture(scope="class")
def secret_factory_class(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="session")
def secret_factory_session(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="function")
def secret_factory(request):
return secret_factory_fixture(request)
def secret_factory_fixture(request):
"""
Secret factory. Calling this fixture creates a new secret.
RBD based is default.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
"""
secret_obj = helpers.create_secret(interface_type=interface)
assert secret_obj, "Failed to create a secret"
instances.append(secret_obj)
return secret_obj
def finalizer():
"""
Delete the RBD secrets
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
"""
Fixture handling version reporting for OCS.
This fixture handles alignment of the version reporting, so that we:
* report version for each test run (no matter if just deployment, just
test or both deployment and tests are executed)
* prevent conflict of version reporting with deployment/teardown (eg. we
should not run the version logging before actual deployment, or after
a teardown)
Version is reported in:
* log entries of INFO log level during test setup phase
* ocs_version file in cluster path directory (for copy pasting into bug
reports)
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
dev_mode = config.RUN["cli_params"].get("dev_mode")
if teardown and not deploy:
log.info("Skipping version reporting for teardown.")
return
elif dev_mode:
log.info("Skipping version reporting for development mode.")
return
cluster_version, image_dict = get_ocs_version()
file_name = os.path.join(
config.ENV_DATA["cluster_path"], "ocs_version." + datetime.now().isoformat()
)
with open(file_name, "w") as file_obj:
report_ocs_version(cluster_version, image_dict, file_obj)
log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope="class")
def ceph_pool_factory_class(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="session")
def ceph_pool_factory_session(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="function")
def ceph_pool_factory(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
def ceph_pool_factory_fixture(request, replica=3, compression=None):
"""
Create a Ceph pool factory.
Calling this fixture creates new Ceph pool instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL, replica=replica, compression=compression
):
if interface == constants.CEPHBLOCKPOOL:
ceph_pool_obj = helpers.create_ceph_block_pool(
replica=replica, compression=compression
)
elif interface == constants.CEPHFILESYSTEM:
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
).get(defaults.CEPHFILESYSTEM_NAME)
ceph_pool_obj = OCS(**cfs)
assert ceph_pool_obj, f"Failed to create {interface} pool"
if interface != constants.CEPHFILESYSTEM:
instances.append(ceph_pool_obj)
return ceph_pool_obj
def finalizer():
"""
Delete the Ceph block pool
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_class(request, ceph_pool_factory_class, secret_factory_class):
return storageclass_factory_fixture(
request, ceph_pool_factory_class, secret_factory_class
)
@pytest.fixture(scope="session")
def storageclass_factory_session(
request, ceph_pool_factory_session, secret_factory_session
):
return storageclass_factory_fixture(
request, ceph_pool_factory_session, secret_factory_session
)
@pytest.fixture(scope="function")
def storageclass_factory(request, ceph_pool_factory, secret_factory):
return storageclass_factory_fixture(request, ceph_pool_factory, secret_factory)
def storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory,
):
"""
Create a storage class factory. Default is RBD based.
Calling this fixture creates new storage class instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
secret=None,
custom_data=None,
sc_name=None,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
replica=3,
compression=None,
new_rbd_pool=False,
pool_name=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
secret (object): An OCS instance for the secret.
custom_data (dict): If provided then storageclass object is created
by using these data. Parameters `block_pool` and `secret`
                are not used but references are set if provided.
sc_name (str): Name of the storage class
replica (int): Replica size for a pool
compression (str): Compression type option for a pool
new_rbd_pool (bool): True if user wants to create new rbd pool for SC
pool_name (str): Existing pool name to create the storageclass other
then the default rbd pool.
Returns:
object: helpers.create_storage_class instance with links to
block_pool and secret.
"""
if custom_data:
sc_obj = helpers.create_resource(**custom_data)
else:
secret = secret or secret_factory(interface=interface)
if interface == constants.CEPHBLOCKPOOL:
if config.ENV_DATA.get("new_rbd_pool") or new_rbd_pool:
pool_obj = ceph_pool_factory(
interface=interface,
replica=config.ENV_DATA.get("replica") or replica,
compression=config.ENV_DATA.get("compression") or compression,
)
interface_name = pool_obj.name
else:
if pool_name is None:
interface_name = helpers.default_ceph_block_pool()
else:
interface_name = pool_name
elif interface == constants.CEPHFILESYSTEM:
interface_name = helpers.get_cephfs_data_pool_name()
sc_obj = helpers.create_storage_class(
interface_type=interface,
interface_name=interface_name,
secret_name=secret.name,
sc_name=sc_name,
reclaim_policy=reclaim_policy,
)
assert sc_obj, f"Failed to create {interface} storage class"
sc_obj.secret = secret
instances.append(sc_obj)
return sc_obj
def finalizer():
"""
Delete the storageclass
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def project_factory_class(request):
return project_factory_fixture(request)
@pytest.fixture(scope="session")
def project_factory_session(request):
return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
"""
This fixture creates a single project instance.
"""
project_obj = project_factory()
return project_obj
def project_factory_fixture(request):
"""
Create a new project factory.
Calling this fixture creates new project.
"""
instances = []
def factory(project_name=None):
"""
Args:
project_name (str): The name for the new project
Returns:
object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
"""
proj_obj = helpers.create_project(project_name=project_name)
instances.append(proj_obj)
return proj_obj
def finalizer():
"""
Delete the project
"""
for instance in instances:
try:
ocp_event = ocp.OCP(kind="Event", namespace=instance.namespace)
events = ocp_event.get()
event_count = len(events["items"])
warn_event_count = 0
for event in events["items"]:
if event["type"] == "Warning":
warn_event_count += 1
log.info(
(
"There were %d events in %s namespace before it's"
" removal (out of which %d were of type Warning)."
" For a full dump of this event list, see DEBUG logs."
),
event_count,
instance.namespace,
warn_event_count,
)
except Exception:
# we don't want any problem to disrupt the teardown itself
log.exception("Failed to get events for project %s", instance.namespace)
ocp.switch_to_default_rook_cluster_project()
instance.delete(resource_name=instance.namespace)
instance.wait_for_delete(instance.namespace, timeout=300)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def pvc_factory_class(request, project_factory_class):
return pvc_factory_fixture(request, project_factory_class)
@pytest.fixture(scope="session")
def pvc_factory_session(request, project_factory_session):
return pvc_factory_fixture(request, project_factory_session)
@pytest.fixture(scope="function")
def pvc_factory(request, project_factory):
return pvc_factory_fixture(
request,
project_factory,
)
def pvc_factory_fixture(request, project_factory):
"""
Create a persistent Volume Claim factory. Calling this fixture creates new
PVC. For custom PVC provide 'storageclass' parameter.
"""
instances = []
active_project = None
active_rbd_storageclass = None
active_cephfs_storageclass = None
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_mode=constants.ACCESS_MODE_RWO,
custom_data=None,
status=constants.STATUS_BOUND,
volume_mode=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
This decides the access mode to be used for the PVC.
ReadWriteOnce is default.
custom_data (dict): If provided then PVC object is created
by using these data. Parameters `project` and `storageclass`
are not used but reference is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
volume_mode (str): Volume mode for PVC.
eg: volume_mode='Block' to create rbd `block` type volume
Returns:
object: helpers.create_pvc instance.
"""
if custom_data:
pvc_obj = PVC(**custom_data)
pvc_obj.create(do_reload=False)
else:
nonlocal active_project
nonlocal active_rbd_storageclass
nonlocal active_cephfs_storageclass
project = project or active_project or project_factory()
active_project = project
if interface == constants.CEPHBLOCKPOOL:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_rbd_storageclass = storageclass
elif interface == constants.CEPHFILESYSTEM:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_cephfs_storageclass = storageclass
pvc_size = f"{size}Gi" if size else None
pvc_obj = helpers.create_pvc(
sc_name=storageclass.name,
namespace=project.namespace,
size=pvc_size,
do_reload=False,
access_mode=access_mode,
volume_mode=volume_mode,
)
assert pvc_obj, "Failed to create PVC"
if status:
helpers.wait_for_resource_state(pvc_obj, status)
pvc_obj.storageclass = storageclass
pvc_obj.project = project
pvc_obj.access_mode = access_mode
instances.append(pvc_obj)
return pvc_obj
def finalizer():
"""
Delete the PVC
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
# If they have ReclaimPolicy set to Retain then delete them manually
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
helpers.wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(pv_obj.name)
else:
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
request.addfinalizer(finalizer)
return factory
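# Illustrative usage from a test (hypothetical test body, not part of this conftest):
#
#     def test_example(pvc_factory):
#         rbd_pvc = pvc_factory(size=3)
#         cephfs_pvc = pvc_factory(
#             interface=constants.CEPHFILESYSTEM,
#             access_mode=constants.ACCESS_MODE_RWX,
#             size=5,
#         )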
@pytest.fixture(scope="class")
def pod_factory_class(request, pvc_factory_class):
return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope="session")
def pod_factory_session(request, pvc_factory_session):
return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope="function")
def pod_factory(request, pvc_factory):
return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
"""
Create a Pod factory. Calling this fixture creates new Pod.
For custom Pods provide 'pvc' parameter.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
custom_data=None,
status=constants.STATUS_RUNNING,
node_name=None,
pod_dict_path=None,
raw_block_pv=False,
deployment_config=False,
service_account=None,
replica_count=1,
command=None,
command_args=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod.
raw_block_pv (bool): True for creating raw block pv based pod,
False otherwise.
deployment_config (bool): True for DeploymentConfig creation,
False otherwise
service_account (OCS): Service account object, in case DeploymentConfig
is to be created
replica_count (int): The replica count for deployment config
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
Returns:
object: helpers.create_pod instance
"""
sa_name = service_account.name if service_account else None
if custom_data:
pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface)
pod_obj = helpers.create_pod(
pvc_name=pvc.name,
namespace=pvc.namespace,
interface_type=interface,
node_name=node_name,
pod_dict_path=pod_dict_path,
raw_block_pv=raw_block_pv,
dc_deployment=deployment_config,
sa_name=sa_name,
replica_count=replica_count,
command=command,
command_args=command_args,
)
assert pod_obj, "Failed to create pod"
if deployment_config:
dc_name = pod_obj.get_labels().get("name")
dc_ocp_dict = ocp.OCP(
kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace
).get(resource_name=dc_name)
dc_obj = OCS(**dc_ocp_dict)
instances.append(dc_obj)
else:
instances.append(pod_obj)
if status:
helpers.wait_for_resource_state(pod_obj, status)
pod_obj.reload()
pod_obj.pvc = pvc
if deployment_config:
return dc_obj
return pod_obj
def finalizer():
"""
Delete the Pod or the DeploymentConfig
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
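# Illustrative usage from a test (hypothetical test body, not part of this conftest):
#
#     def test_example(pvc_factory, pod_factory):
#         pvc_obj = pvc_factory(interface=constants.CEPHFILESYSTEM, size=3)
#         pod_obj = pod_factory(interface=constants.CEPHFILESYSTEM, pvc=pvc_obj)
#         # pod_obj reaches Running and is bound to pvc_obj; both are cleaned up by the finalizers above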
@pytest.fixture(scope="class")
def teardown_factory_class(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="session")
def teardown_factory_session(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="function")
def teardown_factory(request):
return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
"""
Tearing down a resource that was created during the test
To use this factory, you'll need to pass 'teardown_factory' to your test
function and call it in your test when a new resource was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_factory):
pvc_obj = create_pvc()
teardown_factory(pvc_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
"""
Delete the resources created in the test
"""
for instance in instances[::-1]:
if not instance.is_deleted:
reclaim_policy = (
instance.reclaim_policy if instance.kind == constants.PVC else None
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
helpers.validate_pv_delete(instance.backed_pv)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def service_account_factory_class(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="session")
def service_account_factory_session(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="function")
def service_account_factory(request):
return service_account_factory_fixture(request)
def service_account_factory_fixture(request):
"""
Create a service account
"""
instances = []
active_service_account_obj = None
def factory(project=None, service_account=None):
"""
Args:
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
service_account (str): service_account_name
Returns:
object: serviceaccount instance.
"""
nonlocal active_service_account_obj
if active_service_account_obj and not service_account:
return active_service_account_obj
elif service_account:
sa_obj = helpers.get_serviceaccount_obj(
sa_name=service_account, namespace=project.namespace
)
if not helpers.validate_scc_policy(
sa_name=service_account, namespace=project.namespace
):
helpers.add_scc_policy(
sa_name=service_account, namespace=project.namespace
)
sa_obj.project = project
active_service_account_obj = sa_obj
instances.append(sa_obj)
return sa_obj
else:
sa_obj = helpers.create_serviceaccount(
namespace=project.namespace,
)
sa_obj.project = project
active_service_account_obj = sa_obj
helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
assert sa_obj, "Failed to create serviceaccount"
instances.append(sa_obj)
return sa_obj
def finalizer():
"""
Delete the service account
"""
for instance in instances:
helpers.remove_scc_policy(
sa_name=instance.name, namespace=instance.namespace
)
instance.delete()
instance.ocp.wait_for_delete(resource_name=instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def dc_pod_factory(request, pvc_factory, service_account_factory):
"""
Create deploymentconfig pods
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
service_account=None,
size=None,
custom_data=None,
node_name=None,
node_selector=None,
replica_count=1,
raw_block_pv=False,
sa_obj=None,
wait=True,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
service_account (str): service account name for dc_pods
size (int): The requested size for the PVC
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
node_name (str): The name of specific node to schedule the pod
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
replica_count (int): Replica count for deployment config
raw_block_pv (str): True if pod with raw block pvc
sa_obj (object) : If specific service account is needed
"""
if custom_data:
dc_pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface, size=size)
sa_obj = sa_obj or service_account_factory(
project=pvc.project, service_account=service_account
)
dc_pod_obj = helpers.create_pod(
interface_type=interface,
pvc_name=pvc.name,
do_reload=False,
namespace=pvc.namespace,
sa_name=sa_obj.name,
dc_deployment=True,
replica_count=replica_count,
node_name=node_name,
node_selector=node_selector,
raw_block_pv=raw_block_pv,
pod_dict_path=constants.FEDORA_DC_YAML,
)
instances.append(dc_pod_obj)
log.info(dc_pod_obj.name)
if wait:
helpers.wait_for_resource_state(
dc_pod_obj, constants.STATUS_RUNNING, timeout=180
)
dc_pod_obj.pvc = pvc
return dc_pod_obj
def finalizer():
"""
Delete dc pods
"""
for instance in instances:
delete_deploymentconfig_pods(instance)
request.addfinalizer(finalizer)
return factory
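# Illustrative usage from a test (hypothetical test body, not part of this conftest):
#
#     def test_example(dc_pod_factory):
#         dc_pod = dc_pod_factory(
#             interface=constants.CEPHBLOCKPOOL,
#             size=10,
#             replica_count=2,
#         )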
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures polarion testsuite properties for junit xml
"""
polarion_project_id = config.REPORTING["polarion"]["project_id"]
record_testsuite_property("polarion-project-id", polarion_project_id)
jenkins_build_url = config.RUN.get("jenkins_build_url")
if jenkins_build_url:
record_testsuite_property("polarion-custom-description", jenkins_build_url)
polarion_testrun_name = get_testrun_name()
record_testsuite_property("polarion-testrun-id", polarion_testrun_name)
record_testsuite_property("polarion-testrun-status-id", "inprogress")
record_testsuite_property("polarion-custom-isautomated", "True")
@pytest.fixture(scope="session")
def tier_marks_name():
"""
Gets the tier mark names
Returns:
list: list of tier mark names
"""
tier_marks_name = []
for each_tier in tier_marks:
try:
tier_marks_name.append(each_tier.name)
except AttributeError:
tier_marks_name.append(each_tier().args[0].name)
return tier_marks_name
@pytest.fixture(scope="function", autouse=True)
def health_checker(request, tier_marks_name):
skipped = False
dev_mode = config.RUN["cli_params"].get("dev_mode")
if dev_mode:
log.info("Skipping health checks for development mode")
return
def finalizer():
if not skipped:
try:
teardown = config.RUN["cli_params"]["teardown"]
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if not (teardown or skip_ocs_deployment):
ceph_health_check_base()
log.info("Ceph health check passed at teardown")
except CephHealthException:
log.info("Ceph health check failed at teardown")
# Retrying to increase the chance the cluster health will be OK
# for next test
ceph_health_check()
raise
node = request.node
request.addfinalizer(finalizer)
for mark in node.iter_markers():
if mark.name in tier_marks_name:
log.info("Checking for Ceph Health OK ")
try:
status = ceph_health_check_base()
if status:
log.info("Ceph health check passed at setup")
return
except CephHealthException:
skipped = True
# skip because ceph is not in good health
pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
"""
This fixture initiates deployment for both OCP and OCS clusters.
Specific platform deployment classes will handle the fine details
of action
"""
log.info(f"All logs located at {ocsci_log_path()}")
teardown = config.RUN["cli_params"]["teardown"]
deploy = config.RUN["cli_params"]["deploy"]
if teardown or deploy:
factory = dep_factory.DeploymentFactory()
deployer = factory.get_deployment()
# Add a finalizer to teardown the cluster after test execution is finished
if teardown:
def cluster_teardown_finalizer():
deployer.destroy_cluster(log_cli_level)
request.addfinalizer(cluster_teardown_finalizer)
log.info("Will teardown cluster because --teardown was provided")
# Download client
force_download = (
config.RUN["cli_params"].get("deploy")
and config.DEPLOYMENT["force_download_client"]
)
get_openshift_client(force_download=force_download)
if deploy:
# Deploy cluster
deployer.deploy_cluster(log_cli_level)
@pytest.fixture(scope="class")
def environment_checker(request):
node = request.node
# List of marks for which we will ignore the leftover checker
marks_to_ignore = [m.mark for m in [deployment, ignore_leftovers]]
# app labels of resources to be excluded for leftover check
exclude_labels = [constants.must_gather_pod_label]
for mark in node.iter_markers():
if mark in marks_to_ignore:
return
if mark.name == ignore_leftover_label.name:
exclude_labels.extend(list(mark.args))
request.addfinalizer(
partial(get_status_after_execution, exclude_labels=exclude_labels)
)
get_status_before_execution(exclude_labels=exclude_labels)
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
"""
Retrieves the log_cli_level set in pytest.ini
Returns:
str: log_cli_level set in pytest.ini or DEBUG if not set
"""
return pytestconfig.getini("log_cli_level") or "DEBUG"
@pytest.fixture(scope="session", autouse=True)
def cluster_load(
request,
project_factory_session,
pvc_factory_session,
service_account_factory_session,
pod_factory_session,
):
"""
Run IO during the test execution
"""
cl_load_obj = None
io_in_bg = config.RUN.get("io_in_bg")
log_utilization = config.RUN.get("log_utilization")
io_load = config.RUN.get("io_load")
cluster_load_error = None
cluster_load_error_msg = (
"Cluster load might not work correctly during this run, because "
"it failed with an exception: %s"
)
# IO load should not happen during deployment
    deployment_test = "deployment" in request.node.items[0].location[0]
if io_in_bg and not deployment_test:
io_load = int(io_load) * 0.01
log.info(wrap_msg("Tests will be running while IO is in the background"))
log.info(
"Start running IO in the background. The amount of IO that "
"will be written is going to be determined by the cluster "
"capabilities according to its limit"
)
try:
cl_load_obj = ClusterLoad(
project_factory=project_factory_session,
sa_factory=service_account_factory_session,
pvc_factory=pvc_factory_session,
pod_factory=pod_factory_session,
target_percentage=io_load,
)
cl_load_obj.reach_cluster_load_percentage()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
if (log_utilization or io_in_bg) and not deployment_test:
if not cl_load_obj:
try:
cl_load_obj = ClusterLoad()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
config.RUN["load_status"] = "running"
def finalizer():
"""
Stop the thread that executed watch_load()
"""
config.RUN["load_status"] = "finished"
if thread:
thread.join()
if cluster_load_error:
raise cluster_load_error
request.addfinalizer(finalizer)
def watch_load():
"""
Watch the cluster load by monitoring the cluster latency.
        Print the cluster utilization metrics every 20 seconds.
If IOs are running in the test background, dynamically adjust
the IO load based on the cluster latency.
"""
while config.RUN["load_status"] != "finished":
time.sleep(20)
try:
cl_load_obj.print_metrics(mute_logs=True)
if io_in_bg:
if config.RUN["load_status"] == "running":
cl_load_obj.adjust_load_if_needed()
elif config.RUN["load_status"] == "to_be_paused":
cl_load_obj.reduce_load(pause=True)
config.RUN["load_status"] = "paused"
elif config.RUN["load_status"] == "to_be_reduced":
cl_load_obj.reduce_load(pause=False)
config.RUN["load_status"] = "reduced"
elif config.RUN["load_status"] == "to_be_resumed":
cl_load_obj.resume_load()
config.RUN["load_status"] = "running"
# Any type of exception should be caught and we should continue.
# We don't want any test to fail
except Exception:
continue
thread = threading.Thread(target=watch_load)
thread.start()
def resume_cluster_load_implementation():
"""
Resume cluster load implementation
"""
config.RUN["load_status"] = "to_be_resumed"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status == "running":
break
except TimeoutExpiredError:
log.error("Cluster load was not resumed successfully")
def reduce_cluster_load_implementation(request, pause, resume=True):
"""
Pause/reduce the background cluster load
Args:
pause (bool): True for completely pausing the cluster load, False for reducing it by 50%
resume (bool): True for resuming the cluster load upon teardown, False for not resuming
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
if resume:
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
config.RUN["load_status"] = "to_be_paused" if pause else "to_be_reduced"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status in ["paused", "reduced"]:
break
except TimeoutExpiredError:
log.error(
f"Cluster load was not {'paused' if pause else 'reduced'} successfully"
)
@pytest.fixture()
def pause_cluster_load(request):
"""
Pause the background cluster load without resuming it
"""
reduce_cluster_load_implementation(request=request, pause=True, resume=False)
@pytest.fixture()
def resume_cluster_load(request):
"""
Resume the background cluster load
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
@pytest.fixture()
def pause_and_resume_cluster_load(request):
"""
Pause the background cluster load and resume it in teardown to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=True)
@pytest.fixture()
def reduce_and_resume_cluster_load(request):
"""
Reduce the background cluster load to be 50% of what it is and resume the load in teardown
to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=False)
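# Illustrative usage sketch (hypothetical test): a disruptive test that cannot
# tolerate the background IO load simply requests one of the fixtures above; the
# load is paused/reduced at setup and restored to its original value in teardown.
#
#   def test_example_disruption(nodes, pause_and_resume_cluster_load):
#       nodes.restart_nodes(node.get_node_objs())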
@pytest.fixture(
params=[
pytest.param({"interface": constants.CEPHBLOCKPOOL}),
pytest.param({"interface": constants.CEPHFILESYSTEM}),
],
ids=["RBD", "CephFS"],
)
def interface_iterate(request):
"""
Iterate over interfaces - CephBlockPool and CephFileSystem
"""
return request.param["interface"]
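# Illustrative usage sketch (hypothetical test): requesting interface_iterate makes
# pytest run the test twice, once per interface, with the fixture value being either
# constants.CEPHBLOCKPOOL or constants.CEPHFILESYSTEM.
#
#   def test_example_interfaces(interface_iterate, pvc_factory):
#       pvc_obj = pvc_factory(interface=interface_iterate)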
@pytest.fixture(scope="class")
def multi_pvc_factory_class(project_factory_class, pvc_factory_class):
return multi_pvc_factory_fixture(project_factory_class, pvc_factory_class)
@pytest.fixture(scope="session")
def multi_pvc_factory_session(project_factory_session, pvc_factory_session):
return multi_pvc_factory_fixture(project_factory_session, pvc_factory_session)
@pytest.fixture(scope="function")
def multi_pvc_factory(project_factory, pvc_factory):
return multi_pvc_factory_fixture(project_factory, pvc_factory)
def multi_pvc_factory_fixture(project_factory, pvc_factory):
"""
Create a Persistent Volume Claims factory. Calling this fixture creates a
    set of new PVCs. Options for PVC creation based on provided access modes:
1. For each PVC, choose random value from the list of access modes
2. Create PVCs based on the specified distribution number of access modes.
Create sets of PVCs based on the order of access modes.
3. Create PVCs based on the specified distribution number of access modes.
The order of PVC creation is independent of access mode.
"""
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_modes=None,
access_modes_selection="distribute_sequential",
access_mode_dist_ratio=None,
status=constants.STATUS_BOUND,
num_of_pvc=1,
wait_each=False,
timeout=60,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_modes (list): List of access modes. One of the access modes
will be chosen for creating each PVC. If not specified,
ReadWriteOnce will be selected for all PVCs. To specify
volume mode, append volume mode in the access mode name
separated by '-'.
eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
'ReadWriteMany-Block']
access_modes_selection (str): Decides how to select accessMode for
each PVC from the options given in 'access_modes' list.
                Values are 'select_random', 'distribute_random' and
                'distribute_sequential'.
                'select_random' : While creating each PVC, one access mode will
                    be selected from the 'access_modes' list.
                'distribute_random' : The access modes in the list
                    'access_modes' will be distributed based on the values in
                    'access_mode_dist_ratio' and the order in which PVCs are
                    created will not be based on the access modes. For example,
                    1st and 6th PVC might have the same access mode.
                'distribute_sequential' : The access modes in the list
                    'access_modes' will be distributed based on the values in
                    'access_mode_dist_ratio' and the order in which PVCs are
                    created will be as sets of PVCs of the same access mode. For
                    example, the first set of 10 will have the same access mode,
                    followed by the next set of 13 with a different access mode.
access_mode_dist_ratio (list): Contains the number of PVCs to be
created for each access mode. If not specified, the given list
of access modes will be equally distributed among the PVCs.
eg: [10,12] for num_of_pvc=22 and
access_modes=['ReadWriteOnce', 'ReadWriteMany']
status (str): If provided then factory waits for object to reach
desired state.
num_of_pvc(int): Number of PVCs to be created
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
timeout(int): Time in seconds to wait
Returns:
list: objects of PVC class.
"""
pvc_list = []
if wait_each:
status_tmp = status
else:
status_tmp = ""
project = project or project_factory()
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
access_modes = access_modes or [constants.ACCESS_MODE_RWO]
access_modes_list = []
if access_modes_selection == "select_random":
for _ in range(num_of_pvc):
mode = random.choice(access_modes)
access_modes_list.append(mode)
else:
if not access_mode_dist_ratio:
num_of_modes = len(access_modes)
dist_val = floor(num_of_pvc / num_of_modes)
access_mode_dist_ratio = [dist_val] * num_of_modes
access_mode_dist_ratio[-1] = dist_val + (num_of_pvc % num_of_modes)
zipped_share = list(zip(access_modes, access_mode_dist_ratio))
for mode, share in zipped_share:
access_modes_list.extend([mode] * share)
if access_modes_selection == "distribute_random":
random.shuffle(access_modes_list)
for access_mode in access_modes_list:
if "-" in access_mode:
access_mode, volume_mode = access_mode.split("-")
else:
volume_mode = ""
pvc_obj = pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=size,
access_mode=access_mode,
status=status_tmp,
volume_mode=volume_mode,
)
pvc_list.append(pvc_obj)
pvc_obj.project = project
if status and not wait_each:
for pvc_obj in pvc_list:
helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
return pvc_list
return factory
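# Illustrative usage sketch (hypothetical test): create six RBD PVCs, distributed
# sequentially as four ReadWriteOnce followed by two ReadWriteMany; numbers are
# examples only.
#
#   def test_example_multi_pvc(multi_pvc_factory):
#       pvcs = multi_pvc_factory(
#           interface=constants.CEPHBLOCKPOOL,
#           access_modes=["ReadWriteOnce", "ReadWriteMany"],
#           access_mode_dist_ratio=[4, 2],
#           num_of_pvc=6,
#       )
#       assert len(pvcs) == 6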
@pytest.fixture(scope="function")
def memory_leak_function(request):
"""
Function to start Memory leak thread which will be executed parallel with test run
Memory leak data will be captured in all worker nodes for ceph-osd process
Data will be appended in /tmp/(worker)-top-output.txt file for each worker
During teardown created tmp files will be deleted
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
def finalizer():
"""
Finalizer to stop memory leak data capture thread and cleanup the files
"""
set_flag_status("terminated")
try:
for status in TimeoutSampler(90, 3, get_flag_status):
if status == "terminated":
break
except TimeoutExpiredError:
log.warning(
"Background test execution still in progress before"
"memory leak thread terminated"
)
if thread:
thread.join()
log_path = ocsci_log_path()
for worker in node.get_worker_nodes():
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
copyfile(
f"/tmp/{worker}-top-output.txt",
f"{log_path}/{worker}-top-output.txt",
)
os.remove(f"/tmp/{worker}-top-output.txt")
log.info("Memory leak capture has stopped")
request.addfinalizer(finalizer)
temp_file = tempfile.NamedTemporaryFile(
mode="w+", prefix="test_status", delete=False
)
def get_flag_status():
with open(temp_file.name, "r") as t_file:
return t_file.readline()
def set_flag_status(value):
with open(temp_file.name, "w") as t_file:
t_file.writelines(value)
set_flag_status("running")
def run_memory_leak_in_bg():
"""
Function to run memory leak in background thread
Memory leak data is written in below format
date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
"""
oc = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
while get_flag_status() == "running":
for worker in node.get_worker_nodes():
filename = f"/tmp/{worker}-top-output.txt"
top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
with open("/tmp/file.txt", "w+") as temp:
temp.write(
str(oc.exec_oc_cmd(command=top_cmd, out_yaml_format=False))
)
temp.seek(0)
for line in temp:
if line.__contains__("ceph-osd"):
with open(filename, "a+") as f:
f.write(str(datetime.now()))
f.write(" ")
f.write(line)
log.info("Start memory leak data capture in the test background")
thread = threading.Thread(target=run_memory_leak_in_bg)
thread.start()
@pytest.fixture()
def aws_obj():
"""
Initialize AWS instance
Returns:
AWS: An instance of AWS class
"""
aws_obj = aws.AWS()
return aws_obj
@pytest.fixture()
def ec2_instances(request, aws_obj):
"""
Get cluster instances
Returns:
dict: The ID keys and the name values of the instances
"""
# Get all cluster nodes objects
nodes = node.get_node_objs()
# Get the cluster nodes ec2 instances
ec2_instances = aws.get_instances_ids_and_names(nodes)
assert (
ec2_instances
), f"Failed to get ec2 instances for node {[n.name for n in nodes]}"
def finalizer():
"""
Make sure all instances are running
"""
# Getting the instances that are in status 'stopping' (if there are any), to wait for them to
# get to status 'stopped' so it will be possible to start them
stopping_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING)
}
        # Waiting for the instances that are in status 'stopping'
# (if there are any) to reach 'stopped'
if stopping_instances:
for stopping_instance in stopping_instances:
                instance = aws_obj.get_ec2_instance(stopping_instance)
instance.wait_until_stopped()
stopped_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED)
}
# Start the instances
if stopped_instances:
aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)
request.addfinalizer(finalizer)
return ec2_instances
@pytest.fixture(scope="session")
def cld_mgr(request, rgw_endpoint):
"""
Returns a cloud manager instance that'll be used throughout the session
Returns:
CloudManager: A CloudManager resource
"""
cld_mgr = CloudManager()
def finalizer():
for client in vars(cld_mgr):
try:
getattr(cld_mgr, client).secret.delete()
except AttributeError:
log.info(f"{client} secret not found")
request.addfinalizer(finalizer)
return cld_mgr
@pytest.fixture()
def rgw_obj(request):
return rgw_obj_fixture(request)
@pytest.fixture(scope="session")
def rgw_obj_session(request):
return rgw_obj_fixture(request)
def rgw_obj_fixture(request):
"""
Returns an RGW resource that represents RGW in the cluster
Returns:
RGW: An RGW resource
"""
return RGW()
@pytest.fixture()
def rgw_deployments(request):
"""
Return RGW deployments or skip the test.
"""
oc = ocp.OCP(
kind=constants.DEPLOYMENT, namespace=config.ENV_DATA["cluster_namespace"]
)
rgw_deployments = oc.get(selector=constants.RGW_APP_LABEL)["items"]
if rgw_deployments:
return rgw_deployments
else:
pytest.skip("There is no RGW deployment available for this test.")
@pytest.fixture(scope="session")
def rgw_endpoint(request):
"""
Expose RGW service and return external RGW endpoint address if available.
Returns:
string: external RGW endpoint
"""
log.info("Looking for RGW service to expose")
oc = ocp.OCP(kind=constants.SERVICE, namespace=config.ENV_DATA["cluster_namespace"])
rgw_service = oc.get(selector=constants.RGW_APP_LABEL)["items"]
if rgw_service:
if config.DEPLOYMENT["external_mode"]:
rgw_service = constants.RGW_SERVICE_EXTERNAL_MODE
else:
rgw_service = constants.RGW_SERVICE_INTERNAL_MODE
log.info(f"Service {rgw_service} found and will be exposed")
# custom hostname is provided because default hostname from rgw service
# is too long and OCP rejects it
oc = ocp.OCP(
kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
)
route = oc.get(resource_name="noobaa-mgmt")
router_hostname = route["status"]["ingress"][0]["routerCanonicalHostname"]
rgw_hostname = f"rgw.{router_hostname}"
oc.exec_oc_cmd(f"expose service/{rgw_service} --hostname {rgw_hostname}")
# new route is named after service
rgw_endpoint = oc.get(resource_name=rgw_service)
endpoint_obj = OCS(**rgw_endpoint)
def _finalizer():
endpoint_obj.delete()
request.addfinalizer(_finalizer)
return f"http://{rgw_hostname}"
else:
log.info("RGW service is not available")
@pytest.fixture()
def mcg_obj(request):
return mcg_obj_fixture(request)
@pytest.fixture(scope="session")
def mcg_obj_session(request):
return mcg_obj_fixture(request)
def mcg_obj_fixture(request, *args, **kwargs):
"""
Returns an MCG resource that's connected to the S3 endpoint
Returns:
MCG: An MCG resource
"""
mcg_obj = MCG(*args, **kwargs)
def finalizer():
if config.ENV_DATA["platform"].lower() == "aws":
mcg_obj.cred_req_obj.delete()
if kwargs.get("create_aws_creds"):
request.addfinalizer(finalizer)
return mcg_obj
@pytest.fixture()
def awscli_pod(request):
return awscli_pod_fixture(request, scope_name="function")
@pytest.fixture(scope="session")
def awscli_pod_session(request):
return awscli_pod_fixture(request, scope_name="session")
def awscli_pod_fixture(request, scope_name):
"""
Creates a new AWSCLI pod for relaying commands
Args:
scope_name (str): The name of the fixture's scope,
used for giving a descriptive name to the pod and configmap
Returns:
pod: A pod running the AWS CLI
"""
# Create the service-ca configmap to be mounted upon pod creation
service_ca_data = templating.load_yaml(constants.AWSCLI_SERVICE_CA_YAML)
service_ca_configmap_name = create_unique_resource_name(
constants.AWSCLI_SERVICE_CA_CONFIGMAP_NAME, scope_name
)
service_ca_data["metadata"]["name"] = service_ca_configmap_name
log.info("Trying to create the AWS CLI service CA")
service_ca_configmap = helpers.create_resource(**service_ca_data)
arch = get_system_architecture()
if arch.startswith("x86"):
pod_dict_path = constants.AWSCLI_POD_YAML
else:
pod_dict_path = constants.AWSCLI_MULTIARCH_POD_YAML
awscli_pod_dict = templating.load_yaml(pod_dict_path)
awscli_pod_dict["spec"]["volumes"][0]["configMap"][
"name"
] = service_ca_configmap_name
awscli_pod_name = create_unique_resource_name(
constants.AWSCLI_RELAY_POD_NAME, scope_name
)
awscli_pod_dict["metadata"]["name"] = awscli_pod_name
update_container_with_mirrored_image(awscli_pod_dict)
awscli_pod_obj = Pod(**awscli_pod_dict)
assert awscli_pod_obj.create(
do_reload=True
), f"Failed to create Pod {awscli_pod_name}"
OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE, kind="ConfigMap").wait_for_resource(
resource_name=service_ca_configmap.name, column="DATA", condition="1"
)
helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)
def _awscli_pod_cleanup():
awscli_pod_obj.delete()
service_ca_configmap.delete()
request.addfinalizer(_awscli_pod_cleanup)
return awscli_pod_obj
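# Illustrative usage sketch (hypothetical test): the returned pod is typically used
# to relay S3 commands against MCG, e.g. together with mcg_obj and craft_s3_command
# as done in uploaded_objects_fixture below.
#
#   def test_example_s3_ls(mcg_obj, awscli_pod):
#       awscli_pod.exec_cmd_on_pod(
#           command=craft_s3_command("ls", mcg_obj),
#           secrets=[
#               mcg_obj.access_key_id,
#               mcg_obj.access_key,
#               mcg_obj.s3_internal_endpoint,
#           ],
#       )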
@pytest.fixture()
def nodes():
"""
Return an instance of the relevant platform nodes class
(e.g. AWSNodes, VMWareNodes) to be later used in the test
for nodes related operations, like nodes restart,
detach/attach volume, etc.
"""
factory = platform_nodes.PlatformNodesFactory()
nodes = factory.get_nodes_platform()
return nodes
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
return uploaded_objects_fixture(
request, mcg_obj, awscli_pod, verify_rgw_restart_count
)
@pytest.fixture(scope="session")
def uploaded_objects_session(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
):
return uploaded_objects_fixture(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
)
def uploaded_objects_fixture(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
"""
Deletes all objects that were created as part of the test
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
awscli_pod (Pod): A pod running the AWSCLI tools
Returns:
list: An empty list of objects
"""
uploaded_objects_paths = []
def object_cleanup():
for uploaded_filename in uploaded_objects_paths:
log.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command("rm " + uploaded_filename, mcg_obj),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
request.addfinalizer(object_cleanup)
return uploaded_objects_paths
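# Illustrative usage sketch (hypothetical test): a test appends every object path it
# uploads to the returned list so that the finalizer above deletes it afterwards; the
# bucket and object names are examples only.
#
#   def test_example_upload(mcg_obj, awscli_pod, bucket_factory, uploaded_objects):
#       bucket = bucket_factory(amount=1)[0]
#       object_path = f"s3://{bucket.name}/example-object"
#       # ... upload the object to object_path via awscli_pod ...
#       uploaded_objects.append(object_path)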
@pytest.fixture()
def verify_rgw_restart_count(request):
return verify_rgw_restart_count_fixture(request)
@pytest.fixture(scope="session")
def verify_rgw_restart_count_session(request):
return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
"""
Verifies the RGW restart count at start and end of a test
"""
if config.ENV_DATA["platform"].lower() in constants.ON_PREM_PLATFORMS:
log.info("Getting RGW pod restart count before executing the test")
initial_counts = get_rgw_restart_counts()
def finalizer():
rgw_pods = get_rgw_pods()
for rgw_pod in rgw_pods:
rgw_pod.reload()
log.info("Verifying whether RGW pods changed after executing the test")
for rgw_pod in rgw_pods:
assert rgw_pod.restart_count in initial_counts, "RGW pod restarted"
request.addfinalizer(finalizer)
@pytest.fixture()
def rgw_bucket_factory(request, rgw_obj):
return bucket_factory_fixture(request, rgw_obj=rgw_obj)
@pytest.fixture(scope="session")
def rgw_bucket_factory_session(request, rgw_obj_session):
return bucket_factory_fixture(request, rgw_obj=rgw_obj_session)
@pytest.fixture()
def bucket_factory(request, bucket_class_factory, mcg_obj):
"""
Returns an MCG bucket factory
"""
return bucket_factory_fixture(request, bucket_class_factory, mcg_obj)
@pytest.fixture(scope="session")
def bucket_factory_session(request, bucket_class_factory_session, mcg_obj_session):
"""
Returns a session-scoped MCG bucket factory
"""
return bucket_factory_fixture(
request, bucket_class_factory_session, mcg_obj_session
)
def bucket_factory_fixture(
request, bucket_class_factory=None, mcg_obj=None, rgw_obj=None
):
"""
Create a bucket factory. Calling this fixture creates a new bucket(s).
For a custom amount, provide the 'amount' parameter.
***Please note***
Creation of buckets by utilizing the S3 interface *does not* support bucketclasses.
Only OC/CLI buckets can support different bucketclasses.
By default, all S3 buckets utilize the default bucketclass.
Args:
bucket_class_factory: creates a new Bucket Class
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
rgw_obj (RGW): An RGW object
"""
created_buckets = []
def _create_buckets(
amount=1,
interface="S3",
verify_health=True,
bucketclass=None,
*args,
**kwargs,
):
"""
        Creates the requested number of buckets; all buckets created through this
        factory are deleted by its finalizer when the test ends
Args:
amount (int): The amount of buckets to create
interface (str): The interface to use for creation of buckets.
S3 | OC | CLI | NAMESPACE
            verify_health (bool): Whether to verify the created bucket's health
post-creation
bucketclass (dict): A dictionary describing a new
bucketclass to be created.
When None, the default bucketclass is used.
Returns:
list: A list of s3.Bucket objects, containing all the created
buckets
"""
if interface.lower() not in BUCKET_MAP:
raise RuntimeError(
f"Invalid interface type received: {interface}. "
f'available types: {", ".join(BUCKET_MAP.keys())}'
)
bucketclass = (
bucketclass if bucketclass is None else bucket_class_factory(bucketclass)
)
for i in range(amount):
bucket_name = helpers.create_unique_resource_name(
resource_description="bucket", resource_type=interface.lower()
)
created_bucket = BUCKET_MAP[interface.lower()](
bucket_name,
mcg=mcg_obj,
rgw=rgw_obj,
bucketclass=bucketclass,
*args,
**kwargs,
)
created_buckets.append(created_bucket)
if verify_health:
created_bucket.verify_health()
return created_buckets
def bucket_cleanup():
for bucket in created_buckets:
log.info(f"Cleaning up bucket {bucket.name}")
try:
bucket.delete()
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchBucket":
log.warning(f"{bucket.name} could not be found in cleanup")
else:
raise
request.addfinalizer(bucket_cleanup)
return _create_buckets
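# Illustrative usage sketch (hypothetical test): create two OC buckets on top of a
# custom bucketclass; the backingstore definition mirrors the dict format used by
# multiregion_mirror_setup_fixture below and is an example only.
#
#   def test_example_buckets(bucket_factory):
#       bucketclass = {
#           "interface": "OC",
#           "backingstore_dict": {"aws": [(1, "us-east-2")]},
#       }
#       buckets = bucket_factory(amount=2, interface="OC", bucketclass=bucketclass)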
@pytest.fixture(scope="class")
def cloud_uls_factory(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="session")
def cloud_uls_factory_session(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="function")
def mcg_job_factory(request, bucket_factory, project_factory, mcg_obj, tmp_path):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request, bucket_factory, project_factory, mcg_obj, tmp_path
)
@pytest.fixture(scope="session")
def mcg_job_factory_session(
request, bucket_factory_session, project_factory_session, mcg_obj_session, tmp_path
):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request,
bucket_factory_session,
project_factory_session,
mcg_obj_session,
tmp_path,
)
@pytest.fixture()
def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
"""
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
@pytest.fixture(scope="session")
def backingstore_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
"""
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
@pytest.fixture()
def bucket_class_factory(request, mcg_obj, backingstore_factory):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
"""
return bucketclass_factory_implementation(request, mcg_obj, backingstore_factory)
@pytest.fixture(scope="session")
def bucket_class_factory_session(
request, mcg_obj_session, backingstore_factory_session
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
"""
return bucketclass_factory_implementation(
request, mcg_obj_session, backingstore_factory_session
)
@pytest.fixture()
def multiregion_mirror_setup(bucket_factory):
return multiregion_mirror_setup_fixture(bucket_factory)
@pytest.fixture(scope="session")
def multiregion_mirror_setup_session(bucket_factory_session):
return multiregion_mirror_setup_fixture(bucket_factory_session)
def multiregion_mirror_setup_fixture(bucket_factory):
# Setup
# Todo:
# add region and amount parametrization - note that `us-east-1`
# will cause an error as it is the default region. If usage of `us-east-1`
# needs to be tested, keep the 'region' field out.
bucketclass = {
"interface": "CLI",
"backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
"placement_policy": "Mirror",
}
# Create a NooBucket that'll use the bucket class in order to test
# the mirroring policy
bucket = bucket_factory(1, "OC", bucketclass=bucketclass)[0]
return bucket, bucket.bucketclass.backingstores
@pytest.fixture(scope="session")
def default_storageclasses(request, teardown_factory_session):
"""
Returns dictionary with storageclasses. Keys represent reclaim policy of
storageclass. There are two storageclasses for each key. First is RBD based
and the second one is CephFS based. Storageclasses with Retain Reclaim
Policy are created from default storageclasses.
"""
scs = {constants.RECLAIM_POLICY_DELETE: [], constants.RECLAIM_POLICY_RETAIN: []}
# TODO(fbalak): Use proper constants after
# https://github.com/red-hat-storage/ocs-ci/issues/1056
# is resolved
for sc_name in ("ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"):
sc = OCS(kind=constants.STORAGECLASS, metadata={"name": sc_name})
sc.reload()
scs[constants.RECLAIM_POLICY_DELETE].append(sc)
sc.data["reclaimPolicy"] = constants.RECLAIM_POLICY_RETAIN
sc.data["metadata"]["name"] += "-retain"
sc._name = sc.data["metadata"]["name"]
sc.create()
teardown_factory_session(sc)
scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
return scs
@pytest.fixture(scope="class")
def install_logging(request):
"""
Setup and teardown
* The setup will deploy openshift-logging in the cluster
* The teardown will uninstall cluster-logging from the cluster
"""
def finalizer():
uninstall_cluster_logging()
request.addfinalizer(finalizer)
csv = ocp.OCP(
kind=constants.CLUSTER_SERVICE_VERSION,
namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
)
logging_csv = csv.get().get("items")
if logging_csv:
log.info("Logging is already configured, Skipping Installation")
return
log.info("Configuring Openshift-logging")
# Checks OCP version
ocp_version = get_running_ocp_version()
    # Creates namespace openshift-operators-redhat
ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
# Creates an operator-group for elasticsearch
assert ocp_logging_obj.create_elasticsearch_operator_group(
yaml_file=constants.EO_OG_YAML, resource_name="openshift-operators-redhat"
)
# Set RBAC policy on the project
assert ocp_logging_obj.set_rbac(
yaml_file=constants.EO_RBAC_YAML, resource_name="prometheus-k8s"
)
# Creates subscription for elastic-search operator
subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
subscription_yaml["spec"]["channel"] = ocp_version
helpers.create_resource(**subscription_yaml)
assert ocp_logging_obj.get_elasticsearch_subscription()
# Creates a namespace openshift-logging
ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
# Creates an operator-group for cluster-logging
assert ocp_logging_obj.create_clusterlogging_operator_group(
yaml_file=constants.CL_OG_YAML
)
# Creates subscription for cluster-logging
cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
cl_subscription["spec"]["channel"] = ocp_version
helpers.create_resource(**cl_subscription)
assert ocp_logging_obj.get_clusterlogging_subscription()
# Creates instance in namespace openshift-logging
cluster_logging_operator = OCP(
kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
)
log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
ocp_logging_obj.create_instance()
@pytest.fixture
def fio_pvc_dict():
"""
PVC template for fio workloads.
    Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture(scope="session")
def fio_pvc_dict_session():
"""
PVC template for fio workloads.
    Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture
def fio_configmap_dict():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture(scope="session")
def fio_configmap_dict_session():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture
def fio_job_dict():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="session")
def fio_job_dict_session():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="function")
def pgsql_factory_fixture(request):
"""
Pgsql factory fixture
"""
pgsql = Postgresql()
def factory(
replicas,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
sc_name=None,
):
"""
Factory to start pgsql workload
Args:
replicas (int): Number of pgbench pods to be deployed
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): scaling factor
timeout (int): Time in seconds to wait
"""
# Setup postgres
pgsql.setup_postgresql(replicas=replicas, sc_name=sc_name)
# Create pgbench benchmark
pgsql.create_pgbench_benchmark(
replicas=replicas,
clients=clients,
threads=threads,
transactions=transactions,
scaling_factor=scaling_factor,
timeout=timeout,
)
        # Wait for the pgbench pod to initialize and complete
pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
# Get pgbench pods
pgbench_pods = pgsql.get_pgbench_pods()
# Validate pgbench run and parse logs
pgsql.validate_pgbench_run(pgbench_pods)
return pgsql
def finalizer():
"""
Clean up
"""
pgsql.cleanup()
request.addfinalizer(finalizer)
return factory
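# Illustrative usage sketch (hypothetical test): start a pgbench benchmark with three
# replicas and wait up to 15 minutes for it to complete; all numbers are examples.
#
#   def test_example_pgsql(pgsql_factory_fixture):
#       pgsql = pgsql_factory_fixture(
#           replicas=3, clients=4, transactions=600, timeout=900
#       )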
@pytest.fixture(scope="function")
def jenkins_factory_fixture(request):
"""
Jenkins factory fixture
"""
jenkins = Jenkins()
def factory(num_projects=1, num_of_builds=1):
"""
Factory to start jenkins workload
Args:
num_projects (int): Number of Jenkins projects
num_of_builds (int): Number of builds per project
"""
# Jenkins template
jenkins.create_ocs_jenkins_template()
# Init number of projects
jenkins.number_projects = num_projects
# Create app jenkins
jenkins.create_app_jenkins()
# Create jenkins pvc
jenkins.create_jenkins_pvc()
# Create jenkins build config
jenkins.create_jenkins_build_config()
        # Wait for the jenkins deploy pod to reach the Completed state
jenkins.wait_for_jenkins_deploy_status(status=constants.STATUS_COMPLETED)
# Init number of builds per project
jenkins.number_builds_per_project = num_of_builds
# Start Builds
jenkins.start_build()
        # Wait for builds to reach the 'Complete' state
jenkins.wait_for_build_to_complete()
# Print table of builds
jenkins.print_completed_builds_results()
return jenkins
def finalizer():
"""
Clean up
"""
jenkins.cleanup()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def couchbase_factory_fixture(request):
"""
Couchbase factory fixture
"""
couchbase = CouchBase()
def factory(replicas=3, run_in_bg=False, skip_analyze=True, sc_name=None):
"""
Factory to start couchbase workload
Args:
replicas (int): Number of couchbase workers to be deployed
run_in_bg (bool): Run IOs in background as option
skip_analyze (bool): Skip logs analysis as option
"""
# Setup couchbase
couchbase.setup_cb()
# Create couchbase workers
couchbase.create_couchbase_worker(replicas=replicas, sc_name=sc_name)
# Run couchbase workload
couchbase.run_workload(replicas=replicas, run_in_bg=run_in_bg)
# Run sanity check on data logs
couchbase.analyze_run(skip_analyze=skip_analyze)
return couchbase
def finalizer():
"""
Clean up
"""
couchbase.teardown()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def amq_factory_fixture(request):
"""
AMQ factory fixture
"""
amq = AMQ()
def factory(
sc_name,
kafka_namespace=constants.AMQ_NAMESPACE,
size=100,
replicas=3,
topic_name="my-topic",
user_name="my-user",
partitions=1,
topic_replicas=1,
num_of_producer_pods=1,
num_of_consumer_pods=1,
value="10000",
since_time=1800,
):
"""
Factory to start amq workload
Args:
            sc_name (str): Name of storage class
kafka_namespace (str): Namespace where kafka cluster to be created
size (int): Size of the storage
replicas (int): Number of kafka and zookeeper pods to be created
topic_name (str): Name of the topic to be created
user_name (str): Name of the user to be created
partitions (int): Number of partitions of topic
topic_replicas (int): Number of replicas of topic
num_of_producer_pods (int): Number of producer pods to be created
num_of_consumer_pods (int): Number of consumer pods to be created
value (str): Number of messages to be sent and received
            since_time (int): Number of seconds required to send the messages
"""
# Setup kafka cluster
amq.setup_amq_cluster(
sc_name=sc_name, namespace=kafka_namespace, size=size, replicas=replicas
)
# Run open messages
amq.create_messaging_on_amq(
topic_name=topic_name,
user_name=user_name,
partitions=partitions,
replicas=topic_replicas,
num_of_producer_pods=num_of_producer_pods,
num_of_consumer_pods=num_of_consumer_pods,
value=value,
)
# Wait for some time to generate msg
waiting_time = 60
log.info(f"Waiting for {waiting_time}sec to generate msg")
time.sleep(waiting_time)
# Check messages are sent and received
threads = amq.run_in_bg(
namespace=kafka_namespace, value=value, since_time=since_time
)
return amq, threads
def finalizer():
"""
Clean up
"""
# Clean up
amq.cleanup()
request.addfinalizer(finalizer)
return factory
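# Illustrative usage sketch (hypothetical test): deploy a Kafka cluster on the given
# storage class and keep the producer/consumer validation running in background
# threads; the storage class name is an example only.
#
#   def test_example_amq(amq_factory_fixture):
#       amq, bg_threads = amq_factory_fixture(sc_name="ocs-storagecluster-ceph-rbd")
#       # ... run the test while messages are produced and consumed in background ...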
@pytest.fixture
def measurement_dir(tmp_path):
"""
    Returns the directory path where all results related to measurement should be
    stored. If 'measurement_dir' is provided by config then it is used, otherwise
    a new directory is generated.
Returns:
str: Path to measurement directory
"""
if config.ENV_DATA.get("measurement_dir"):
measurement_dir = config.ENV_DATA.get("measurement_dir")
log.info(f"Using measurement dir from configuration: {measurement_dir}")
else:
measurement_dir = os.path.join(os.path.dirname(tmp_path), "measurement_results")
if not os.path.exists(measurement_dir):
log.info(f"Measurement dir {measurement_dir} doesn't exist. Creating it.")
os.mkdir(measurement_dir)
return measurement_dir
@pytest.fixture()
def multi_dc_pod(multi_pvc_factory, dc_pod_factory, service_account_factory):
"""
Prepare multiple dc pods for the test
Returns:
list: Pod instances
"""
def factory(
num_of_pvcs=1,
pvc_size=100,
project=None,
access_mode="RWO",
pool_type="rbd",
timeout=60,
):
dict_modes = {
"RWO": "ReadWriteOnce",
"RWX": "ReadWriteMany",
"RWX-BLK": "ReadWriteMany-Block",
}
dict_types = {"rbd": "CephBlockPool", "cephfs": "CephFileSystem"}
if access_mode in "RWX-BLK" and pool_type in "rbd":
modes = dict_modes["RWX-BLK"]
create_rbd_block_rwx_pod = True
else:
modes = dict_modes[access_mode]
create_rbd_block_rwx_pod = False
pvc_objs = multi_pvc_factory(
interface=dict_types[pool_type],
access_modes=[modes],
size=pvc_size,
num_of_pvc=num_of_pvcs,
project=project,
timeout=timeout,
)
dc_pods = []
dc_pods_res = []
sa_obj = service_account_factory(project=project)
with ThreadPoolExecutor() as p:
for pvc_obj in pvc_objs:
if create_rbd_block_rwx_pod:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=constants.CEPHBLOCKPOOL,
pvc=pvc_obj,
raw_block_pv=True,
sa_obj=sa_obj,
)
)
else:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=dict_types[pool_type],
pvc=pvc_obj,
sa_obj=sa_obj,
)
)
for dc in dc_pods_res:
pod_obj = dc.result()
if create_rbd_block_rwx_pod:
log.info(
"#### setting attribute pod_type since "
f"create_rbd_block_rwx_pod = {create_rbd_block_rwx_pod}"
)
setattr(pod_obj, "pod_type", "rbd_block_rwx")
else:
setattr(pod_obj, "pod_type", "")
dc_pods.append(pod_obj)
with ThreadPoolExecutor() as p:
for dc in dc_pods:
p.submit(
helpers.wait_for_resource_state,
resource=dc,
state=constants.STATUS_RUNNING,
timeout=120,
)
return dc_pods
return factory
@pytest.fixture(scope="session")
def htpasswd_path(tmpdir_factory):
"""
Returns:
string: Path to HTPasswd file with additional usernames
"""
return str(tmpdir_factory.mktemp("idp_data").join("users.htpasswd"))
@pytest.fixture(scope="session")
def htpasswd_identity_provider(request):
"""
Creates HTPasswd Identity provider.
Returns:
object: OCS object representing OCP OAuth object with HTPasswd IdP
"""
users.create_htpasswd_idp()
cluster = OCS(kind=constants.OAUTH, metadata={"name": "cluster"})
cluster.reload()
def finalizer():
"""
Remove HTPasswd IdP
"""
# TODO(fbalak): remove HTPasswd identityProvider
# cluster.ocp.patch(
# resource_name='cluster',
# params=f'[{ "op": "remove", "path": "/spec/identityProviders" }]'
# )
# users.delete_htpasswd_secret()
request.addfinalizer(finalizer)
return cluster
@pytest.fixture(scope="function")
def user_factory(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session")
def user_factory_session(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session", autouse=True)
def ceph_toolbox(request):
"""
    This fixture creates the ceph toolbox pod for manually created deployments,
    if it does not already exist.
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"].get("teardown")
skip_ocs = config.ENV_DATA["skip_ocs_deployment"]
if not (deploy or teardown or skip_ocs):
try:
# Creating toolbox pod
setup_ceph_toolbox()
except CommandFailed:
log.info("Failed to create toolbox")
@pytest.fixture(scope="function")
def node_drain_teardown(request):
"""
Tear down function after Node drain
"""
def finalizer():
"""
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by marking them as schedulable
"""
scheduling_disabled_nodes = [
n.name
for n in get_node_objs()
if n.ocp.get_resource_status(n.name)
== constants.NODE_READY_SCHEDULING_DISABLED
]
if scheduling_disabled_nodes:
schedule_nodes(scheduling_disabled_nodes)
ceph_health_check(tries=60)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function")
def node_restart_teardown(request, nodes):
"""
Make sure all nodes are up again
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by restarting the nodes
"""
def finalizer():
# Start the powered off nodes
nodes.restart_nodes_by_stop_and_start_teardown()
try:
node.wait_for_nodes_status(status=constants.NODE_READY)
except ResourceWrongStatusException:
# Restart the nodes if in NotReady state
not_ready_nodes = [
n
for n in node.get_node_objs()
if n.ocp.get_resource_status(n.name) == constants.NODE_NOT_READY
]
if not_ready_nodes:
log.info(
f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
)
nodes.restart_nodes(not_ready_nodes)
node.wait_for_nodes_status(status=constants.NODE_READY)
request.addfinalizer(finalizer)
@pytest.fixture()
def mcg_connection_factory(request, mcg_obj, cld_mgr):
"""
Create a new MCG connection for given platform. If there already exists
a connection for the platform then return this previously created
connection.
"""
created_connections = {}
def _create_connection(platform=constants.AWS_PLATFORM, name=None):
"""
Args:
platform (str): Platform used for connection
name (str): New connection name. If not provided then new name will
                be generated. New name will be used only if there is no
                existing connection for the given platform
Returns:
str: connection name
"""
if platform not in created_connections:
connection_name = name or create_unique_resource_name(
constants.MCG_CONNECTION, platform
)
mcg_obj.create_connection(cld_mgr, platform, connection_name)
created_connections[platform] = connection_name
return created_connections[platform]
def _connections_cleanup():
for platform in created_connections:
mcg_obj.delete_ns_connection(created_connections[platform])
request.addfinalizer(_connections_cleanup)
return _create_connection
@pytest.fixture()
def ns_resource_factory(
request, mcg_obj, cld_mgr, cloud_uls_factory, mcg_connection_factory
):
"""
Create a namespace resource factory. Calling this fixture creates a new namespace resource.
"""
created_ns_resources = []
def _create_ns_resources(platform=constants.AWS_PLATFORM):
# Create random connection_name
rand_connection = mcg_connection_factory(platform)
# Create the actual namespace resource
rand_ns_resource = create_unique_resource_name(
constants.MCG_NS_RESOURCE, platform
)
if platform == constants.RGW_PLATFORM:
region = None
else:
# TODO: fix this when https://github.com/red-hat-storage/ocs-ci/issues/3338
# is resolved
region = "us-east-2"
target_bucket_name = mcg_obj.create_namespace_resource(
rand_ns_resource,
rand_connection,
region,
cld_mgr,
cloud_uls_factory,
platform,
)
log.info(f"Check validity of NS resource {rand_ns_resource}")
if platform == constants.AWS_PLATFORM:
endpoint = constants.MCG_NS_AWS_ENDPOINT
elif platform == constants.AZURE_PLATFORM:
endpoint = constants.MCG_NS_AZURE_ENDPOINT
elif platform == constants.RGW_PLATFORM:
rgw_conn = RGW()
endpoint, _, _ = rgw_conn.get_credentials()
else:
raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")
mcg_obj.check_ns_resource_validity(
rand_ns_resource, target_bucket_name, endpoint
)
created_ns_resources.append(rand_ns_resource)
return target_bucket_name, rand_ns_resource
def ns_resources_cleanup():
for ns_resource in created_ns_resources:
mcg_obj.delete_ns_resource(ns_resource)
request.addfinalizer(ns_resources_cleanup)
return _create_ns_resources
@pytest.fixture()
def snapshot_factory(request):
"""
Snapshot factory. Calling this fixture creates a volume snapshot from the
specified PVC
"""
instances = []
def factory(pvc_obj, wait=True, snapshot_name=None):
"""
Args:
pvc_obj (PVC): PVC object from which snapshot has to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name (str): Name to be provided for snapshot
Returns:
OCS: OCS instance of kind VolumeSnapshot
"""
        snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
        # Register the snapshot so that the finalizer below can delete it
        instances.append(snap_obj)
        return snap_obj
def finalizer():
"""
Delete the snapshots
"""
snapcontent_objs = []
# Get VolumeSnapshotContent form VolumeSnapshots and delete
# VolumeSnapshots
for instance in instances:
if not instance.is_deleted:
snapcontent_objs.append(
helpers.get_snapshot_content_obj(snap_obj=instance)
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for VolumeSnapshotContents to be deleted
for snapcontent_obj in snapcontent_objs:
snapcontent_obj.ocp.wait_for_delete(
resource_name=snapcontent_obj.name, timeout=240
)
request.addfinalizer(finalizer)
return factory
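# Illustrative usage sketch (hypothetical test): snapshot a freshly created PVC and
# wait for the VolumeSnapshot to become ready; the snapshot name is an example only.
#
#   def test_example_snapshot(pvc_factory, snapshot_factory):
#       pvc_obj = pvc_factory()
#       snap_obj = snapshot_factory(pvc_obj, snapshot_name="example-snap")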
@pytest.fixture()
def multi_snapshot_factory(snapshot_factory):
"""
Snapshot factory. Calling this fixture creates volume snapshots of each
PVC in the provided list
"""
def factory(pvc_obj, wait=True, snapshot_name_suffix=None):
"""
Args:
            pvc_obj (list): List of PVC objects from which snapshots have to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name_suffix (str): Suffix to be added to snapshot
Returns:
OCS: List of OCS instances of kind VolumeSnapshot
"""
snapshot = []
for obj in pvc_obj:
log.info(f"Creating snapshot of PVC {obj.name}")
snapshot_name = (
f"{obj.name}-{snapshot_name_suffix}" if snapshot_name_suffix else None
)
snap_obj = snapshot_factory(
pvc_obj=obj, snapshot_name=snapshot_name, wait=wait
)
snapshot.append(snap_obj)
return snapshot
return factory
@pytest.fixture()
def snapshot_restore_factory(request):
"""
Snapshot restore factory. Calling this fixture creates new PVC out of the
specified VolumeSnapshot.
"""
instances = []
def factory(
snapshot_obj,
restore_pvc_name=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
):
"""
Args:
snapshot_obj (OCS): OCS instance of kind VolumeSnapshot which has
to be restored to new PVC
restore_pvc_name (str): Name to be provided for restored pvc
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
Returns:
PVC: Restored PVC object
"""
snapshot_info = snapshot_obj.get()
size = size or snapshot_info["status"]["restoreSize"]
restore_pvc_name = restore_pvc_name or (
helpers.create_unique_resource_name(snapshot_obj.name, "restore")
)
if snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHBLOCKPOOL).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHBLOCKPOOL).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_RBD_PVC_RESTORE_YAML
interface = constants.CEPHBLOCKPOOL
elif snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHFILESYSTEM).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHFILESYSTEM).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_CEPHFS_PVC_RESTORE_YAML
interface = constants.CEPHFILESYSTEM
restored_pvc = create_restore_pvc(
sc_name=storageclass,
snap_name=snapshot_obj.name,
namespace=snapshot_obj.namespace,
size=size,
pvc_name=restore_pvc_name,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
)
instances.append(restored_pvc)
restored_pvc.snapshot = snapshot_obj
restored_pvc.interface = interface
if status:
helpers.wait_for_resource_state(restored_pvc, status)
return restored_pvc
def finalizer():
"""
Delete the PVCs
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
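# Illustrative usage sketch (hypothetical test): restore a ready snapshot into a new
# PVC and wait for it to reach Bound (the default 'status').
#
#   def test_example_restore(pvc_factory, snapshot_factory, snapshot_restore_factory):
#       snap_obj = snapshot_factory(pvc_factory())
#       restored_pvc = snapshot_restore_factory(snapshot_obj=snap_obj)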
@pytest.fixture()
def multi_snapshot_restore_factory(snapshot_restore_factory):
"""
    Snapshot restore factory. Calling this fixture creates a set of new PVCs, one
    out of each VolumeSnapshot provided in the list.
"""
def factory(
snapshot_obj,
restore_pvc_suffix=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
wait_each=False,
):
"""
Args:
            snapshot_obj (list): List of OCS instances of kind VolumeSnapshot which
                have to be restored to new PVCs
restore_pvc_suffix (str): Suffix to be added to pvc name
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
PVC: List of restored PVC object
"""
new_pvcs = []
status_tmp = status if wait_each else ""
for snap_obj in snapshot_obj:
log.info(f"Creating a PVC from snapshot {snap_obj.name}")
restore_pvc_name = (
f"{snap_obj.name}-{restore_pvc_suffix}" if restore_pvc_suffix else None
)
restored_pvc = snapshot_restore_factory(
snapshot_obj=snap_obj,
restore_pvc_name=restore_pvc_name,
storageclass=storageclass,
size=size,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
status=status_tmp,
)
            restored_pvc.snapshot = snap_obj
new_pvcs.append(restored_pvc)
if status and not wait_each:
for restored_pvc in new_pvcs:
helpers.wait_for_resource_state(restored_pvc, status)
return new_pvcs
return factory
@pytest.fixture(scope="session", autouse=True)
def collect_logs_fixture(request):
"""
    This fixture collects ocs logs after tier execution, which allows us to see
    the cluster's status after the execution regardless of the execution result.
"""
def finalizer():
"""
        Tracking both logs separately reduces chances of collision
"""
if not config.RUN["cli_params"].get("deploy") and not config.RUN[
"cli_params"
].get("teardown"):
if config.REPORTING["collect_logs_on_success_run"]:
collect_ocs_logs("testcases", ocs=False, status_failure=False)
collect_ocs_logs("testcases", ocp=False, status_failure=False)
request.addfinalizer(finalizer)
def get_ready_noobaa_endpoint_count(namespace):
"""
    Get the number of ready noobaa endpoints
"""
pods_info = get_pods_having_label(
label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace
)
ready_count = 0
for ep_info in pods_info:
container_statuses = ep_info.get("status", {}).get("containerStatuses")
if container_statuses is not None and len(container_statuses) > 0:
if container_statuses[0].get("ready"):
ready_count += 1
return ready_count
@pytest.fixture(scope="function")
def nb_ensure_endpoint_count(request):
"""
Validate and ensure the number of running noobaa endpoints
"""
cls = request.cls
min_ep_count = cls.MIN_ENDPOINT_COUNT
max_ep_count = cls.MAX_ENDPOINT_COUNT
assert min_ep_count <= max_ep_count
namespace = defaults.ROOK_CLUSTER_NAMESPACE
should_wait = False
# prior to 4.6 we configured the ep count directly on the noobaa cr.
if float(config.ENV_DATA["ocs_version"]) < 4.6:
noobaa = OCP(kind="noobaa", namespace=namespace)
resource = noobaa.get()["items"][0]
endpoints = resource.get("spec", {}).get("endpoints", {})
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
else:
storage_cluster = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
resource = storage_cluster.get()["items"][0]
resource_name = resource["metadata"]["name"]
endpoints = (
resource.get("spec", {}).get("multiCloudGateway", {}).get("endpoints", {})
)
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if should_wait:
# Wait for the NooBaa endpoint pods to stabilize
try:
for ready_nb_ep_count in TimeoutSampler(
300, 30, get_ready_noobaa_endpoint_count, namespace
):
if min_ep_count <= ready_nb_ep_count <= max_ep_count:
log.info(
f"NooBaa endpoints stabilized. Ready endpoints: {ready_nb_ep_count}"
)
break
log.info(
f"Waiting for the NooBaa endpoints to stabilize. "
f"Current ready count: {ready_nb_ep_count}"
)
except TimeoutExpiredError:
raise TimeoutExpiredError(
"NooBaa endpoints did not stabilize in time.\n"
f"Min count: {min_ep_count}, max count: {max_ep_count}, ready count: {ready_nb_ep_count}"
)
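# Note on nb_ensure_endpoint_count above (illustrative, not part of the original code):
# the escaped f-strings render to plain JSON merge patches, e.g. with min_ep_count=2
#   OCS >= 4.6 (StorageCluster): {"spec": {"multiCloudGateway": {"endpoints": {"minCount": 2}}}}
#   OCS <  4.6 (noobaa CR):      {"spec": {"endpoints": {"minCount": 2}}}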
@pytest.fixture()
def pvc_clone_factory(request):
"""
Calling this fixture creates a clone from the specified PVC
"""
instances = []
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
):
"""
Args:
pvc_obj (PVC): PVC object from which clone has to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
Returns:
PVC: PVC instance
"""
assert (
pvc_obj.provisioner in constants.OCS_PROVISIONERS
), f"Unknown provisioner in PVC {pvc_obj.name}"
if pvc_obj.provisioner == "openshift-storage.rbd.csi.ceph.com":
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
interface = constants.CEPHBLOCKPOOL
elif pvc_obj.provisioner == "openshift-storage.cephfs.csi.ceph.com":
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
interface = constants.CEPHFILESYSTEM
size = size or pvc_obj.get().get("spec").get("resources").get("requests").get(
"storage"
)
storageclass = storageclass or pvc_obj.backed_sc
access_mode = access_mode or pvc_obj.get_pvc_access_mode
volume_mode = volume_mode or getattr(pvc_obj, "volume_mode", None)
# Create clone
clone_pvc_obj = pvc.create_pvc_clone(
sc_name=storageclass,
parent_pvc=pvc_obj.name,
clone_yaml=clone_yaml,
pvc_name=clone_name,
storage_size=size,
access_mode=access_mode,
volume_mode=volume_mode,
)
instances.append(clone_pvc_obj)
clone_pvc_obj.parent = pvc_obj
clone_pvc_obj.volume_mode = volume_mode
clone_pvc_obj.interface = interface
if status:
helpers.wait_for_resource_state(clone_pvc_obj, status)
return clone_pvc_obj
def finalizer():
"""
Delete the cloned PVCs
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
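# Illustrative usage of pvc_clone_factory above (assumed test code, not part of this module;
# pvc_factory is assumed to be another fixture that yields a bound parent PVC):
#   def test_pvc_clone(pvc_factory, pvc_clone_factory):
#       parent_pvc = pvc_factory(interface=constants.CEPHBLOCKPOOL)
#       clone = pvc_clone_factory(parent_pvc, clone_name="clone-of-parent")
# The finalizer registered above deletes the clone and waits for its PV on teardown.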
@pytest.fixture(scope="session", autouse=True)
def reportportal_customization(request):
if hasattr(request.node.config, "py_test_service"):
rp_service = request.node.config.py_test_service
if not hasattr(rp_service.RP, "rp_client"):
request.config._metadata[
"RP Launch URL:"
] = "Problem with RP, launch URL is not available!"
return
launch_id = rp_service.RP.rp_client.launch_id
project = rp_service.RP.rp_client.project
endpoint = rp_service.RP.rp_client.endpoint
launch_url = f"{endpoint}/ui/#{project}/launches/all/{launch_id}/{launch_id}"
config.REPORTING["rp_launch_url"] = launch_url
config.REPORTING["rp_launch_id"] = launch_id
config.REPORTING["rp_endpoint"] = endpoint
config.REPORTING["rp_project"] = project
request.config._metadata["RP Launch URL:"] = launch_url
@pytest.fixture()
def multi_pvc_clone_factory(pvc_clone_factory):
"""
Calling this fixture creates clone from each PVC in the provided list of PVCs
"""
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
wait_each=False,
):
"""
Args:
pvc_obj (list): List PVC object from which clone has to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
PVC: List PVC instance
"""
cloned_pvcs = []
status_tmp = status if wait_each else ""
for obj in pvc_obj:
# Create clone
clone_pvc_obj = pvc_clone_factory(
pvc_obj=obj,
clone_name=clone_name,
storageclass=storageclass,
size=size,
access_mode=access_mode,
volume_mode=volume_mode,
status=status_tmp,
)
cloned_pvcs.append(clone_pvc_obj)
if status and not wait_each:
for cloned_pvc in cloned_pvcs:
helpers.wait_for_resource_state(cloned_pvc, status)
return cloned_pvcs
return factory
@pytest.fixture(scope="function")
def multiple_snapshot_and_clone_of_postgres_pvc_factory(
request,
multi_snapshot_factory,
multi_snapshot_restore_factory,
multi_pvc_clone_factory,
):
"""
Calling this fixture creates multiple snapshots & clone of postgres PVC
"""
instances = []
def factory(pvc_size_new, pgsql):
"""
Args:
pvc_size_new (int): Resize/Expand the pvc size
pgsql (obj): Pgsql obj
Returns:
Postgres pod: Pod instances
"""
# Get postgres pvc list obj
postgres_pvcs_obj = pgsql.get_postgres_pvc()
snapshots = multi_snapshot_factory(pvc_obj=postgres_pvcs_obj)
log.info("Created snapshots from all the PVCs and snapshots are in Ready state")
restored_pvc_objs = multi_snapshot_restore_factory(snapshot_obj=snapshots)
log.info("Created new PVCs from all the snapshots")
cloned_pvcs = multi_pvc_clone_factory(
pvc_obj=restored_pvc_objs, volume_mode=constants.VOLUME_MODE_FILESYSTEM
)
log.info("Created new PVCs from all restored volumes")
# Attach a new pgsql pod cloned pvcs
sset_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=cloned_pvcs, postgres_name="postgres-clone", run_benchmark=False
)
instances.extend(sset_list)
# Resize cloned PVCs
for pvc_obj in cloned_pvcs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
new_snapshots = multi_snapshot_factory(pvc_obj=cloned_pvcs)
log.info(
"Created snapshots from all the cloned PVCs"
" and snapshots are in Ready state"
)
new_restored_pvc_objs = multi_snapshot_restore_factory(
snapshot_obj=new_snapshots
)
log.info("Created new PVCs from all the snapshots and in Bound state")
# Attach a new pgsql pod restored pvcs
pgsql_obj_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=new_restored_pvc_objs,
postgres_name="postgres-clone-restore",
run_benchmark=False,
)
instances.extend(pgsql_obj_list)
# Resize restored PVCs
for pvc_obj in new_restored_pvc_objs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
return instances
def finalizer():
"""
Delete the list of pod objects created
"""
for instance in instances:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def es(request):
"""
Create In-cluster elastic-search deployment for benchmark-operator tests.
    using the name es as a shortcut for elastic-search, for simplicity
"""
def teardown():
es.cleanup()
request.addfinalizer(teardown)
es = ElasticSearch()
return es
| 33.008178 | 166 | 0.636573 |
73c2627c84bf4f1a6719a082d7e8442176e7729d | 14,077 | py | Python | pydmrs/matching/aligned_matching.py | vishalbelsare/pydmrs | 795b35dba4986fa9084eaa81fb16206cb131a752 | [
"MIT"
] | 14 | 2015-11-27T14:22:26.000Z | 2022-03-02T21:11:28.000Z | pydmrs/matching/aligned_matching.py | vishalbelsare/pydmrs | 795b35dba4986fa9084eaa81fb16206cb131a752 | [
"MIT"
] | 25 | 2015-11-27T16:08:19.000Z | 2019-08-28T10:13:06.000Z | pydmrs/matching/aligned_matching.py | vishalbelsare/pydmrs | 795b35dba4986fa9084eaa81fb16206cb131a752 | [
"MIT"
] | 8 | 2015-11-01T17:57:44.000Z | 2021-06-04T05:33:38.000Z | from pydmrs.core import SortDictDmrs, span_pred_key, abstractSortDictDmrs
from pydmrs.matching.common import are_equal_links
# ------------------------------------------------------------------------
def match_nodes(nodes1, nodes2, excluded=[]):
"""
:param nodes1: A list of Nodes from the DMRS to be matched, sorted by span_pred_key.
:param nodes2: A list of Nodes from the DMRS against which we match, sorted by span_pred_key.
:param excluded: A list of nodeids which should not be used for matching.
:return: A list of lists of nodeid pairs. The first element in the pair is from small DMRS, the second from the
larger one. The pairs are listed in reverse span_pred_key order of the corresponding nodes. Returns [] if no
match found.
"""
if not nodes1 or not nodes2:
return []
matches = []
earliest = len(nodes1)
longest = 0
for i, node2 in enumerate(nodes2):
if len(nodes2) - i < longest: # Not enough nodes left to beat the current longest match.
break
if excluded and node2.nodeid in excluded:
continue
for j, node1 in enumerate(nodes1):
if j > earliest: # To avoid repetition.
break
if node1 == node2:
best_matches = match_nodes(nodes1[j + 1:], nodes2[i + 1:], excluded=excluded)
if best_matches:
for match in best_matches:
match.append((node1.nodeid, node2.nodeid))
else:
best_matches = [[(node1.nodeid, node2.nodeid)]]
earliest = j
longest = max(longest, len(best_matches[0]))
matches.extend(best_matches)
if matches:
max_len = len(max(matches, key=len))
return [m for m in matches if len(m) == max_len]
else:
return []
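# Worked example for match_nodes above (hypothetical nodeids, illustration only): with
# nodes1 = [a, b] and nodes2 = [x, y, z] where the only equalities are a == y and b == z,
# the single longest alignment is returned in reverse span_pred_key order:
#   match_nodes(nodes1, nodes2)  ->  [[(b.nodeid, z.nodeid), (a.nodeid, y.nodeid)]]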
def add_quantifier_matches(dmrs1, dmrs2, longest_matches):
for m in longest_matches:
q_pairs = []
for nodeid1, nodeid2 in m:
try:
q_link1 = dmrs1.get_in(nodeid1, rargname='RSTR', post='H').pop()
q_link2 = dmrs2.get_in(nodeid2, rargname='RSTR', post='H').pop()
except KeyError:
continue
if dmrs1[q_link1.start] == dmrs2[q_link2.start]:
q_pairs.append((q_link1.start, q_link2.start))
m.extend(q_pairs)
def get_compounds(dmrs, compound_preds):
compounds = []
for node in dmrs.iter_nodes():
if str(node.pred) in compound_preds:
arg1 = dmrs.get_out_nodes(node.nodeid, rargname='ARG1').pop().nodeid
arg2 = dmrs.get_out_nodes(node.nodeid, rargname='ARG2').pop().nodeid
compounds.append({"node": node, "args": (arg1, arg2)})
return compounds
def add_compound_matches(small_dmrs, large_dmrs, longest_matches, compound_preds):
small_compounds = get_compounds(small_dmrs, compound_preds)
large_compounds = get_compounds(large_dmrs, compound_preds)
for m in longest_matches:
cmpd_pairs = []
for small_cmpd in small_compounds:
query_arg1 = None
query_arg2 = None
for small, large in m:
if small == small_cmpd['args'][0]:
query_arg1 = large
elif small == small_cmpd['args'][1]:
query_arg2 = large
if query_arg1 and query_arg2:
break
else:
continue
for large_cmpd in large_compounds:
if (query_arg1, query_arg2) == large_cmpd['args']:
if small_cmpd['node'] == large_cmpd['node']:
cmpd_pairs.append((small_cmpd['node'].nodeid, large_cmpd['node'].nodeid))
m.extend(cmpd_pairs)
def find_extra_surface_nodeids(nodeids, large_dmrs):
""" Finds nodeids present in the aligned matched region of the large DMRS,
but which have no equivalents in the small DMRS.
:param nodeids Nodeids from the large DMRS which have equivalents in the small one, sorted by span_pred_key of
their nodes.
:param large_dmrs The large DMRS.
:return A list of additional nodeids sharing the span with nodeids but without equivalents in the small DMRS.
"""
max_cto = large_dmrs[nodeids[-1]].cto
extra_nodeids = []
reached_start = False
reached_end = False
for i, node in enumerate(large_dmrs.nodes):
if node.nodeid == nodeids[0]:
first_overlap_orderid = i
min_cfrom = node.cfrom
max_cto = max(max_cto, node.cto)
            while first_overlap_orderid > 0:
prev_node = large_dmrs.nodes[first_overlap_orderid - 1]
prev_cfrom = prev_node.cfrom
if prev_cfrom == min_cfrom and prev_node.cto <= max_cto:
first_overlap_orderid -= 1
extra_nodeids.append(prev_node.nodeid)
max_cto = max(max_cto, prev_node.cto)
else:
break
reached_start = True
elif not reached_start:
continue
elif reached_end and node.cfrom >= max_cto:
break
else:
max_cto = max(max_cto, node.cto)
if node.nodeid not in nodeids and node.nodeid not in extra_nodeids:
extra_nodeids.append(node.nodeid)
if node.nodeid == nodeids[-1]:
reached_end = True
return extra_nodeids
def get_links(dmrs, nodeids):
"""
:param dmrs: A Dmrs object.
:param nodeids: A list of nodeids.
:return: A list of all links starting and ending on a node from nodeids.
"""
links = []
eq_links = set()
for nodeid in nodeids:
node_links = dmrs.get_out(nodeid)
for link in node_links:
if link.end in nodeids:
links.append(link)
node_links = dmrs.get_eq(nodeid)
for link in node_links:
if link not in eq_links:
eq_links.add(link)
links.extend(eq_links)
return links
def get_subgraph(dmrs, subgraph_nodeids):
""" Returns a subgraph of dmrs containing only nodes with subgraph_nodeids and all the links between them.
:param dmrs: A Dmrs object.
:param subgraph_nodeids: A list of nodeids.
:return A SortDictDmrs containing only nodes with subgraph_nodeids and links between them.
"""
nodes = [dmrs[nodeid] for nodeid in subgraph_nodeids]
return SortDictDmrs(nodes, links=get_links(dmrs, subgraph_nodeids), node_key=span_pred_key)
# -------------------------------------------------------------------------------
def get_link_diff(small_dmrs, matched_subgraph, matching_nodeids):
"""
:param small_dmrs A Dmrs which we're matching.
:param matched_subgraph A Dmrs. A subgraph of the larger DMRS returned as a match for small_dmrs.
:param matching_nodeids A list of pairs of nodeids. The first nodeid in each pair comes from small_dmrs, the second
comes from the large dmrs.
    :return three lists of links:
1) links present only in the small dmrs
2) links present only in the matched subgraph
3) common links.
"""
both = []
small_only = []
subgraph_only = []
checked_eq_links = set()
for small_nodeid, subgraph_nodeid in matching_nodeids:
if small_nodeid:
small_links = small_dmrs.get_out(small_nodeid) | small_dmrs.get_eq(small_nodeid)
subgraph_links = list(matched_subgraph.get_out(subgraph_nodeid))
links_flag = [False] * len(subgraph_links)
for link1 in small_links:
# Check if the EQ has been counted already.
if not link1.rargname:
if link1 in checked_eq_links:
continue
checked_eq_links.add(link1)
match_found = False
for link2 in subgraph_links:
if are_equal_links(link1, link2, small_dmrs, matched_subgraph):
both.append(link1)
match_found = True
links_flag[subgraph_links.index(link2)] = True
break
if not match_found:
small_only.append(link1)
for i in range(0, len(subgraph_links)):
if not links_flag[i]:
subgraph_only.append(subgraph_links[i])
else:
subgraph_only.extend(matched_subgraph.get_out(subgraph_nodeid))
checked_eq_links = set()
for nodeid in small_dmrs:
if nodeid not in list(zip(*matching_nodeids))[0]:
small_only.extend(small_dmrs.get_out(nodeid))
eq_links = small_dmrs.get_eq(nodeid)
small_only.extend({link for link in eq_links if link not in checked_eq_links})
checked_eq_links.update(eq_links)
return small_only, subgraph_only, both
# ------------------------------------------------------------------------------
## IMPORTANT ##
def get_matching_nodeids(small_dmrs, large_dmrs, all_surface=False, large_excluded=None):
""" Finds matching pairs of nodeids between small_dmrs and large_dmrs. Starts by matching all
nodes but quantifiers, then matches quantifiers for nouns with matches.
:param small_dmrs A DMRS object used as a match query,
:param large_dmrs A DMRS object to be searched for a match.
:param all_surface If true, include all nodes from the aligned surface region.
If false, find only the nodes with equivalents in small_dmrs.
:param large_excluded The nodeids from the large DMRS to be ignored during matching.
:return A list of lists of matched nodeid pairs (small_dmrs nodeid, large_dmrs nodeid).
A list of lists, in case more than one best match found.
"""
# Convert DMRSs to SortDictDmrs with span_pred_key node if needed.
if not isinstance(small_dmrs, SortDictDmrs) or (small_dmrs.node_key != span_pred_key):
small_dmrs = small_dmrs.convert_to(abstractSortDictDmrs(node_key=span_pred_key))
if not isinstance(large_dmrs, SortDictDmrs) or (large_dmrs.node_key != span_pred_key):
large_dmrs = large_dmrs.convert_to(abstractSortDictDmrs(node_key=span_pred_key))
# Filter quantifiers.
small_no_qs = [n for n in small_dmrs.nodes if not small_dmrs.is_quantifier(n.nodeid)]
large_no_qs = [n for n in large_dmrs.nodes if not large_dmrs.is_quantifier(n.nodeid)]
    # Filter compound_name and compound predicates.
filtered_pred = ['compound', 'compound_name']
filtered_small = [n for n in small_no_qs if str(n.pred) not in filtered_pred]
filtered_large = [n for n in large_no_qs if str(n.pred) not in filtered_pred]
longest_matches = match_nodes(filtered_small, filtered_large,
excluded=large_excluded) # list of lists of nodeid pairs
add_quantifier_matches(small_dmrs, large_dmrs, longest_matches)
add_compound_matches(small_dmrs, large_dmrs, longest_matches, filtered_pred)
max_len = len(max(longest_matches, key=len)) if longest_matches else 0
longest_matches = [m for m in longest_matches if len(m) == max_len]
# Returned in reverse span_pred_key order.
all_matched_nodeids = []
for match in longest_matches:
matched_large_nodeids = list(reversed((list(zip(*match))[1]))) # span_pred_key order
if all_surface:
extra_overlap_nodeids = find_extra_surface_nodeids(matched_large_nodeids,
large_dmrs)
match.extend([(None, nodeid) for nodeid in extra_overlap_nodeids])
all_matched_nodeids.append(match)
return all_matched_nodeids
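# Illustrative end-to-end sketch (assumes two parsed Dmrs objects; not taken from the original docs):
#   matches = get_matching_nodeids(small_dmrs, large_dmrs, all_surface=True)
#   if matches:
#       subgraph = get_matched_subgraph(matches[0], large_dmrs)
#       fscore = get_fscore(*get_score(small_dmrs, subgraph, matches[0]))
# get_best_subgraph below wraps this loop over every candidate match.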
def get_matched_subgraph(matching_nodeids, large_dmrs):
"""
:param matching_nodeids: A list of pairs of matches nodeids from the small and large dmrs.
:param large_dmrs: A Dmrs.
:return: A Dmrs. A subgraph of large_dmrs containing only nodes with nodeids in matching_nodeids.
"""
present_large_nodeids = list(zip(*matching_nodeids))[1]
return get_subgraph(large_dmrs, present_large_nodeids)
def get_best_subgraph(nodeid_matches, small_dmrs, large_dmrs):
best_fscore = 0
best_score = 0, 0, 0
best_graphs = []
for match in nodeid_matches:
subgraph = get_matched_subgraph(match, large_dmrs)
score = get_score(small_dmrs, subgraph, match)
fscore = get_fscore(*score)
if fscore > best_fscore:
best_graphs = [subgraph]
best_fscore = fscore
best_score = score
elif fscore == best_fscore:
best_graphs.append(subgraph)
return best_graphs, best_score
def get_score(small_dmrs, matched_subgraph, matching_nodeids):
num_extra_nodes = len([pair for pair in matching_nodeids if pair[0] is None])
num_matched_nodes = len(matching_nodeids) - num_extra_nodes
num_missing_nodes = len(
[nodeid for nodeid in small_dmrs if nodeid not in list(zip(*matching_nodeids))[0]])
only_small_links, only_subgraph_links, shared_links = get_link_diff(small_dmrs,
matched_subgraph,
matching_nodeids)
num_extra_links = len(only_subgraph_links)
num_missing_links = len(only_small_links)
num_shared_links = len(shared_links)
num_correct = num_matched_nodes + num_shared_links
num_matched = num_correct + num_extra_links + num_extra_nodes
num_expected = num_correct + num_missing_links + num_missing_nodes
return num_correct, num_matched, num_expected
def get_fscore(num_correct, num_matched, num_expected):
precision = num_correct / num_matched if num_matched > 0 else 0
recall = num_correct / num_expected if num_expected > 0 else 0
return 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0 # fscore
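# Worked example (illustrative numbers): num_correct=8, num_matched=10, num_expected=16
# gives precision 0.8, recall 0.5 and F-score 2*0.8*0.5/(0.8+0.5) ~= 0.615.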
| 43.717391 | 119 | 0.630958 |
73c26adafe2912143ef8c7d1ab6aa802fce639c2 | 10,853 | py | Python | src/onevision/datasets/cubepp/cubepp.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | 2 | 2022-03-28T09:46:38.000Z | 2022-03-28T14:12:32.000Z | src/onevision/datasets/cubepp/cubepp.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | src/onevision/datasets/cubepp/cubepp.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Cube++ dataset and datamodule.
Images were obtained with sensors of same type on Canon 550D and Canon 600D
cameras. As a calibration tool, SpyderCube was used due to its ability to
identify multiple illumination sources from different angles.
References:
https://github.com/Visillect/CubePlusPlus
"""
from __future__ import annotations
import glob
import os
import random
from typing import Callable
from typing import Optional
import matplotlib.pyplot as plt
from torch.utils.data import random_split
from onevision.cv import show_images
from onevision.cv import VisionBackend
from onevision.data import DataModule
from onevision.data import ImageEnhancementDataset
from onevision.data import ImageInfo
from onevision.data import VisionData
from onevision.data import VisionDataHandler
from onevision.factory import DATAMODULES
from onevision.factory import DATASETS
from onevision.nn import Phase
from onevision.type import Augment_
from onevision.type import Int3T
from onevision.utils import console
from onevision.utils import datasets_dir
from onevision.utils import progress_bar
__all__ = [
"CubePP",
"CubePPDataModule"
]
# MARK: - CubePP
@DATASETS.register(name="cube++")
class CubePP(ImageEnhancementDataset):
"""Cube++ dataset."""
# MARK: Magic Functions
def __init__(
self,
root : str,
split : str = "train",
shape : Int3T = (720, 1280, 3),
caching_labels : bool = False,
caching_images : bool = False,
write_labels : bool = False,
fast_dev_run : bool = False,
load_augment : Optional[dict] = None,
augment : Optional[Augment_] = None,
vision_backend : Optional[VisionBackend] = None,
transforms : Optional[Callable] = None,
transform : Optional[Callable] = None,
target_transform: Optional[Callable] = None,
*args, **kwargs
):
super().__init__(
root = root,
split = split,
shape = shape,
caching_labels = caching_labels,
caching_images = caching_images,
write_labels = write_labels,
fast_dev_run = fast_dev_run,
load_augment = load_augment,
augment = augment,
transforms = transforms,
transform = transform,
target_transform = target_transform,
vision_backend = vision_backend,
*args, **kwargs
)
# MARK: List Files
def list_files(self):
"""List image and label files."""
# NOTE: List all files
self.list_cubepp_files()
# NOTE: fast_dev_run, select only a subset of images
if self.fast_dev_run:
indices = [random.randint(0, len(self.image_paths) - 1)
for _ in range(self.batch_size)]
self.image_paths = [self.image_paths[i] for i in indices]
self.eimage_paths = [self.eimage_paths[i] for i in indices]
# self.label_paths = [self.label_paths[i] for i in indices]
self.custom_label_paths = [self.custom_label_paths[i] for i in indices]
# NOTE: Assertion
if (
len(self.image_paths) <= 0
or len(self.image_paths) != len(self.eimage_paths)
):
raise ValueError(
f"Number of images != Number of enhanced images: "
f"{len(self.image_paths)} != {len(self.eimage_paths)}."
)
console.log(f"Number of images: {len(self.image_paths)}.")
def list_cubepp_files(self):
"""List all IEC22 image data."""
with progress_bar() as pbar:
image_pattern = os.path.join(
self.root, "cube++", "png", "*.png"
)
for image_path in pbar.track(
glob.glob(image_pattern),
description=f"[bright_yellow]Listing Cube++ {self.split} images"
):
eimage_path = image_path.replace("png", "jpg")
custom_label_path = image_path.replace("png", "annotations_custom")
custom_label_path = custom_label_path.replace(".png", ".json")
self.image_paths.append(image_path)
self.eimage_paths.append(eimage_path)
# self.label_paths.append(label_path)
self.custom_label_paths.append(custom_label_path)
# MARK: Load Data
def load_label(
self,
image_path : str,
enhance_path : str,
label_path : Optional[str] = None,
custom_label_path: Optional[str] = None
) -> VisionData:
"""Load all labels from a raw label `file`.
Args:
image_path (str):
Image file.
enhance_path (str):
Enhanced image file.
label_path (str, optional):
Label file. Default: `None`.
custom_label_path (str, optional):
Custom label file. Default: `None`.
Returns:
data (VisionData):
`VisionData` object.
"""
# NOTE: If we have custom labels
if custom_label_path and os.path.isfile(custom_label_path):
return VisionDataHandler().load_from_file(
image_path = image_path,
label_path = custom_label_path,
eimage_path = enhance_path
)
# NOTE: Parse info
image_info = ImageInfo.from_file(image_path=image_path)
eimage_info = ImageInfo.from_file(image_path=enhance_path)
return VisionData(image_info=image_info, eimage_info=eimage_info)
def load_class_labels(self):
"""Load ClassLabels."""
pass
# MARK: - CubePPDataModule
@DATAMODULES.register(name="cube++")
class CubePPDataModule(DataModule):
"""Cube++ DataModule."""
# MARK: Magic Functions
def __init__(
self,
dataset_dir: str = os.path.join(datasets_dir, "cube++"),
name : str = "cube++",
*args, **kwargs
):
super().__init__(dataset_dir=dataset_dir, name=name, *args, **kwargs)
self.dataset_kwargs = kwargs
# MARK: Prepare Data
def prepare_data(self, *args, **kwargs):
"""Use this method to do things that might write to disk or that need
to be done only from a single GPU in distributed settings.
- Download.
- Tokenize.
"""
if self.class_labels is None:
self.load_class_labels()
def setup(self, phase: Optional[Phase] = None):
"""There are also data operations you might want to perform on every GPU.
Todos:
- Count number of classes.
- Build class_labels vocabulary.
- Perform train/val/test splits.
- Apply transforms (defined explicitly in your datamodule or
assigned in init).
- Define collate_fn for you custom dataset.
Args:
phase (Phase, optional):
Phase to use: [None, Phase.TRAINING, Phase.TESTING]. Set to
"None" to setup all train, val, and test data. Default: `None`.
"""
console.log(f"Setup [red]Cube++[/red] datasets.")
# NOTE: Assign train/val datasets for use in dataloaders
full_dataset = CubePP(
root=self.dataset_dir, split="train", **self.dataset_kwargs
)
train_size = int(0.8 * len(full_dataset))
val_size = len(full_dataset) - train_size
self.train, self.val = random_split(
full_dataset, [train_size, val_size]
)
self.test = self.val
self.class_labels = getattr(full_dataset, "class_labels", None)
self.collate_fn = getattr(full_dataset, "collate_fn", None)
if self.class_labels is None:
self.load_class_labels()
self.summarize()
def load_class_labels(self):
"""Load ClassLabels."""
pass
# MARK: - Main
if __name__ == "__main__":
# NOTE: Get DataModule
cfgs = {
"name": "cube++",
# Dataset's name.
"shape": [512, 512, 3],
# Image shape as [H, W, C]. This is compatible with OpenCV format.
"batch_size": 4,
# Number of samples in one forward & backward pass.
"caching_labels": True,
# Should overwrite the existing cached labels? Default: `False`.
"caching_images": False,
# Cache images into memory for faster training. Default: `False`.
"write_labels": False,
# After loading images and labels for the first time, we will convert it
# to our custom data format and write to files. If `True`, we will
# overwrite these files. Default: `False`.
"fast_dev_run": False,
# Take a small subset of the data for fast debug (i.e, like unit testing).
# Default: `False`.
"shuffle": True,
# Set to `True` to have the data reshuffled at every training epoch.
# Default: `True`.
"load_augment": {
"mosaic": 0.0,
"mixup" : 0.5,
},
# Augmented loading policy.
# Augmented loading policy.
"augment": {
"name": "paired_images_auto_augment",
# Name of the augmentation policy.
"policy": "enhancement",
# Augmentation policy. One of: [`enhancement`]. Default: `enhancement`.
"fill": None,
# Pixel fill value for the area outside the transformed image.
# If given a number, the value is used for all bands respectively.
"to_tensor": True,
# Convert a PIL Image or numpy.ndarray [H, W, C] in the range [0, 255]
# to a torch.FloatTensor of shape [C, H, W] in the range [0.0, 1.0].
# Default: `True`.
},
# Augmentation policy.
"vision_backend": VisionBackend.PIL,
# Vision backend option.
}
dm = CubePPDataModule(**cfgs)
dm.setup()
# NOTE: Visualize labels
if dm.class_labels:
dm.class_labels.print()
# NOTE: Visualize one sample
data_iter = iter(dm.train_dataloader)
input, target, shape = next(data_iter)
show_images(images=input, nrow=2, denormalize=True)
show_images(images=target, nrow=2, denormalize=True, figure_num=1)
plt.show(block=True)
| 35.46732 | 85 | 0.57643 |
73c26c2199f3b91fcec8b7f6c2864a7ec92d2a89 | 2,937 | py | Python | hybrid/train_model.py | aivatoglou/ABSA-GR | a0701de55200012f24a3060f63316b98515f83c9 | [
"MIT"
] | null | null | null | hybrid/train_model.py | aivatoglou/ABSA-GR | a0701de55200012f24a3060f63316b98515f83c9 | [
"MIT"
] | null | null | null | hybrid/train_model.py | aivatoglou/ABSA-GR | a0701de55200012f24a3060f63316b98515f83c9 | [
"MIT"
] | 1 | 2022-03-15T14:20:10.000Z | 2022-03-15T14:20:10.000Z | import time
import numpy as np
import torch
from sklearn.metrics import classification_report, f1_score
from torch.cuda.amp import autocast
def epoch_time(start_time, end_time):
"""Calculates the elapsed time between the epochs."""
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
def class_report(preds, y):
"""Prints the classification report for the test set."""
flat_preds = [x for sublist in preds for x in sublist]
flat_truth = [x for sublist in y for x in sublist]
print(classification_report(flat_truth, flat_preds))
return
# Define binary_accuracy function
def binary_accuracy(preds, y):
"""Calculates the macro F1-score."""
preds_flat = np.argmax(preds, axis=1).flatten()
labels_flat = y.flatten()
return f1_score(labels_flat, preds_flat, average="macro")
# Define train function
def train(model, iterator, optimizer, criterion, scaler, device):
"""The architecture's training routine."""
epoch_loss = 0
epoch_acc = 0
model.train()
for batch in iterator:
batch = tuple(b.to(device) for b in batch)
optimizer.zero_grad()
with autocast():
predictions = model(batch[0], batch[1], batch[2]).squeeze(1)
loss = criterion(predictions, batch[3].to(torch.int64))
acc = binary_accuracy(
predictions.detach().cpu().numpy(), batch[3].cpu().numpy()
)
        scaler.scale(loss).backward()
        # Unscale the gradients before clipping so the norm is computed on the true values
        scaler.unscale_(optimizer)
        # Clip gradients
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
        scaler.step(optimizer)
epoch_loss += loss.item()
epoch_acc += acc.item()
scaler.update()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion, device, print_report):
"""The architecture's evaluation routine."""
preds = []
truth = []
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
batch = tuple(b.to(device) for b in batch)
with autocast():
predictions = model(batch[0], batch[1], batch[2]).squeeze(1)
loss = criterion(predictions, batch[3].to(torch.int64))
acc = binary_accuracy(
predictions.detach().cpu().numpy(), batch[3].cpu().numpy()
)
preds_flat = np.argmax(
predictions.detach().cpu().numpy(), axis=1
).flatten()
preds.append(preds_flat)
truth.append(batch[3].cpu().numpy())
epoch_loss += loss.item()
epoch_acc += acc.item()
if print_report:
print(class_report(preds, truth))
return epoch_loss / len(iterator), epoch_acc / len(iterator)
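# Illustrative driver loop (assumed names such as train_loader/val_loader/n_epochs; not part of the original script):
#   scaler = torch.cuda.amp.GradScaler()
#   for epoch in range(1, n_epochs + 1):
#       t0 = time.time()
#       train_loss, train_f1 = train(model, train_loader, optimizer, criterion, scaler, device)
#       val_loss, val_f1 = evaluate(model, val_loader, criterion, device, print_report=(epoch == n_epochs))
#       mins, secs = epoch_time(t0, time.time())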
| 24.07377 | 78 | 0.612189 |
73c27a6643a8550dd6c2294e45c26a15098174b9 | 4,024 | py | Python | Code to apply on BS output/Python/compute_t_on_gini.py | albertocottica/community-management-simulator | e942f854f41705fcb114a79308536a2765896e60 | [
"MIT"
] | null | null | null | Code to apply on BS output/Python/compute_t_on_gini.py | albertocottica/community-management-simulator | e942f854f41705fcb114a79308536a2765896e60 | [
"MIT"
] | null | null | null | Code to apply on BS output/Python/compute_t_on_gini.py | albertocottica/community-management-simulator | e942f854f41705fcb114a79308536a2765896e60 | [
"MIT"
] | null | null | null | import csv
dirPath = '/Users/albertocottica/github/local/community-management-simulator-2/Data/'
def readFile(filename):
'''
(str) => list of dicts
loads file filename into a list. Each item is a dict encoding one run in the model.
'''
with open (filename, 'r') as csvFile:
csvReader = csv.DictReader (csvFile, delimiter = ',', quotechar = '"')
runs = []
for row in csvReader:
runs.append(row)
return runs
def compute_t_ginis_new():
'''
(none) => list of dicts
computes the t statistic relative to the null hypothesis that gini2 == gini1
gini1 and gini2 are average cross-run ginis.
    we have 18 cases: 3 values for chattiness, 3 for intimacystrength, 2 for policy
we also have 1 priority test for each case: "more active" vs. "newer"
'''
results = []
chattiness_values = [".1", ".2", ".4"]
intimacy_values = ["1", "5", "11"]
policy_types = ["engage", "none"]
random_chat_states = ["true", "false"]
# lots of overwriting here
for c in chattiness_values:
for i in intimacy_values:
for pt in policy_types:
for rc in random_chat_states:
for ob in obs: # each observation (line) in the file is a run of the model
thisCase = {} # provisionally stores the t-statistics on this observation
if ob["globalchattiness"] == c and ob["intimacystrength"] == i and ob["policy"] == pt and ob["randomisedchattiness"] == rc:
thisCase["globalchattiness"] = c
thisCase["intimacystrength"] = i
thisCase["policy"] = pt
thisCase["randomisedchattiness"] = ob["randomisedchattiness"]
thisCase[ob["priority"] + "_ms_avg_gini"] = ob["ms_avg_gini"]
# print ('globalchattiness: ' + str(c) + '; intimacy: ' + str(i) + '; policy: ' + pt + ob["priority"] + "_ms_avg_gini")
thisCase[ob["priority"] + "_nc_avg_gini"] = ob["nc_avg_gini"]
thisCase[ob["priority"] + "_ms_se_gini"] = ob["ms_xrun_se_gini"]
thisCase[ob["priority"] + "_nc_se_gini"] = ob["nc_xrun_se_gini"]
print(thisCase)
results.append(thisCase)
for case in results:
# now compute the t statistics
ms_x2 = float(case["newer_ms_avg_gini"])
ms_s2 = float(case["newer_ms_se_gini"])
ms_x1 = float(case["more active_ms_avg_gini"])
ms_s1 = float(case["more active_ms_se_gini"])
ms_t = (ms_x1 - ms_x2) / (((ms_s1 ** 2 + ms_s2 ** 2) /24) ** (0.5))
case["ms_t_more active_newer"] = ms_t
nc_x2 = float(case["newer_nc_avg_gini"])
nc_s2 = float(case["newer_nc_se_gini"])
nc_x1 = float(case["more active_nc_avg_gini"])
nc_s1 = float(case["more active_nc_se_gini"])
nc_t = (nc_x1 - nc_x2) / (((nc_s1 ** 2 + nc_s2 ** 2) /24) ** (0.5))
case["nc_t_more active_newer"] = nc_t
return results
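# Worked example of the t statistic computed above (illustrative numbers): with
# x1 = 0.30, x2 = 0.25 and s1 = s2 = 0.06 over 24 runs,
# t = (0.30 - 0.25) / (((0.06**2 + 0.06**2) / 24) ** 0.5) ~= 2.89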
if __name__ == "__main__":
obs = readFile(dirPath + 'data-w-gini-v4-all.csv')
results = compute_t_ginis_new()
for item in results:
print ("globalchattiness: " + str(item["globalchattiness"]) + "; intimacy strength: " + str(item["intimacystrength"]) + "; policy: " + str(item["policy"]) + "; randomised chattiness: " + str(item["randomisedchattiness"]))
print ("***** inequality in membership strength ********")
print ("H0: ms_avg_gini(newer) = ms_avg_gini_more active => " + str(item["ms_t_newer_more active"]))
print ("***** inequality in number of comments ********")
print ("H0: nc_avg_gini(newer) = nc_avg_gini_more active => " + str(item["nc_t_newer_more active"]))
print ("******************************************************************")
| 45.727273 | 229 | 0.555169 |
73c28d2389462067d556abe8599abd412ab8bdba | 1,239 | py | Python | bin/fix_bed_for_bigbed_conversion.py | mr-c/eclip | 833a389b773e12492d316e61db802dd353404f4f | [
"BSD-3-Clause"
] | null | null | null | bin/fix_bed_for_bigbed_conversion.py | mr-c/eclip | 833a389b773e12492d316e61db802dd353404f4f | [
"BSD-3-Clause"
] | null | null | null | bin/fix_bed_for_bigbed_conversion.py | mr-c/eclip | 833a389b773e12492d316e61db802dd353404f4f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
narrowPeak,
cols 9 and 10 are just blank,
col 5 is 1000 for things that meet the >=3 l2fc and l10pval cutoffs and 200 otherwise (its just for ucsc track coloring)
"""
import numpy as np
import pandas as pd
import argparse
import os
ECLIP_HEADER = [
'chrom','start','end','pValue','signalValue','strand'
]
def combine_pvalue_fold(row):
return "{}|{}".format(row['pValue'], row['signalValue'])
def fix_bed(bed, fixed_bed):
peaks = pd.read_csv(bed, names=ECLIP_HEADER, sep='\t')
peaks['name'] = peaks.apply(combine_pvalue_fold, axis=1)
peaks['score'] = 0
peaks[[
'chrom','start','end','name','score','strand'
]].to_csv(
fixed_bed,
sep='\t',
header=False,
index=False
)
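# Illustrative transformation performed by fix_bed (assumed tab-separated input line):
#   chr1  100  200  3.5  2.1  +          (chrom start end pValue signalValue strand)
# becomes a bigBed-friendly BED6 line with pValue|signalValue packed into the name column:
#   chr1  100  200  3.5|2.1  0  +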
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_bed",
required=True,
)
parser.add_argument(
"--output_fixed_bed",
required=True,
)
# Process arguments
args = parser.parse_args()
bed = args.input_bed
output_fixed_bed = args.output_fixed_bed
# main func
fix_bed(bed, output_fixed_bed)
if __name__ == "__main__":
main()
| 21.736842 | 120 | 0.608555 |
73c2a7e219b469c56cadb90ec0996607d34067a3 | 987 | py | Python | pw_bloat/py/setup.py | antmicro/pigweed | a308c3354a6131425e3f484f07f05a1813948860 | [
"Apache-2.0"
] | null | null | null | pw_bloat/py/setup.py | antmicro/pigweed | a308c3354a6131425e3f484f07f05a1813948860 | [
"Apache-2.0"
] | 1 | 2021-06-18T13:54:41.000Z | 2021-06-18T13:54:41.000Z | pw_bloat/py/setup.py | antmicro/pigweed | a308c3354a6131425e3f484f07f05a1813948860 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""pw_bloat"""
import setuptools # type: ignore
setuptools.setup(
name='pw_bloat',
version='0.0.1',
author='Pigweed Authors',
author_email='pigweed-developers@googlegroups.com',
description='Tools for generating binary size report cards',
packages=setuptools.find_packages(),
package_data={'pw_bloat': ['py.typed']},
zip_safe=False,
install_requires=['pw_cli'],
)
| 34.034483 | 79 | 0.734549 |
73c2bb55e85a60a3c3b07367673abc51b523d4ba | 3,284 | py | Python | synth/forward_synth.py | kamikaze0923/ChemBo | 596b4108deef785c7afe25f9a68975e6d3a18225 | [
"MIT"
] | 29 | 2019-08-07T07:41:25.000Z | 2022-03-20T13:10:49.000Z | synth/forward_synth.py | ks-korovina/dragonfly_chemist | aa30a1cbacaee1d5c46e333f54781c90183f5cd7 | [
"MIT"
] | 10 | 2019-11-02T07:10:37.000Z | 2022-03-12T00:03:04.000Z | synth/forward_synth.py | cyclone923/ChemBo | 596b4108deef785c7afe25f9a68975e6d3a18225 | [
"MIT"
] | 4 | 2020-06-29T02:39:59.000Z | 2022-02-17T20:52:46.000Z | """
Implements forward synthesis
TODO:
* Template synthesis and sanity checks
Notes:
* Using pretrained models:
* There is code for MoleculeTransformers:
see https://github.com/pschwllr/MolecularTransformer
so train it?
* Another option:
https://github.com/connorcoley/rexgen_direct
"""
import sys
import logging
from mols.molecule import Molecule, Reaction
from rexgen_direct.core_wln_global.directcorefinder import DirectCoreFinder
from rexgen_direct.rank_diff_wln.directcandranker import DirectCandRanker
class ForwardSynthesizer:
"""
Class for answering forward prediction queries.
"""
def __init__(self):
# load trained model
pass
def predict_outcome(self, reaction):
"""
Using a predictor, produce the most likely reaction
Params:
reaction {Reaction} - reaction object that holds
lists of reactants/reagents
Returns:
{list[Molecule]} - list of k most likely reaction outcomes
"""
raise NotImplementedError("Implement in child class.")
class TemplateForwardSynthesizer(ForwardSynthesizer):
""" Class for rule-based synthesis using rdkit library. """
pass
class RexgenForwardSynthesizer(ForwardSynthesizer):
def __init__(self):
# load trained model
self.directcorefinder = DirectCoreFinder()
self.directcorefinder.load_model()
self.directcandranker = DirectCandRanker()
self.directcandranker.load_model()
def predict_outcome(self, reaction, k=1):
"""
Using a predictor, produce top-k most likely reactions
Params:
reaction {Reaction}
k {int} - how many top predictions to set and return
Returns:
{list[Molecule]} - list of products of reaction
"""
react = reaction.get_input_str()
try:
(react, bond_preds, bond_scores, cur_att_score) = self.directcorefinder.predict(react)
outcomes = self.directcandranker.predict(react, bond_preds, bond_scores)
except RuntimeError as e:
logging.error(f"Error occured in DirectCandRanker.predict: {e}")
raise e
res = []
for out in outcomes:
if out["smiles"]: # may be empty for some reason?
smiles = out["smiles"][0]
mol = Molecule(smiles)
mol.set_synthesis(reaction.inputs)
res.append(mol)
else:
continue
# outcomes are sorted by probability in decreasing order
res = res[:k]
# setting predicted products, if not already set:
reaction.set_products(res)
return res
if __name__=="__main__":
list_of_mols = ["[CH3:26][c:27]1[cH:28][cH:29][cH:30][cH:31][cH:32]1",
"[Cl:18][C:19](=[O:20])[O:21][C:22]([Cl:23])([Cl:24])[Cl:25]",
"[NH2:1][c:2]1[cH:3][cH:4][c:5]([Br:17])[c:6]2[c:10]1[O:9][C:8]"+
"([CH3:11])([C:12](=[O:13])[O:14][CH2:15][CH3:16])[CH2:7]2"
]
list_of_mols = [Molecule(smiles) for smiles in list_of_mols]
t = RexgenForwardSynthesizer()
reaction = Reaction(list_of_mols)
t.predict_outcome(reaction)
| 31.576923 | 98 | 0.617235 |
73c2bb653cb0f1165e62ccf55af8ac022cc16e3f | 1,590 | py | Python | phenotype/Predicate/Evaluator.py | kamalm87/phenotype | 92ff1c3ec6b21cd540caf10880ef532270d85110 | [
"MIT"
] | null | null | null | phenotype/Predicate/Evaluator.py | kamalm87/phenotype | 92ff1c3ec6b21cd540caf10880ef532270d85110 | [
"MIT"
] | null | null | null | phenotype/Predicate/Evaluator.py | kamalm87/phenotype | 92ff1c3ec6b21cd540caf10880ef532270d85110 | [
"MIT"
] | null | null | null | if __name__ == '__main__':
from sys import ( path as __sys_path__ )
from os.path import ( abspath as __abs_path__ )
__sys_path__.insert(0, __abs_path__('..'))
from itertools import starmap as __it_starmap__
from operator import (
lt as __lt__,
le as __le__,
eq as __eq__,
ne as __ne__,
ge as __ge__,
gt as __gt__,
)
from Core.Get import ( Name as __get_name__ )
from Core.Auxiliary import ( Apply as __func_apply__ )
class Evaluator:
    '''Compare instances attribute-by-attribute over a fixed list of tracked attributes.'''
__slots__ = '_attributes'
def __init__(self, attributes):
''' '''
self._attributes = attributes
def __values__(self):
        '''Return the values of the tracked attributes for this instance.'''
return __func_apply__(*map(__get_name__,self._attributes))(self)
def __evaluate__(self, other):
        '''Pair each of this instance's attribute values with the other instance's.'''
return zip( self.__values__(), other.__values__() )
def __lt__(self, other):
        '''True if any attribute value is less than its counterpart.'''
return any( __it_starmap__(__lt__, self.__evaluate__(other) ) )
def __le__(self, other):
''' '''
return any( __it_starmap__(__le__, self.__evaluate__(other) ) )
def __eq__(self, other):
        '''True only if all attribute values are equal to their counterparts.'''
return all( __it_starmap__(__eq__, self.__evaluate__(other) ) )
def __ne__(self, other):
''' '''
return all( __it_starmap__(__ne__, self.__evaluate__(other) ) )
def __ge__(self, other):
''' '''
return any( __it_starmap__(__ge__, self.__evaluate__(other) ) )
def __gt__(self, other):
''' '''
return any( __it_starmap__(__gt__, self.__evaluate__(other) ) )
| 33.829787 | 72 | 0.591824 |
73c2f8ac2835281cd363fd10ba9489fd4c988609 | 11,419 | py | Python | coinlendingbot/Bitfinex.py | m3h7/coinlendingbot | d6d217d46fc6e04caf0d4a963278b9895e6737e9 | [
"MIT"
] | 3 | 2018-07-13T12:42:48.000Z | 2021-03-22T01:15:32.000Z | coinlendingbot/Bitfinex.py | m3h7/coinlendingbot | d6d217d46fc6e04caf0d4a963278b9895e6737e9 | [
"MIT"
] | 1 | 2018-07-29T14:43:19.000Z | 2022-01-16T13:53:11.000Z | coinlendingbot/Bitfinex.py | m3h7/coinlendingbot | d6d217d46fc6e04caf0d4a963278b9895e6737e9 | [
"MIT"
] | 3 | 2020-05-05T12:41:37.000Z | 2022-01-21T14:48:17.000Z | # coding=utf-8
import hashlib
import hmac
import base64
import json
import requests
import time
import threading
import logging
from coinlendingbot.ExchangeApi import ExchangeApi
from coinlendingbot.ExchangeApi import ApiError
from coinlendingbot.Bitfinex2Poloniex import Bitfinex2Poloniex
from coinlendingbot.RingBuffer import RingBuffer
from coinlendingbot.websocket import ExchangeWsClient
class Bitfinex(ExchangeApi):
def __init__(self, cfg, weblog):
super(Bitfinex, self).__init__(cfg, weblog)
Bitfinex2Poloniex.all_currencies = self.all_currencies
self.logger = logging.getLogger(__name__)
self.lock = threading.RLock()
self.req_per_period = 1
self.default_req_period = 1000 # milliseconds, 1000 = 60/min
self.req_period = self.default_req_period
self.req_time_log = RingBuffer(self.req_per_period)
self.url = 'https://api.bitfinex.com'
self.apiVersion = 'v1'
self.symbols = []
self.timeout = int(self.cfg.get("BOT", "timeout", 30, 1, 180))
self._init_websocket()
def _init_websocket(self):
self.websocket = ExchangeWsClient('BITFINEX')
self.websocket.start()
for pair in self._get_symbols():
self.websocket.subscribe_ticker(pair)
@property
def _nonce(self):
"""
Returns a nonce
Used in authentication
"""
return str(int(time.time() * 100000))
def limit_request_rate(self):
super(Bitfinex, self).limit_request_rate()
def increase_request_timer(self):
super(Bitfinex, self).increase_request_timer()
def decrease_request_timer(self):
super(Bitfinex, self).decrease_request_timer()
def reset_request_timer(self):
super(Bitfinex, self).reset_request_timer()
def _sign_payload(self, payload):
j = json.dumps(payload)
data = base64.standard_b64encode(j.encode('utf8'))
h = hmac.new(self.apiSecret.encode('utf8'), data, hashlib.sha384)
signature = h.hexdigest()
return {
"X-BFX-APIKEY": self.apiKey,
"X-BFX-SIGNATURE": signature,
"X-BFX-PAYLOAD": data,
"Connection": "close"
}
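    # Illustrative sketch of what _sign_payload produces (all values made up):
    #   payload         {"request": "/v1/offers", "nonce": "162..."}
    #   X-BFX-PAYLOAD   base64(json(payload))
    #   X-BFX-SIGNATURE hex(HMAC-SHA384(api_secret, X-BFX-PAYLOAD))
    #   X-BFX-APIKEY    the configured API key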
def _request(self, method, request, payload=None, verify=True):
try:
r = {}
url = '{}{}'.format(self.url, request)
if method == 'get':
r = requests.get(url, timeout=self.timeout, headers={'Connection': 'close'})
else:
r = requests.post(url, headers=payload, verify=verify, timeout=self.timeout)
if r.status_code != 200:
statusCode = int(r.status_code)
if statusCode == 502 or statusCode in range(520, 530, 1):
raise ApiError('(1) API Error {}: The web server reported a bad gateway or gateway timeout error.'
.format(statusCode))
elif statusCode == 429:
self.increase_request_timer()
raise ApiError('(2) API Error {}: {}'.format(statusCode, r.text))
# Check in case something has gone wrong and the timer is too big
self.reset_request_timer()
return r.json()
except Exception as ex:
ex.message = "{0} requesting {1}".format(ex, self.url + request)
raise ex
@ExchangeApi.synchronized
def _post(self, command, payload=None, verify=True):
# keep the request per minute limit
self.limit_request_rate()
payload = payload or {}
payload['request'] = '/{}/{}'.format(self.apiVersion, command)
payload['nonce'] = self._nonce
signed_payload = self._sign_payload(payload)
return self._request('post', payload['request'], signed_payload, verify)
@ExchangeApi.synchronized
def _get(self, command):
# keep the request per minute limit
self.limit_request_rate()
request = '/{}/{}'.format(self.apiVersion, command)
return self._request('get', request)
def _get_symbols(self):
"""
A list of symbol names. Currently "btcusd", "ltcusd", "ltcbtc", ...
https://bitfinex.readme.io/v1/reference#rest-public-symbols
"""
if len(self.symbols) == 0:
bfx_resp = self._get('symbols')
currencies = self.all_currencies
output_currency = self.cfg.get_output_currency()
if output_currency not in currencies:
currencies.append(output_currency)
for symbol in bfx_resp:
base = symbol[3:].upper()
curr = symbol[:3].upper()
if ((base in ['BTC', 'USD'] and curr in currencies) or
(base in currencies and curr == 'BTC')):
self.symbols.append(symbol)
return self.symbols
def return_open_loan_offers(self):
"""
Returns active loan offers
https://bitfinex.readme.io/v1/reference#rest-auth-offers
"""
bfx_resp = self._post('offers')
resp = Bitfinex2Poloniex.convertOpenLoanOffers(bfx_resp)
return resp
def return_loan_orders(self, currency, limit=0):
bfx_resp = self.websocket.return_lendingbook(currency, limit)
resp = Bitfinex2Poloniex.convertLoanOrders(bfx_resp)
self.logger.debug("{} {}".format(currency, resp))
return resp
def return_active_loans(self):
"""
Returns own active loan offers
https://bitfinex.readme.io/v1/reference#rest-auth-offers
"""
bfx_resp = self._post('credits')
resp = Bitfinex2Poloniex.convertActiveLoans(bfx_resp)
return resp
def return_ticker(self):
"""
The ticker is a high level overview of the state of the market
"""
bfx_ticker = self.websocket.return_ticker()
ticker = Bitfinex2Poloniex.convertTicker(bfx_ticker)
self.logger.debug('ticker: {}'.format(ticker))
return ticker
def return_available_account_balances(self, account):
"""
Returns own balances sorted by account
https://bitfinex.readme.io/v1/reference#rest-auth-wallet-balances
"""
bfx_resp = self._post('balances')
balances = Bitfinex2Poloniex.convertAccountBalances(bfx_resp, account)
self.logger.debug("accout:{} result:{}".format(account, balances))
return balances
def cancel_loan_offer(self, currency, order_number):
"""
Cancels an offer
https://bitfinex.readme.io/v1/reference#rest-auth-cancel-offer
"""
payload = {
"offer_id": order_number,
}
bfx_resp = self._post('offer/cancel', payload)
success = 0
message = ''
try:
if bfx_resp['id'] == order_number:
success = 1
message = "Loan offer canceled ({:.4f} @ {:.4f}%).".format(float(bfx_resp['remaining_amount']),
float(bfx_resp['rate']) / 365)
except Exception as e:
message = "Error canceling offer: ", str(e)
success = 0
return {"success": success, "message": message}
def create_loan_offer(self, currency, amount, duration, auto_renew, lending_rate):
"""
Creates a loan offer for a given currency.
https://bitfinex.readme.io/v1/reference#rest-auth-new-offer
"""
payload = {
"currency": currency,
"amount": str(amount),
"rate": str(round(float(lending_rate), 10) * 36500),
"period": int(duration),
"direction": "lend"
}
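        # Note (illustrative): Bitfinex v1 expects the rate as percent per 365 days,
        # hence the * 36500 above, e.g. a daily rate of 0.0002 (0.02%) corresponds to 7.3% per year.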
try:
bfx_resp = self._post('offer/new', payload)
plx_resp = {"success": 0, "message": "Error", "orderID": 0}
if bfx_resp['id']:
plx_resp['orderId'] = bfx_resp['id']
plx_resp['success'] = 1
plx_resp['message'] = "Loan order placed."
return plx_resp
except Exception as e:
msg = str(e)
# "Invalid offer: incorrect amount, minimum is 50 dollar or equivalent in USD"
if "Invalid offer: incorrect amount, minimum is 50" in msg:
usd_min = 50
cur_min = usd_min
if currency != 'USD':
cur_min = usd_min / float(self.return_ticker()['USD_' + currency]['lowestAsk'])
raise Exception("Error create_loan_offer: Amount must be at least " + str(cur_min) + " " + currency)
else:
raise e
def return_balances(self):
"""
Returns balances of exchange wallet
https://bitfinex.readme.io/v1/reference#rest-auth-wallet-balances
"""
balances = self.return_available_account_balances('exchange')
return_dict = {cur: u'0.00000000' for cur in self.all_currencies}
return_dict.update(balances['exchange'])
return return_dict
def transfer_balance(self, currency, amount, from_account, to_account):
"""
Transfers values from one account/wallet to another
https://bitfinex.readme.io/v1/reference#rest-auth-transfer-between-wallets
"""
account_map = {
'margin': 'trading',
'lending': 'deposit',
'exchange': 'exchange'
}
payload = {
"currency": currency,
"amount": amount,
"walletfrom": account_map[from_account],
"walletto": account_map[to_account]
}
bfx_resp = self._post('transfer', payload)
plx_resp = {
"status": 1 if bfx_resp[0]['status'] == "success" else 0,
"message": bfx_resp[0]['message']
}
return plx_resp
def return_lending_history(self, start, stop, limit=500):
"""
        Retrieves balance ledger entries, searches them for funding payments and
        returns those as the lending history.
https://bitfinex.readme.io/v1/reference#rest-auth-balance-history
"""
self.logger.debug("Start:{} Stop:{} Limit:{}".format(start, stop, limit))
history = []
for curr in self.all_currencies:
payload = {
"currency": curr,
"since": str(start),
"until": str(stop),
"limit": limit,
"wallet": "deposit"
}
bfx_resp = self._post('history', payload)
for entry in bfx_resp:
if 'Margin Funding Payment' in entry['description']:
amount = float(entry['amount'])
history.append({
"id": int(float(entry['timestamp'])),
"currency": curr,
"rate": "0.0",
"amount": "0.0",
"duration": "0.0",
"interest": str(amount / 0.85),
"fee": str(amount - amount / 0.85),
"earned": str(amount),
"open": Bitfinex2Poloniex.convertTimestamp(entry['timestamp']),
"close": Bitfinex2Poloniex.convertTimestamp(entry['timestamp'])
})
return history
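        # Note (illustrative): the ledger amount is the net payment after Bitfinex's 15% funding
        # fee, so the gross interest is reconstructed as amount / 0.85 and the fee as the
        # (negative) remainder, e.g. a net payment of 0.85 yields interest 1.0 and fee -0.15.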
| 36.482428 | 120 | 0.569402 |
73c314db3eac53ef6efc72961fba1d9ce7abc600 | 4,910 | py | Python | leetcode_python/Dynamic_Programming/knight-probability-in-chessboard.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Dynamic_Programming/knight-probability-in-chessboard.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Dynamic_Programming/knight-probability-in-chessboard.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
688. Knight Probability in Chessboard
Medium
On an n x n chessboard, a knight starts at the cell (row, column) and attempts to make exactly k moves. The rows and columns are 0-indexed, so the top-left cell is (0, 0), and the bottom-right cell is (n - 1, n - 1).
A chess knight has eight possible moves it can make, as illustrated below. Each move is two cells in a cardinal direction, then one cell in an orthogonal direction.
Each time the knight is to move, it chooses one of eight possible moves uniformly at random (even if the piece would go off the chessboard) and moves there.
The knight continues moving until it has made exactly k moves or has moved off the chessboard.
Return the probability that the knight remains on the board after it has stopped moving.
Example 1:
Input: n = 3, k = 2, row = 0, column = 0
Output: 0.06250
Explanation: There are two moves (to (1,2), (2,1)) that will keep the knight on the board.
From each of those positions, there are also two moves that will keep the knight on the board.
The total probability the knight stays on the board is 0.0625.
Example 2:
Input: n = 1, k = 0, row = 0, column = 0
Output: 1.00000
Constraints:
1 <= n <= 25
0 <= k <= 100
0 <= row, column <= n
"""
# V0
# IDEA : DP
class Solution(object):
def knightProbability(self, N, K, r, c):
dp = [[0 for i in range(N)] for j in range(N)]
dp[r][c] = 1
directions = [(1, 2), (1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1), (-1, 2), (-1, -2)]
for k in range(K):
new_dp = [[0 for i in range(N)] for j in range(N)]
for i in range(N):
for j in range(N):
for d in directions:
x, y = i + d[0], j + d[1]
if x < 0 or x >= N or y < 0 or y >= N:
continue
new_dp[i][j] += dp[x][y]
dp = new_dp
return sum(map(sum, dp)) / float(8 ** K)
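# Worked check of the DP above (illustrative): for N=3, K=2, start (0, 0) the counts in dp
# sum to 4 after the two rounds, so the result is 4 / 8**2 = 0.0625, matching Example 1.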
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/82747623
# IDEA : DP
# DP (dp[i][j]) here means "how many times the knight can get to this place (i,j) in this round"
class Solution(object):
def knightProbability(self, N, K, r, c):
"""
:type N: int
:type K: int
:type r: int
:type c: int
:rtype: float
"""
dp = [[0 for i in range(N)] for j in range(N)]
dp[r][c] = 1
directions = [(1, 2), (1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1), (-1, 2), (-1, -2)]
for k in range(K):
new_dp = [[0 for i in range(N)] for j in range(N)]
for i in range(N):
for j in range(N):
for d in directions:
x, y = i + d[0], j + d[1]
if x < 0 or x >= N or y < 0 or y >= N:
continue
new_dp[i][j] += dp[x][y]
dp = new_dp
return sum(map(sum, dp)) / float(8 ** K)
# V1'
# https://www.jiuzhang.com/solution/knight-probability-in-chessboard/#tag-highlight-lang-python
class Solution:
"""
@param N: int
@param K: int
@param r: int
@param c: int
@return: the probability
"""
def knightProbability(self, N, K, r, c):
# Write your code here.
next = [[-1, -2], [1, -2], [2, -1], [2, 1], [1, 2], [-1, 2], [-2, 1], [-2, -1]]
dp = [[0 for i in range(N)] for j in range(N)]
dp[r][c] = 1
for step in range(1, K + 1):
dpTemp = [[0 for i in range(N)] for j in range(N)]
for i in range(N):
for j in range(N):
for direction in next:
lastR, lastC = i - direction[0], j - direction[1]
if all([lastC >= 0, lastR >= 0, lastC < N, lastR < N]):
dpTemp[i][j] += dp[lastR][lastC] * 0.125
dp = dpTemp
res = 0.0
for i in range(N):
for j in range(N):
res += dp[i][j]
return res
# V2
# Time: O(k * n^2)
# Space: O(n^2)
class Solution(object):
def knightProbability(self, N, K, r, c):
"""
:type N: int
:type K: int
:type r: int
:type c: int
:rtype: float
"""
directions = \
[[ 1, 2], [ 1, -2], [ 2, 1], [ 2, -1], \
[-1, 2], [-1, -2], [-2, 1], [-2, -1]]
dp = [[[1 for _ in range(N)] for _ in range(N)] for _ in range(2)]
for step in range(1, K+1):
for i in range(N):
for j in range(N):
dp[step%2][i][j] = 0
for direction in directions:
rr, cc = i+direction[0], j+direction[1]
if 0 <= cc < N and 0 <= rr < N:
dp[step%2][i][j] += 0.125 * dp[(step-1)%2][rr][cc]
return dp[K%2][r][c] | 34.822695 | 216 | 0.48778 |
73c319c7780749e203f5a800420f7264ab5656e6 | 977 | py | Python | test/test_fft.py | computational-imaging/DepthFromDefocusWithLearnedOptics | c3cf3fb46f2151e67236a294f8b19ba2269a47ca | [
"MIT"
] | 19 | 2021-05-26T12:27:12.000Z | 2022-03-24T02:55:41.000Z | test/test_fft.py | computational-imaging/DepthFromDefocusWithLearnedOptics | c3cf3fb46f2151e67236a294f8b19ba2269a47ca | [
"MIT"
] | null | null | null | test/test_fft.py | computational-imaging/DepthFromDefocusWithLearnedOptics | c3cf3fb46f2151e67236a294f8b19ba2269a47ca | [
"MIT"
] | 4 | 2021-06-02T03:14:27.000Z | 2022-02-23T13:09:48.000Z | import itertools
import numpy as np
import pytest
import torch
from util.fft import fftshift, ifftshift
size = ((3,), (4,), (4, 4), (3, 4), (4, 3), (4, 5, 6))
@pytest.mark.parametrize('size', size)
def test_fftshift(size):
ndims = len(size)
x = torch.rand(size)
x_np = x.numpy()
for d in range(ndims):
for axes in itertools.combinations(range(ndims), d + 1):
y = fftshift(x, axes)
y_np = np.fft.fftshift(x_np, axes)
print(axes, size)
print(x, '\n', x_np)
print(y, '\n', y_np)
torch.testing.assert_allclose(y, y_np)
@pytest.mark.parametrize('size', size)
def test_ifftshift(size):
ndims = len(size)
x = torch.rand(size)
x_np = x.numpy()
for d in range(ndims):
for axes in itertools.combinations(range(ndims), d + 1):
y = ifftshift(x, axes)
y_np = np.fft.ifftshift(x_np, axes)
torch.testing.assert_allclose(y, y_np)
| 26.405405 | 64 | 0.581372 |
73c3205635123c0f9b8eb88cbf21b8df5c89f5ab | 8,080 | py | Python | Deeplearning_tutorial/RNN/RNNPytorch.py | JiaxinYangJX/MTH994 | 3037aa445fcd502a76eff399a3f2d34b11764764 | [
"MIT"
] | null | null | null | Deeplearning_tutorial/RNN/RNNPytorch.py | JiaxinYangJX/MTH994 | 3037aa445fcd502a76eff399a3f2d34b11764764 | [
"MIT"
] | null | null | null | Deeplearning_tutorial/RNN/RNNPytorch.py | JiaxinYangJX/MTH994 | 3037aa445fcd502a76eff399a3f2d34b11764764 | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas as pd
import time
import random
from sklearn import preprocessing
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
tic = time.perf_counter()
#======================================Classes==================================
class RNNNet(nn.Module):
def __init__(self, D_in, T, H, D_out):
"""
D_in : input size, 28
T : time step, 28
H : #of hidden neurons
D_out : #of classes 10
"""
super(RNNNet, self).__init__()
# DRNN
        self.rnn = nn.RNN(D_in, H, num_layers=1, batch_first=True) # input & output have the batch size as the 1st dimension, e.g. (batch, time_step, input_size)
self.linear = nn.Linear(H, D_out)
def forward(self, X):
"""
X: (N, T, D_in)
r_out: (N, T, D_out)
h_n: (n_layers, N, H)
"""
r_out, h_n = self.rnn(X, None) # None represents zero initial hidden state
# choose r_out at the last time step
out = self.linear(r_out[:, -1, :]) # [:,:,-1]
print(out.shape)
y_hat = F.log_softmax(out, dim=1)
return y_hat
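# Illustrative shape check for RNNNet (not part of the original script; the
# batch size of 2 and the random input are arbitrary assumptions). Call it
# manually to verify the (batch, time_step, input_size) contract.
def rnn_shape_check():
    model = RNNNet(D_in=28, T=28, H=64, D_out=10)
    dummy = torch.randn(2, 28, 28)      # (batch, time_step, input_size)
    out = model(dummy)                  # log-probabilities, shape (2, 10)
    assert out.shape == (2, 10)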
#=================================Training & Testing============================
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
output = model(data)
loss = F.nll_loss(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# if epoch % args.log_interval == 0:
# print('Train Epoch: {} [{}/{} ]\tLoss: {:.6f}'.format(
# epoch, batch_idx * len(data), len(train_loader.dataset), loss.item()))
def test(args, model, device, epoch, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
# pcc = PCC(output, target)[0]
# rmse = RMSE(output, target)
test_loss /= len(test_loader.dataset)
if epoch % args.log_interval == 0:
print('\n Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(test_loader.dataset),100. * correct / len(test_loader.dataset)))
# print("[test_loss: {:.4f}] [PCC: {:.4f}] [RMSE: {:.4f}] [Epoch: {:d}] [2DCNN] ".format(test_loss, pcc, rmse, epoch))
def read_dataset(feature_file, label_file):
''' Read data set in *.csv to data frame in Pandas'''
df_X = pd.read_csv(feature_file)
df_y = pd.read_csv(label_file)
X = df_X.values # convert values in dataframe to numpy array (features)
y = df_y.values # convert values in dataframe to numpy array (label)
return X, y
def normalize_features(X_train, X_test):
from sklearn.preprocessing import StandardScaler #import libaray
scaler = StandardScaler() # call an object function
scaler.fit(X_train) # calculate mean, std in X_train
X_train_norm1 = scaler.transform(X_train) # apply normalization on X_train
X_test_norm1 = scaler.transform(X_test) # we use the same normalization on X_test
X_train_norm = np.reshape(X_train_norm1,(-1,28,28)) # reshape X to be a 3-D array
X_test_norm = np.reshape(X_test_norm1,(-1,28,28))
return X_train_norm, X_test_norm
def one_hot_encoder(y_train, y_test):
''' convert label to a vector under one-hot-code fashion '''
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
lb.fit(y_train)
y_train_ohe = lb.transform(y_train)
y_test_ohe = lb.transform(y_test)
return y_train_ohe, y_test_ohe
def main():
# Training settings
parser = argparse.ArgumentParser(description='MNIST')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=100000, metavar='N',
help='input batch size for testing (default: 50)') # train itself 9221, test 3767
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.005)')
parser.add_argument('--momentum', type=float, default=0.3, metavar='M',
help='SGD momentum (default: 0.005)')
parser.add_argument('--weight_decay', type=float, default=0, metavar='M',
help='SGD momentum (default: 0.0005)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=2, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=True,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
#=================================Load Data=================================
X_train, y_train = read_dataset('MNIST_X_train.csv', 'MNIST_y_train.csv')
X_test, y_test = read_dataset('MNIST_X_test.csv', 'MNIST_y_test.csv')
X_train, X_test = normalize_features(X_train, X_test)
print('Trian:', X_train.shape)
print('Test:', X_test.shape)
print(y_train.shape)
print(y_test.shape)
#==================================Pack Data================================
train_data = torch.from_numpy(X_train).float() # numpy to tensor
test_data = torch.from_numpy(X_test).float()
trainset = torch.utils.data.TensorDataset(train_data, torch.from_numpy(y_train.ravel()))
testset = torch.utils.data.TensorDataset(test_data, torch.from_numpy(y_test.ravel()))
# Define data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset, batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(dataset=testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
#=================================Design Net================================
D_in = 28
T = 28
H = 64
D_out = 10
model = RNNNet(D_in, T, H, D_out).to(device)
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
optimizer = optim.Adam(model.parameters(), lr=args.lr, eps=1e-08, weight_decay=args.weight_decay, amsgrad=False)
lr_adjust = optim.lr_scheduler.StepLR(optimizer, step_size = 200, gamma = 0.5, last_epoch = -1)
for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, epoch, test_loader)
        lr_adjust.step()  # step the LR scheduler once per epoch, after the optimizer updates
if (args.save_model):
torch.save(model.state_dict(),"mnist_rnn.pt")
if __name__ == '__main__':
main()
toc = time.perf_counter()
print(("Elapsed time: %.1f [min]" % ((toc-tic)/60)))
print("==============================Finish=====================================")
| 43.44086 | 174 | 0.61349 |
73c39cc8314fd2fc18f06fa042120a483cefd90e | 1,109 | py | Python | attic/2019/contributions-2019/open/mudaliar-yptu/PWAF/testcases/floating_menu_test.py | Agriad/devops-course | 380aa43fa69efd71992933f013fb11a96ce16a74 | [
"MIT"
] | null | null | null | attic/2019/contributions-2019/open/mudaliar-yptu/PWAF/testcases/floating_menu_test.py | Agriad/devops-course | 380aa43fa69efd71992933f013fb11a96ce16a74 | [
"MIT"
] | 51 | 2021-04-08T11:39:59.000Z | 2021-05-07T12:01:27.000Z | attic/2019/contributions-2019/open/mudaliar-yptu/PWAF/testcases/floating_menu_test.py | Agriad/devops-course | 380aa43fa69efd71992933f013fb11a96ce16a74 | [
"MIT"
] | null | null | null | """
@author: Yi-Pei, Tu
@email: yptu@kth.se
@date: 21-Apr-19
"""
from pages import dynamic_controls_page
from pages.floating_menu_page import FloatingMenuPage
from pages.welcome_page import WelcomePage
from utility.drivermanager import DriverManagerFirefox, DriverManagerChrome
from nose.plugins.attrib import attr
@attr(group=['kth'])
class FloatingMenuTestFirefox(DriverManagerFirefox):
def test_floating_menu(self):
welcome_page = WelcomePage(self.driver)
welcome_page.verify_welcome_page().click_on_link("Floating Menu")
floating_menu_page = FloatingMenuPage(self.driver)
floating_menu_page.verify_floating_menu_page()
floating_menu_page.verify_floating_menu()
@attr(group=['kth'])
class FloatingMenuTestChrome(DriverManagerChrome):
def test_floating_menu(self):
welcome_page = WelcomePage(self.driver)
welcome_page.verify_welcome_page().click_on_link("Floating Menu")
floating_menu_page = FloatingMenuPage(self.driver)
floating_menu_page.verify_floating_menu_page()
floating_menu_page.verify_floating_menu() | 32.617647 | 75 | 0.772768 |
73c3a4eb4073c76dec4de8f45c539be0a06b605d | 1,474 | py | Python | tests/python/relay/test_pass_eta_expand.py | mingwayzhang/tvm | 3b287c4d4e6d83e6fd30db47ffa3d5481a332a63 | [
"Apache-2.0"
] | 48 | 2020-07-29T18:09:23.000Z | 2021-10-09T01:53:33.000Z | tests/python/relay/test_pass_eta_expand.py | mingwayzhang/tvm | 3b287c4d4e6d83e6fd30db47ffa3d5481a332a63 | [
"Apache-2.0"
] | 9 | 2021-04-02T02:28:07.000Z | 2022-03-26T18:23:59.000Z | tests/python/relay/test_pass_eta_expand.py | mingwayzhang/tvm | 3b287c4d4e6d83e6fd30db47ffa3d5481a332a63 | [
"Apache-2.0"
] | 42 | 2020-08-01T06:41:24.000Z | 2022-01-20T10:33:08.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm import relay
import tvm.relay.module as _module
import tvm.relay.transform as _transform
def test_eta_expand_basic():
x = relay.var('x', 'int32')
orig = relay.Function([x], x)
mod = _module.Module.from_expr(orig)
seq = _transform.Sequential([_transform.EtaExpand()])
with _transform.PassContext(opt_level=3):
mod = seq(mod)
got = mod["main"]
y = relay.var('y', 'int32')
expected = relay.Function([y], orig(y))
gv = relay.GlobalVar("gv")
mod[gv] = expected
mod = _transform.InferType()(mod)
expected = mod["gv"]
assert(relay.analysis.alpha_equal(got, expected))
if __name__ == "__main__":
test_eta_expand_basic()
| 35.95122 | 62 | 0.721167 |
73c3bf67a1c905861695be70f7df41f5cf3e299c | 18,017 | py | Python | src/program/autoscheduler.py | bornhack/bornhack-website | 40ed0875f5129a4c8ae1887e33e7dedb4981dadc | [
"BSD-3-Clause"
] | 7 | 2017-04-14T15:28:29.000Z | 2021-09-10T09:45:38.000Z | src/program/autoscheduler.py | bornhack/bornhack-website | 40ed0875f5129a4c8ae1887e33e7dedb4981dadc | [
"BSD-3-Clause"
] | 799 | 2016-04-28T09:31:50.000Z | 2022-03-29T09:05:02.000Z | src/program/autoscheduler.py | bornhack/bornhack-website | 40ed0875f5129a4c8ae1887e33e7dedb4981dadc | [
"BSD-3-Clause"
] | 35 | 2016-04-28T09:23:53.000Z | 2021-05-02T12:36:01.000Z | import logging
from datetime import timedelta
from conference_scheduler import resources, scheduler
from conference_scheduler.lp_problem import objective_functions
from conference_scheduler.validator import is_valid_schedule, schedule_violations
from psycopg2.extras import DateTimeTZRange
from program.email import add_event_scheduled_email
from .models import EventType
logger = logging.getLogger("bornhack.%s" % __name__)
class AutoScheduler:
"""
The BornHack AutoScheduler. Made with love by Tykling.
Built around https://github.com/PyconUK/ConferenceScheduler which works with lists
of conference_scheduler.resources.Slot and conference_scheduler.resources.Event objects.
Most of the code in this class deals with massaging our data into a list of Slot and
Event objects defining the data and constraints for the scheduler.
Initialising this class takes a while because all the objects have to be created.
"""
def __init__(self, camp):
"""Get EventTypes, EventSessions and Events, build autoslot and autoevent objects"""
self.camp = camp
# Get all EventTypes which support autoscheduling
self.event_types = self.get_event_types()
# Get all EventSessions for the current event_types
self.event_sessions = self.get_event_sessions(self.event_types)
# Build a lookup dict of lists of EventSession IDs per EventType (for easy lookups later)
self.event_type_sessions = {}
for session in self.event_sessions:
if session.event_type not in self.event_type_sessions:
self.event_type_sessions[session.event_type] = []
self.event_type_sessions[session.event_type].append(session.id)
# Get all Events for the current event_types
self.events = self.get_events(self.event_types)
# Get autoslots
self.autoslots = self.get_autoslots(self.event_sessions)
# Build a lookup dict of autoslots per EventType
self.event_type_slots = {}
for autoslot in self.autoslots:
            # loop over the event_type_sessions dict and find the EventType this autoslot's session belongs to
for et, sessions in self.event_type_sessions.items():
if autoslot.session in sessions:
if et not in self.event_type_slots:
self.event_type_slots[et] = []
self.event_type_slots[et].append(autoslot)
break
# get autoevents and a lookup dict which maps Event id to autoevent index
self.autoevents, self.autoeventindex = self.get_autoevents(self.events)
def get_event_types(self):
"""Return all EventTypes which support autoscheduling"""
return EventType.objects.filter(support_autoscheduling=True)
def get_event_sessions(self, event_types):
"""Return all EventSessions for these EventTypes"""
return self.camp.event_sessions.filter(
event_type__in=event_types,
).prefetch_related("event_type", "event_location")
def get_events(self, event_types):
"""Return all Events that need scheduling"""
# return all events for these event_types, but..
return self.camp.events.filter(event_type__in=event_types).exclude(
# exclude Events that have been sceduled already...
event_slots__isnull=False,
# ...unless those events are autoscheduled
event_slots__autoscheduled=False,
)
def get_autoslots(self, event_sessions):
"""Return a list of autoslots for all slots in all EventSessions"""
autoslots = []
# loop over the sessions
for session in event_sessions:
# loop over available slots in this session
for slot in session.get_available_slots(count_autoscheduled_as_free=True):
autoslots.append(slot.get_autoscheduler_slot())
return autoslots
def get_autoevents(self, events):
"""Return a list of resources.Event objects, one for each Event"""
autoevents = []
autoeventindex = {}
eventindex = {}
for event in events:
autoevents.append(
resources.Event(
name=event.id,
duration=event.duration_minutes,
tags=event.tags.names(),
demand=event.demand,
)
)
# create a dict of events with the autoevent index as key and the Event as value
autoeventindex[autoevents.index(autoevents[-1])] = event
# create a dict of events with the Event as key and the autoevent index as value
eventindex[event] = autoevents.index(autoevents[-1])
# loop over all autoevents to add unavailability...
# (we have to do this in a seperate loop because we need all the autoevents to exist)
for autoevent in autoevents:
# get the Event
event = autoeventindex[autoevents.index(autoevent)]
# loop over all other event_types...
for et in self.event_types.all().exclude(pk=event.event_type.pk):
if et in self.event_type_slots:
# and add all slots for this EventType as unavailable for this event,
# this means we don't schedule a talk in a workshop slot and vice versa.
autoevent.add_unavailability(*self.event_type_slots[et])
# loop over all speakers for this event and add event conflicts
for speaker in event.speakers.all():
# loop over other events featuring this speaker, register each conflict,
                # this means we don't schedule two events for the same speaker at the same time
conflict_ids = speaker.events.exclude(id=event.id).values_list(
"id", flat=True
)
for conflictevent in autoevents:
if conflictevent.name in conflict_ids:
# only the event with the lowest index gets the unavailability,
if autoevents.index(conflictevent) > autoevents.index(
autoevent
):
autoevent.add_unavailability(conflictevent)
# loop over event_conflicts for this speaker, register unavailability for each,
                # this means we don't schedule this event at the same time as something the
# speaker wishes to attend.
# Only process Events which the AutoScheduler is handling
for conflictevent in speaker.event_conflicts.filter(
pk__in=events.values_list("pk", flat=True)
):
# only the event with the lowest index gets the unavailability
if eventindex[conflictevent] > autoevents.index(autoevent):
autoevent.add_unavailability(
autoevents[eventindex[conflictevent]]
)
# loop over event_conflicts for this speaker, register unavailability for each,
# only process Events which the AutoScheduler is not handling, and which have
# been scheduled in one or more EventSlots
for conflictevent in speaker.event_conflicts.filter(
event_slots__isnull=False
).exclude(pk__in=events.values_list("pk", flat=True)):
# loop over the EventSlots this conflict is scheduled in
for conflictslot in conflictevent.event_slots.all():
# loop over all slots
for slot in self.autoslots:
# check if this slot overlaps with the conflictevents slot
if conflictslot.when & DateTimeTZRange(
slot.starts_at,
slot.starts_at + timedelta(minutes=slot.duration),
):
# this slot overlaps with the conflicting event
autoevent.add_unavailability(slot)
# Register all slots where we have no positive availability
# for this speaker as unavailable
available = []
for availability in speaker.availabilities.filter(
available=True
).values_list("when", flat=True):
availability = DateTimeTZRange(
availability.lower, availability.upper, "()"
)
for slot in self.autoslots:
slotrange = DateTimeTZRange(
slot.starts_at,
slot.starts_at + timedelta(minutes=slot.duration),
"()",
)
if slotrange in availability:
# the speaker is available for this slot
available.append(self.autoslots.index(slot))
autoevent.add_unavailability(
*[
s
for s in self.autoslots
if not self.autoslots.index(s) in available
]
)
return autoevents, autoeventindex
def build_current_autoschedule(self):
"""Build an autoschedule object based on the existing published schedule.
Returns an autoschedule, which is a list of conference_scheduler.resources.ScheduledItem
objects, one for each scheduled Event. This function is useful for creating an "original
schedule" to base a new similar schedule off of."""
# loop over scheduled events and create a ScheduledItem object for each
autoschedule = []
for slot in self.camp.event_slots.filter(
autoscheduled=True, event__in=self.events
):
# loop over all autoevents to find the index of this event
for autoevent in self.autoevents:
if autoevent.name == slot.event.id:
# we need the index number of the event
eventindex = self.autoevents.index(autoevent)
break
# loop over the autoslots to find the index of the autoslot this event is scheduled in
scheduled = False
for autoslot in self.autoslots:
if (
autoslot.venue == slot.event_location.id
and autoslot.starts_at == slot.when.lower
and autoslot.session
in self.event_type_sessions[slot.event.event_type]
):
# This autoslot starts at the same time as the EventSlot, and at the same
# location. It also has the session ID of a session with the right EventType.
autoschedule.append(
resources.ScheduledItem(
event=self.autoevents[eventindex],
slot=self.autoslots[self.autoslots.index(autoslot)],
)
)
scheduled = True
break
# did we find a slot matching this EventInstance?
if not scheduled:
print(f"Could not find an autoslot for slot {slot} - skipping")
# The returned schedule might not be valid! For example if a speaker is no
# longer available when their talk is scheduled. This is fine though, an invalid
# schedule can still be used as a basis for creating a new similar schedule.
return autoschedule
def calculate_autoschedule(self, original_schedule=None):
"""Calculate autoschedule based on self.autoevents and self.autoslots,
optionally using original_schedule to minimise changes"""
kwargs = {}
kwargs["events"] = self.autoevents
kwargs["slots"] = self.autoslots
# include another schedule in the calculation?
if original_schedule:
kwargs["original_schedule"] = original_schedule
kwargs["objective_function"] = objective_functions.number_of_changes
else:
# otherwise use the capacity demand difference thing
kwargs[
"objective_function"
] = objective_functions.efficiency_capacity_demand_difference
# calculate the new schedule
autoschedule = scheduler.schedule(**kwargs)
return autoschedule
def calculate_similar_autoschedule(self, original_schedule=None):
"""Convenience method for creating similar schedules. If original_schedule
is omitted the new schedule is based on the current schedule instead"""
if not original_schedule:
# we do not have an original_schedule, use current EventInstances
original_schedule = self.build_current_autoschedule()
# calculate and return
autoschedule = self.calculate_autoschedule(original_schedule=original_schedule)
diff = self.diff(original_schedule, autoschedule)
return autoschedule, diff
def apply(self, autoschedule):
"""Apply an autoschedule by creating EventInstance objects to match it"""
# "The Clean Slate protocol sir?" - delete any existing autoscheduled Events
# TODO: investigate how this affects the FRAB XML export (for which we added a UUID on
# Slot objects). Make sure "favourite" functionality or bookmarks or w/e in
# FRAB clients still work after a schedule "re"apply. We might need a smaller hammer here.
deleted = self.camp.event_slots.filter(
# get all autoscheduled EventSlots
autoscheduled=True
).update(
# clear the Event
event=None,
# and autoscheduled status
autoscheduled=None,
)
# loop and schedule events
scheduled = 0
for item in autoschedule:
# each item is an instance of conference_scheduler.resources.ScheduledItem
event = self.camp.events.get(id=item.event.name)
slot = self.camp.event_slots.get(
event_session_id=item.slot.session,
when=DateTimeTZRange(
item.slot.starts_at,
item.slot.starts_at + timedelta(minutes=item.slot.duration),
"[)", # remember to use the correct bounds when comparing
),
)
slot.event = event
slot.autoscheduled = True
slot.save()
add_event_scheduled_email(slot)
scheduled += 1
# return the numbers
return deleted, scheduled
def diff(self, original_schedule, new_schedule):
"""
This method returns a dict of Event differences and Slot differences between
the two schedules.
"""
slot_diff = scheduler.slot_schedule_difference(
original_schedule,
new_schedule,
)
slot_output = []
for item in slot_diff:
slot_output.append(
{
"event_location": self.camp.event_locations.get(pk=item.slot.venue),
"starttime": item.slot.starts_at,
"old": {},
"new": {},
}
)
if item.old_event:
try:
old_event = self.camp.events.get(pk=item.old_event.name)
except self.camp.events.DoesNotExist:
old_event = item.old_event.name
slot_output[-1]["old"]["event"] = old_event
if item.new_event:
try:
new_event = self.camp.events.get(pk=item.new_event.name)
except self.camp.events.DoesNotExist:
new_event = item.old_event.name
slot_output[-1]["new"]["event"] = new_event
# then get a list of differences per event
event_diff = scheduler.event_schedule_difference(
original_schedule,
new_schedule,
)
event_output = []
# loop over the differences and build the dict
for item in event_diff:
try:
event = self.camp.events.get(pk=item.event.name)
except self.camp.events.DoesNotExist:
event = item.event.name
event_output.append(
{
"event": event,
"old": {},
"new": {},
}
)
# do we have an old slot for this event?
if item.old_slot:
event_output[-1]["old"][
"event_location"
] = self.camp.event_locations.get(id=item.old_slot.venue)
event_output[-1]["old"]["starttime"] = item.old_slot.starts_at
# do we have a new slot for this event?
if item.new_slot:
event_output[-1]["new"][
"event_location"
] = self.camp.event_locations.get(id=item.new_slot.venue)
event_output[-1]["new"]["starttime"] = item.new_slot.starts_at
# all good
return {"event_diffs": event_output, "slot_diffs": slot_output}
def is_valid(self, autoschedule, return_violations=False):
"""Check if a schedule is valid, optionally returning a list of violations if invalid"""
valid = is_valid_schedule(
autoschedule, slots=self.autoslots, events=self.autoevents
)
if not return_violations:
return valid
return (
valid,
schedule_violations(
autoschedule, slots=self.autoslots, events=self.autoevents
),
)
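# Illustrative end-to-end usage of the AutoScheduler above (added as a sketch;
# `camp` stands for a Camp instance with autoschedulable EventTypes/EventSessions,
# and this helper is not called anywhere else in the module).
def example_autoschedule_run(camp):
    autoscheduler = AutoScheduler(camp)
    # build a brand new schedule, or pass original_schedule=... to minimise changes
    autoschedule = autoscheduler.calculate_autoschedule()
    if autoscheduler.is_valid(autoschedule):
        deleted, scheduled = autoscheduler.apply(autoschedule)
        return deleted, scheduled
    # when the schedule is invalid, return the violations for inspection instead
    return autoscheduler.is_valid(autoschedule, return_violations=True)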
| 44.818408 | 98 | 0.589055 |
73c3c833cf73eb757fa92d8cb97f221dce8bf2ad | 607 | py | Python | notebooks/ab_test.py | avisionh/abtest | 9b4e640b67b2b4d2c3501f1549b63ddd675f058e | [
"MIT"
] | 1 | 2021-01-13T06:39:08.000Z | 2021-01-13T06:39:08.000Z | notebooks/ab_test.py | avisionh/abtest | 9b4e640b67b2b4d2c3501f1549b63ddd675f058e | [
"MIT"
] | null | null | null | notebooks/ab_test.py | avisionh/abtest | 9b4e640b67b2b4d2c3501f1549b63ddd675f058e | [
"MIT"
] | null | null | null | import src.utils.helper_ab_test as f
import pandas as pd
df = pd.read_csv(filepath_or_buffer="data/interim/df_conversion_clean.csv")
(
conversions_control,
total_users_control,
percent_convert_control,
) = f.report_conversions(
data=df,
group_col="group",
group_filter="control",
convert_col="converted",
page_col="landing_page",
)
(
conversions_treatment,
total_users_treatment,
percent_convert_treatment,
) = f.report_conversions(
data=df,
group_col="group",
group_filter="treatment",
convert_col="converted",
page_col="landing_page",
)
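# Illustrative follow-up: a two-proportion z-test on the counts computed above.
# This is a sketch added for clarity, not part of the original notebook; it
# assumes statsmodels is installed and that report_conversions returns raw
# conversion and user counts as used here.
from statsmodels.stats.proportion import proportions_ztest

z_stat, p_value = proportions_ztest(
    count=[conversions_control, conversions_treatment],
    nobs=[total_users_control, total_users_treatment],
)
print(f"z-statistic: {z_stat:.3f}, p-value: {p_value:.3f}")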
| 20.233333 | 75 | 0.718287 |
73c3d67bb9de83966fa618d5868eb55fc49bd270 | 4,322 | py | Python | alipay/aop/api/domain/Org.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/Org.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/Org.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Org(object):
def __init__(self):
self._org_id_number = None
self._org_id_type = None
self._org_legal_id_number = None
self._org_legal_id_type = None
self._org_legal_name = None
self._org_name = None
self._third_party_user_id = None
@property
def org_id_number(self):
return self._org_id_number
@org_id_number.setter
def org_id_number(self, value):
self._org_id_number = value
@property
def org_id_type(self):
return self._org_id_type
@org_id_type.setter
def org_id_type(self, value):
self._org_id_type = value
@property
def org_legal_id_number(self):
return self._org_legal_id_number
@org_legal_id_number.setter
def org_legal_id_number(self, value):
self._org_legal_id_number = value
@property
def org_legal_id_type(self):
return self._org_legal_id_type
@org_legal_id_type.setter
def org_legal_id_type(self, value):
self._org_legal_id_type = value
@property
def org_legal_name(self):
return self._org_legal_name
@org_legal_name.setter
def org_legal_name(self, value):
self._org_legal_name = value
@property
def org_name(self):
return self._org_name
@org_name.setter
def org_name(self, value):
self._org_name = value
@property
def third_party_user_id(self):
return self._third_party_user_id
@third_party_user_id.setter
def third_party_user_id(self, value):
self._third_party_user_id = value
def to_alipay_dict(self):
params = dict()
if self.org_id_number:
if hasattr(self.org_id_number, 'to_alipay_dict'):
params['org_id_number'] = self.org_id_number.to_alipay_dict()
else:
params['org_id_number'] = self.org_id_number
if self.org_id_type:
if hasattr(self.org_id_type, 'to_alipay_dict'):
params['org_id_type'] = self.org_id_type.to_alipay_dict()
else:
params['org_id_type'] = self.org_id_type
if self.org_legal_id_number:
if hasattr(self.org_legal_id_number, 'to_alipay_dict'):
params['org_legal_id_number'] = self.org_legal_id_number.to_alipay_dict()
else:
params['org_legal_id_number'] = self.org_legal_id_number
if self.org_legal_id_type:
if hasattr(self.org_legal_id_type, 'to_alipay_dict'):
params['org_legal_id_type'] = self.org_legal_id_type.to_alipay_dict()
else:
params['org_legal_id_type'] = self.org_legal_id_type
if self.org_legal_name:
if hasattr(self.org_legal_name, 'to_alipay_dict'):
params['org_legal_name'] = self.org_legal_name.to_alipay_dict()
else:
params['org_legal_name'] = self.org_legal_name
if self.org_name:
if hasattr(self.org_name, 'to_alipay_dict'):
params['org_name'] = self.org_name.to_alipay_dict()
else:
params['org_name'] = self.org_name
if self.third_party_user_id:
if hasattr(self.third_party_user_id, 'to_alipay_dict'):
params['third_party_user_id'] = self.third_party_user_id.to_alipay_dict()
else:
params['third_party_user_id'] = self.third_party_user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Org()
if 'org_id_number' in d:
o.org_id_number = d['org_id_number']
if 'org_id_type' in d:
o.org_id_type = d['org_id_type']
if 'org_legal_id_number' in d:
o.org_legal_id_number = d['org_legal_id_number']
if 'org_legal_id_type' in d:
o.org_legal_id_type = d['org_legal_id_type']
if 'org_legal_name' in d:
o.org_legal_name = d['org_legal_name']
if 'org_name' in d:
o.org_name = d['org_name']
if 'third_party_user_id' in d:
o.third_party_user_id = d['third_party_user_id']
return o
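# Illustrative round-trip for the generated model above (the field values are
# made-up placeholders, not real Alipay data); added as a usage sketch only.
if __name__ == '__main__':
    example = Org.from_alipay_dict({'org_name': 'Example Co.', 'org_id_type': 'CERT'})
    print(example.to_alipay_dict())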
| 32.992366 | 89 | 0.627487 |
73c3d94bbd2e3a44082c804f262463932130f875 | 831 | py | Python | tartangan/kubeflow/download_dataset.py | awentzonline/tartangan | 2d36a81fa0ae91fe6b9b4e1f26763285630837fb | [
"MIT"
] | null | null | null | tartangan/kubeflow/download_dataset.py | awentzonline/tartangan | 2d36a81fa0ae91fe6b9b4e1f26763285630837fb | [
"MIT"
] | null | null | null | tartangan/kubeflow/download_dataset.py | awentzonline/tartangan | 2d36a81fa0ae91fe6b9b4e1f26763285630837fb | [
"MIT"
] | null | null | null | import smart_open
from .base_metadata_app import BaseMetadataApp
class DownloadDatasetMetadata(BaseMetadataApp):
def run(self):
super().run()
datasets = self.find_metadata_datasets_by_name(self.args.dataset_name)
# TODO: need to sort to get latest?
dataset = datasets[-1]
with smart_open.open(dataset['uri'], 'rb') as infile:
with smart_open.open(self.args.output_path, 'wb') as outfile:
outfile.write(infile.read())
@classmethod
def add_args_to_parser(cls, p):
super().add_args_to_parser(p)
p.add_argument('dataset_name', help='Name of metadata entity')
p.add_argument('output_path', help='Where the corresponding files go')
def main():
DownloadDatasetMetadata.run_from_cli()
if __name__ == '__main__':
main()
| 28.655172 | 78 | 0.672684 |
73c3f0a82dd837b440975e35c2200e3b50ac7aae | 304 | py | Python | chapter10/exercises/EG10-19 Twinkle Twinkle Tuples.py | munnep/begin_to_code_with_python | 3ef14d90785526b6b26d262a7627eee73791d7d0 | [
"MIT"
] | null | null | null | chapter10/exercises/EG10-19 Twinkle Twinkle Tuples.py | munnep/begin_to_code_with_python | 3ef14d90785526b6b26d262a7627eee73791d7d0 | [
"MIT"
] | null | null | null | chapter10/exercises/EG10-19 Twinkle Twinkle Tuples.py | munnep/begin_to_code_with_python | 3ef14d90785526b6b26d262a7627eee73791d7d0 | [
"MIT"
] | null | null | null | # EG10-19 Twinkle Twinkle Tuples
import time
import snaps
tune = [(0, 0.4), (0, 0.4), (7, 0.4), (7, 0.4),
(9, 0.4), (9, 0.4), (7, 0.8), (5, 0.4),
(5, 0.4), (4, 0.4), (4, 0.4), (2, 0.4),
(2, 0.4), (0, 0.8)]
for note in tune:
snaps.play_note(note[0])
time.sleep(note[1])
| 21.714286 | 47 | 0.457237 |
73c43dfe372141ff7fda7b888444aa9dd9b2792f | 291 | py | Python | mainapp/admin.py | AHTOH2001/OOP_4_term | c9b0f64f3507486e0670cc95d7252862b673d845 | [
"MIT"
] | null | null | null | mainapp/admin.py | AHTOH2001/OOP_4_term | c9b0f64f3507486e0670cc95d7252862b673d845 | [
"MIT"
] | null | null | null | mainapp/admin.py | AHTOH2001/OOP_4_term | c9b0f64f3507486e0670cc95d7252862b673d845 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(Genre)
admin.site.register(Author)
admin.site.register(Book)
admin.site.register(ClientGroup)
admin.site.register(Client)
admin.site.register(Basket)
admin.site.register(SliderImages)
| 20.785714 | 33 | 0.80756 |
73c44a40df8a25538935d78f07b7f11014300089 | 42 | py | Python | carla/recourse_methods/catalog/cchvae/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 140 | 2021-08-03T21:53:32.000Z | 2022-03-20T08:52:02.000Z | carla/recourse_methods/catalog/cchvae/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 54 | 2021-03-07T18:22:16.000Z | 2021-08-03T12:06:31.000Z | carla/recourse_methods/catalog/cchvae/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 16 | 2021-08-23T12:14:58.000Z | 2022-03-01T00:52:58.000Z | # flake8: noqa
from .model import CCHVAE
| 10.5 | 25 | 0.738095 |
73c44be8f89e616d4b24b47bcef729f430f820cd | 43 | py | Python | btd6_memory_info/generated/System/Collections/EmptyReadOnlyDictionaryInternal/empty_read_only_dictionary_internal.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/System/Collections/EmptyReadOnlyDictionaryInternal/empty_read_only_dictionary_internal.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/System/Collections/EmptyReadOnlyDictionaryInternal/empty_read_only_dictionary_internal.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | class EmptyReadOnlyDictionaryInternal: pass | 43 | 43 | 0.930233 |
73c4535bd78fbc0579d70eab1d032f76d674ba84 | 4,864 | py | Python | stock_trading_backend/train.py | iryzhkov/stock-trading-backend | 7161026b7b4deb78a934b66550c85a27c6b32933 | [
"MIT"
] | 1 | 2021-01-27T18:24:02.000Z | 2021-01-27T18:24:02.000Z | stock_trading_backend/train.py | iryzhkov/stock-trading-backend | 7161026b7b4deb78a934b66550c85a27c6b32933 | [
"MIT"
] | null | null | null | stock_trading_backend/train.py | iryzhkov/stock-trading-backend | 7161026b7b4deb78a934b66550c85a27c6b32933 | [
"MIT"
] | null | null | null | """Training.
"""
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import progressbar
from stock_trading_backend.simulation import StockMarketSimulation
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
def train_agent(agent, from_date=None, to_date=None, min_duration=60, max_duration=90, commission=0,
max_stock_owned=1, min_start_balance=1000, max_start_balance=4000, training=True,
stock_data_randomization=False, episode_batch_size=5, num_episodes=10):
"""Train an agent with provided params.
Args:
agent: the agent to train.
from_date: datetime date for the start of the range.
to_date: datetime date for the end of the range.
min_duration: minimum length of the episode.
        max_duration: maximum length of the episode (if 0, the episode runs over all available dates).
        commission: relative commission for each transaction.
        max_stock_owned: a maximum number of different stocks that can be owned.
min_start_balance: the minimum starting balance.
max_start_balance: the maximum starting balance.
stock_data_randomization: whether to add stock data randomization.
episode_batch_size: the number of episodes in a training batch.
num_episodes: number of episodes that training going to last.
training: the param passed to make_decision in the agent.
"""
if not agent.requires_learning:
raise ValueError("This agent does not need learning")
if from_date is None or to_date is None:
today = datetime.today()
today = datetime(today.year, today.month, today.day)
from_date = today - timedelta(days=720)
to_date = today - timedelta(days=60)
simulation = StockMarketSimulation(agent.data_collection_config, from_date=from_date,
to_date=to_date, min_start_balance=min_start_balance,
max_start_balance=max_start_balance, commission=commission,
max_stock_owned=max_stock_owned, min_duration=min_duration,
max_duration=max_duration, reward_config=agent.reward_config,
stock_data_randomization=stock_data_randomization)
num_episodes_run = 0
overall_reward_history = []
loss_history = []
observation = simulation.reset()
_, kwargs = agent.make_decision(observation, simulation, False)
kwargs_keys = kwargs.keys()
batch_kwargs_keys = ["{}s_batch".format(key) for key in kwargs_keys]
with progressbar.ProgressBar(max_value=num_episodes) as progress_bar:
while num_episodes_run < num_episodes:
batch_kwargs = {key: [] for key in batch_kwargs_keys}
batch_rewards = []
batch_observations = []
batch_actions = []
batch_reward = 0
num_episodes_left_in_batch = episode_batch_size
# Run the simulations in the batch.
while num_episodes_left_in_batch > 0 and num_episodes_run < num_episodes:
rewards = []
actions = []
kwargs = {key: [] for key in kwargs_keys}
observation = simulation.reset()
observations = pd.DataFrame(columns=observation.index)
while not simulation.done:
action, _kwargs = agent.make_decision(observation, simulation, training)
observations = observations.append(observation, ignore_index=True)
actions.append(action)
for key in _kwargs:
kwargs[key].append(_kwargs[key])
observation, reward, _ = simulation.step(action)
rewards.append(reward)
overall_reward = simulation.overall_reward
overall_reward_history.append(overall_reward)
rewards[-2] += overall_reward
rewards = np.asarray(rewards)
batch_rewards.append(rewards)
batch_observations.append(observations)
batch_actions.append(actions)
for key in kwargs:
batch_kwargs["{}s_batch".format(key)].append(kwargs[key])
num_episodes_run += 1
num_episodes_left_in_batch -= 1
progress_bar.update(num_episodes_run)
# Utilize data from the simulations to train agents.
losses = agent.apply_learning(batch_observations, batch_actions, batch_rewards,
**batch_kwargs)
loss_history.extend(losses)
return overall_reward_history, loss_history
| 46.769231 | 100 | 0.637541 |
73c470fba30f2997b698b39705ae17c451af605b | 4,287 | py | Python | trax/layers/rnn.py | modyharshit23/trax | 2e6783a4674209b57482ec41e1c533a420aa6fe6 | [
"Apache-2.0"
] | null | null | null | trax/layers/rnn.py | modyharshit23/trax | 2e6783a4674209b57482ec41e1c533a420aa6fe6 | [
"Apache-2.0"
] | null | null | null | trax/layers/rnn.py | modyharshit23/trax | 2e6783a4674209b57482ec41e1c533a420aa6fe6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of common recurrent neural network cells (RNNs)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from trax.layers import combinators as cb
from trax.layers import convolution
from trax.layers import core
def GRUCell(n_units):
"""Builds a traditional GRU cell with dense internal transformations.
Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555
Args:
n_units: Number of hidden units.
Returns:
A Stax model representing a traditional GRU RNN cell.
"""
return GeneralGRUCell(
candidate_transform=lambda: core.Dense(n_units),
memory_transform_fn=None,
gate_nonlinearity=core.Sigmoid,
candidate_nonlinearity=core.Tanh)
def ConvGRUCell(n_units, kernel_size=(3, 3)):
"""Builds a convolutional GRU.
Paper: https://arxiv.org/abs/1511.06432.
Args:
n_units: Number of hidden units
kernel_size: Kernel size for convolution
Returns:
A Stax model representing a GRU cell with convolution transforms.
"""
def BuildConv():
return convolution.Conv(
filters=n_units, kernel_size=kernel_size, padding='SAME')
return GeneralGRUCell(
candidate_transform=BuildConv,
memory_transform_fn=None,
gate_nonlinearity=core.Sigmoid,
candidate_nonlinearity=core.Tanh)
def GeneralGRUCell(candidate_transform,
memory_transform_fn=None,
gate_nonlinearity=core.Sigmoid,
candidate_nonlinearity=core.Tanh,
dropout_rate_c=0.1,
sigmoid_bias=0.5):
r"""Parametrized Gated Recurrent Unit (GRU) cell construction.
GRU update equations:
  $$ \text{Update gate: } u_t = \sigma(U' \cdot s_{t-1} + B') $$
  $$ \text{Reset gate: } r_t = \sigma(U'' \cdot s_{t-1} + B'') $$
  $$ \text{Candidate memory: } c_t = \tanh(U \cdot (r_t \odot s_{t-1}) + B) $$
  $$ \text{New state: } s_t = u_t \odot s_{t-1} + (1 - u_t) \odot c_t $$
See combinators.Gate for details on the gating function.
Args:
candidate_transform: Transform to apply inside the Candidate branch. Applied
before nonlinearities.
memory_transform_fn: Optional transformation on the memory before gating.
gate_nonlinearity: Function to use as gate activation. Allows trying
alternatives to Sigmoid, such as HardSigmoid.
candidate_nonlinearity: Nonlinearity to apply after candidate branch. Allows
trying alternatives to traditional Tanh, such as HardTanh
dropout_rate_c: Amount of dropout on the transform (c) gate. Dropout works
best in a GRU when applied exclusively to this branch.
sigmoid_bias: Constant to add before sigmoid gates. Generally want to start
off with a positive bias.
Returns:
A model representing a GRU cell with specified transforms.
"""
gate_block = [ # u_t
candidate_transform(),
core.AddConstant(constant=sigmoid_bias),
gate_nonlinearity(),
]
reset_block = [ # r_t
candidate_transform(),
core.AddConstant(constant=sigmoid_bias), # Want bias to start positive.
gate_nonlinearity(),
]
candidate_block = [
cb.Dup(),
reset_block,
cb.Multiply(), # Gate S{t-1} with sigmoid(candidate_transform(S{t-1}))
candidate_transform(), # Final projection + tanh to get Ct
candidate_nonlinearity(), # Candidate gate
# Only apply dropout on the C gate. Paper reports 0.1 as a good default.
core.Dropout(rate=dropout_rate_c)
]
memory_transform = memory_transform_fn() if memory_transform_fn else []
return cb.Model(
cb.Dup(), cb.Dup(),
cb.Parallel(memory_transform, gate_block, candidate_block),
cb.Gate(),
)
| 33.232558 | 80 | 0.706088 |
73c4c099dd4b6772132cb0468de4e1952b6c22ac | 1,075 | py | Python | sdks/python/test/test_DistributionGroupReleasesResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/test/test_DistributionGroupReleasesResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/test/test_DistributionGroupReleasesResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from DistributionGroupReleasesResponse.clsDistributionGroupReleasesResponse import DistributionGroupReleasesResponse # noqa: E501
from appcenter_sdk.rest import ApiException
class TestDistributionGroupReleasesResponse(unittest.TestCase):
"""DistributionGroupReleasesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDistributionGroupReleasesResponse(self):
"""Test DistributionGroupReleasesResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsDistributionGroupReleasesResponse.DistributionGroupReleasesResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.875 | 130 | 0.75814 |
73c50eaa6d03abd836b96de66832e04c218ff64a | 2,099 | py | Python | thirdparty/nsiqcppstyle/nsiqunittest/nsiqcppstyle_update_unittest.py | cfsengineering/tigl | abfbb57b82dc6beac7cde212a4cd5e0aed866db8 | [
"Apache-2.0"
] | 171 | 2015-04-13T11:24:34.000Z | 2022-03-26T00:56:38.000Z | thirdparty/nsiqcppstyle/nsiqunittest/nsiqcppstyle_update_unittest.py | cfsengineering/tigl | abfbb57b82dc6beac7cde212a4cd5e0aed866db8 | [
"Apache-2.0"
] | 620 | 2015-01-20T08:34:36.000Z | 2022-03-30T11:05:33.000Z | thirdparty/nsiqcppstyle/nsiqunittest/nsiqcppstyle_update_unittest.py | cfsengineering/tigl | abfbb57b82dc6beac7cde212a4cd5e0aed866db8 | [
"Apache-2.0"
] | 56 | 2015-02-09T13:33:56.000Z | 2022-03-19T04:52:51.000Z | # Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from updateagent.agent import Version
class updateTest(unittest.TestCase):
def test2(self):
version1 = Version("0.1.1")
version2 = Version("0.1.10.1")
assert(version2 > version1)
version1 = Version("0.2.11")
version2 = Version("0.1.10.1")
assert(version1 > version2)
version2 = Version("0.2.11.1")
assert(version1 < version2)
def test3(self):
eachFileName = "library.dll"
assert(eachFileName.endswith(".dll") or eachFileName.endswith(".zip") or eachFileName.endswith(".exe")) | 48.813953 | 111 | 0.741782 |
73c51e5eac01fc43694fb9499ad496ac5da378bc | 8,613 | py | Python | megdc/install_method.py | indykish/megdc.py | e9de7ce844889efef2ff30444bdf0e361956d7fa | [
"Apache-2.0"
] | 1 | 2015-11-06T09:12:59.000Z | 2015-11-06T09:12:59.000Z | megdc/install_method.py | indykish/megdc.py | e9de7ce844889efef2ff30444bdf0e361956d7fa | [
"Apache-2.0"
] | null | null | null | megdc/install_method.py | indykish/megdc.py | e9de7ce844889efef2ff30444bdf0e361956d7fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
import socket
import urllib2
import sys
import datetime
import getopt
import textwrap
from help_text import *
if os.name != "nt":
import fcntl
import struct
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s',
ifname[:15]))[20:24])
def install_pkg():
print "hello"
#to find local ip address
def get_lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = [
"eth0",
"eth1",
"eth2",
"wlan0",
"wlan1",
"wifi0",
"ath0",
"ath1",
"ppp0",
]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
except IOError:
pass
return ip
#to check internet connections
def internet_on():
try:
response=urllib2.urlopen('http://216.58.220.46',timeout=2)
return True
except urllib2.URLError as err: pass
return False
ipaddr = get_lan_ip()

# shared log handle for the install/configure helpers below
# (pre_install opens its own handle to the same file)
log = open("/var/log/megam/megamcib/megam.log", 'a+')
def install_repo() :
os.system('sudo apt-get -y install software-properties-common python-software-properties')
os.system('add-apt-repository "deb [arch=amd64] http://get.megam.io/0.9/ubuntu/14.04/ testing megam" ')
os.system('apt-key adv --keyserver keyserver.ubuntu.com --recv B3E0C1B7')
os.system('apt-get -y update')
os.system('apt-get -y install megamcommon')
return
"""
if internet_on() == True :
print "network available"
else:
print "Check your network connection"
"""
#to get hostname
def host_name():
return socket.gethostname()
"""
#write into a file
f = open('v.txt','a+')
f.write(' hai India')
f.seek(0)
print f.read()
"""
def pre_install():
log=open("/var/log/megam/megamcib/megam.log" ,'a+')
if internet_on() == True :
print "network available"
else:
        date = datetime.datetime.now()
        log.write("\n" + str(date) + " check your network connection. get.megam.io is down or not reachable!")
hostname=socket.gethostname()
log.write("\nAdding entries in /etc/hosts")
#ADD /etc/hosts entries
ipaddr=get_lan_ip()
f2=open("/etc/hosts",'a+')
f2.write("\n127.0.0.1 " +hostname + "localhost" )
f2.write("\n"+ ipaddr +" "+ hostname +" localhost")
log.write("\n/etc/hosts entries added")
#For apt-add-repository command
install_repo()
return
def install_megam() :
print " Installing all megam packagess...."
install_all_text = textwrap.dedent(__install_all__).strip()
os.system(install_all_text)
return
def install_snowflake():
os.system('apt-get install snowflake')
return
def install_common():
os.system('apt-get install megamcommon')
return
def install_nilavu():
os.system('apt-get install megamnilavu')
return
def install_gulpd():
os.system('apt-get install megamgulpd')
return
def install_megamd():
os.system('apt-get install megamd')
return
def install_gateway():
os.system('apt-get install megamgateway')
return
def install_riak():
##################################################### Install and configure riak #########################################################
os.system('apt-get -y install riak ') #>>$MEGAM_LOG
log.write('apt-get -y install riak')
os.system('sed -i "s/^[ \t]*storage_backend .*/storage_backend = leveldb/" /etc/riak/riak.conf')
os.system('sed -i "s/^[ \t]*listener.http.internal =.*/listener.http.internal = $ipaddr:8098/" /etc/riak/riak.conf')
os.system('sed -i "s/^[ \t]*listener.protobuf.internal =.*/listener.protobuf.internal = $ipaddr:8087/" /etc/riak/riak.conf')
os.system('riak start ') #>>$MEGAM_LOG
log.write('riak start')
return
##################################################### MEGAMD PREINSTALL SCRIPT #########################################################
def megamd_preinstall() :
#Gem install
os.system('gem install chef --no-ri --no-rdoc ') #>>$MEGAM_LOG
log.write('gem install chef --no-ri --no-rdoc ')
d1='/var/lib/megam/gems'
if not os.path.exists(d1):
os.makedirs(d1)
os.chdir(d1)
os.system('wget https://s3-ap-southeast-1.amazonaws.com/megampub/gems/knife-opennebula-0.3.0.gem')
os.system('gem install knife-opennebula-0.3.0.gem ') # >> $MEGAM_LOG
log.write('gem install knife-opennebula-0.3.0.gem ')
##################################################### configure chef-server #########################################################
d = os.path.dirname("/opt/chef-server" )
if not os.path.exists(d):
f.write("Chef-server reconfigure") # >> $MEGAM_LOG
os.system('sudo chef-server-ctl reconfigure')# >> $MEGAM_LOG
chefserver=open('//etc/chef-server/chef-server.rb','a+')
__nginx = '''
nginx['url']="https://$ipaddr"
nginx['server_name']="$ipaddr"
nginx['non_ssl_port'] = 90
'''
    nginx_text = textwrap.dedent(__nginx).strip()
chefserver.write(nginx_text)
os.system('sudo chef-server-ctl reconfigure')# >> $MEGAM_LOG
os.system('sudo chef-server-ctl restart')# >> $MEGAM_LOG
log.write('sudo chef-server-ctl reconfigure')
log.write('sudo chef-server-ctl restart')
#sudo rabbitmq-server -detached >> $MEGAM_LOG
os.system('set -e')
#chef_repo_dir=`find /var/lib/megam/megamd -name chef-repo | awk -F/ -vOFS=/ 'NF-=0' | sort -u`
chef_repo_dir="/var/lib/megam/megamd/"
os.system('apt-get install git-core') #>> $MEGAM_LOG
log.write('apt-get install git-core')
os.system('git clone https://github.com/megamsys/chef-repo.git $chef_repo_dir/chef-repo')# >> $MEGAM_LOG
log.write('git clone https://github.com/megamsys/chef-repo.git $chef_repo_dir/chef-repo')
shutil.copy2('/etc/chef-server/admin.pem',chef_repo_dir+'/chef-repo/.chef')
shutil.copy2('/etc/chef-server/chef-validator.pem', chef_repo_dir+'/chef-repo/.chef')
os.system('ipaddr='+ipaddr)
os.system('sed -i "s@^[ \t]*chef_server_url.*@chef_server_url \'https://$ipaddr\'@" $chef_repo_dir/chef-repo/.chef/knife.rb')
    os.makedirs(chef_repo_dir+'/chef-repo/.chef/trusted_certs') # || true
os.system('[ -f /var/opt/chef-server/nginx/ca/$ipaddr.crt ] && cp /var/opt/chef-server/nginx/ca/$ipaddr.crt $chef_repo_dir/chef-repo/.chef/trusted_certs')
os.system('[ -f /var/opt/chef-server/nginx/ca/$host.crt ] && cp /var/opt/chef-server/nginx/ca/$host.crt $chef_repo_dir/chef-repo/.chef/trusted_certs')
os.system('sudo echo 3 > /proc/sys/vm/drop_caches')
os.system('sleep 5')
#os.system('echo "Cookbook upload Start=====> " >> $MEGAM_LOG')
f.write("Cookbook upload End=====> ")# >> $MEGAM_LOG
os.system('knife cookbook upload --all -c $chef_repo_dir/chef-repo/.chef/knife.rb ') #|| true >> $MEGAM_LOG'
f.write("knife cookbook upload --all -c $chef_repo_dir/chef-repo/.chef/knife.rb ")
return
##################################################### Change config and restart services #################################################
def service_restart():
#MEGAM_GATEWAY
os.system('sed -i "s/^[ \t]*riak.url.*/riak.url=\"$ipaddr\"/" /usr/share/megam/megamgateway/conf/application-production.conf')
os.system('sed -i "s/^[ \t]*sf.host.*/sf.host=\"localhost\"/" /usr/share/megam/megamgateway/conf/application-production.conf')
os.system('stop megamgateway ') #>> $MEGAM_LOG
os.system('start megamgateway') #>> $MEGAM_LOG
#MEGAMD
os.system('sed -i "s/.*:8087.*/ url: $ipaddr:8087/" /usr/share/megam/megamd/conf/megamd.conf')
os.system('stop megamd ') #|| true >> $MEGAM_LOG'
os.system('start megamd ') #>> $MEGAM_LOG
f.write("start megamd")
return
def uninstall_megam() :
print " Removing nilavu...."
os.system('apt-get remove megamnilavu')
os.system('apt-get purge megamnilavu')
uninstall_gateway()
uninstall_snowflake()
uninstall_gulpd()
uninstall_common()
uninstall_snowflake()
return
def uninstall_snowflake():
os.system('apt-get remove snowflake')
os.system('apt-get purge snowflake')
return
def uninstall_common():
os.system('apt-get remove megamcommon')
os.system('apt-get purge megamcommon')
return
def uninstall_gulpd():
os.system('apt-get remove megamgulpd')
os.system('apt-get purge megamgulpd')
return
def uninstall_megamd():
os.system('apt-get remove megamd')
os.system('apt-get purge megamd')
return
def uninstall_gateway():
os.system('apt-get remove megamgateway')
os.system('apt-get purge megamgateway')
return
| 33.383721 | 156 | 0.626611 |
73c53b435175c5a20b107a11ffdf0fe3acebbcf1 | 6,592 | py | Python | sacla/scripts/backups/old_scripts/ancient-preOct2016/sacla_Chip_Manager3_Apr4_0947.py | beamline-i24/DiamondChips | 02fb58a95ad2c1712c41b641eb5f197d688c54c3 | [
"Apache-2.0"
] | null | null | null | sacla/scripts/backups/old_scripts/ancient-preOct2016/sacla_Chip_Manager3_Apr4_0947.py | beamline-i24/DiamondChips | 02fb58a95ad2c1712c41b641eb5f197d688c54c3 | [
"Apache-2.0"
] | null | null | null | sacla/scripts/backups/old_scripts/ancient-preOct2016/sacla_Chip_Manager3_Apr4_0947.py | beamline-i24/DiamondChips | 02fb58a95ad2c1712c41b641eb5f197d688c54c3 | [
"Apache-2.0"
] | null | null | null |
import pv
import os, re, sys
import math
import string
from time import sleep
from ca import caput, caget
from sacla_Chip_StartUp3 import get_xy
from sacla_Chip_StartUp3 import make_path_dict
##########################################
# New Chip_Manager for SACLA #
# This version last edited 04 Mar by DAS #
##########################################
def initialise():
caput(pv.me14e_stage_x + '.VMAX', 20)
caput(pv.me14e_stage_y + '.VMAX', 20)
caput(pv.me14e_stage_z + '.VMAX', 20)
caput(pv.me14e_stage_x + '.VELO', 20)
caput(pv.me14e_stage_y + '.VELO', 20)
caput(pv.me14e_stage_z + '.VELO', 20)
caput(pv.me14e_stage_x + '.ACCL', 0.00001)
caput(pv.me14e_stage_y + '.ACCL', 0.00001)
caput(pv.me14e_stage_z + '.ACCL', 0.00001)
caput(pv.me14e_stage_x + '.HLM', 30)
caput(pv.me14e_stage_x + '.LLM', -30)
caput(pv.me14e_stage_y + '.HLM', 30)
caput(pv.me14e_stage_y + '.LLM', -30)
caput(pv.me14e_stage_z + '.HLM', 10)
caput(pv.me14e_stage_z + '.LLM', -10)
print 'DONT FORGET TO DO THIS: export EPICS_CA_ADDR_LIST=172.23.190.255'
print 'DONT FORGET TO DO THIS: export EPICS_CA_AUTO_ADDR_LIST=NO'
print 'Initialisation Complete'
def setdcparameters():
print '\n\n', 10*'set', '\n'
path_dict = make_path_dict()
print 'keys available'
for ky in sorted(path_dict.keys()):
print ky,
path = '/localhome/local/Documents/sacla/parameter_files/'
print '\n\nSaving to', path
f = open(path + 'setdcparams.txt','w')
chipname = caget(pv.me14e_chipname)
f.write('chipname \t%s\n' %chipname)
print 'chipname:', chipname
visit_id = '/localhome/local/Documents/sacla'
f.write('visit_id \t%s\n' %visit_id)
print 'visit_id:', visit_id
proteinname = caget(pv.me14e_filepath)
f.write('proteinname \t%s\n' %proteinname)
print 'proteinname:', proteinname
chipcapacity = caget(pv.me14e_chipcapacity)
f.write('chipcapacity \t%s\n' %chipcapacity)
print 'chip capacity:', chipcapacity
blockcapacity = caget(pv.me14e_blockcapacity)
f.write('blockcapacity \t%s\n' %blockcapacity)
print 'block capacity:', blockcapacity
path_key = caget(pv.me14e_dcdetdist)
f.write('path_key \t%s\n' %path_key)
print 'path_key:', path_key, '\n', 30*('%i ' %path_key)
f.close()
xstart, ystart, xblocks, yblocks, coltype, path = path_dict[path_key]
print '\nxstart', xstart
print 'ystart', ystart
print 'xblocks', xblocks
print 'yblocks', yblocks
print 'coltype', coltype
print 'path', path[0], '------>', path[-1]
print 'Number of Blocks', len(path)
print '\n', 10*'set', '\n\n'
def moveto(place):
print place
if place == 'zero':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
elif place == 'A9_al':
caput(pv.me14e_stage_x, 18.975)
caput(pv.me14e_stage_y, 0.0)
elif place == 'I1_la':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, -21.375)
elif place == 'I9_ll':
caput(pv.me14e_stage_x, 18.975)
caput(pv.me14e_stage_y, -21.375)
elif place == 'yag':
caput(pv.me14e_stage_x, 43.0)
caput(pv.me14e_stage_y, -33.0)
elif place == 'chip':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
elif place == 'load_position':
caput(pv.me14e_stage_x, 0)
caput(pv.me14e_stage_y, 0)
caput(pv.me14e_stage_z, 40)
elif place == 'collect_position':
caput(pv.me14e_stage_x, 0)
caput(pv.me14e_stage_y, 0)
caput(pv.me14e_stage_z, 0)
else:
print 'Unknown Argument In Method moveto'
def fiducial(point):
path = '/localhome/local/Documents/sacla/parameter_files/'
x = caget(pv.me14e_stage_x + '.RBV')
y = caget(pv.me14e_stage_y + '.RBV')
z = caget(pv.me14e_stage_z + '.RBV')
print 'Writing Fiducial File', 30*str(point)
print path
print '\n'.join([str(x),str(y),str(z)]), '\n'
f = open(path + 'fiducial_%s.txt' %point, 'w')
f.write('%1.3f\n' %x)
f.write('%1.3f\n' %y)
f.write('%1.3f\n' %z)
f.close()
print 'Writing Fiducial File', 30*str(point)
def cs_maker():
fiducial_dict = {}
fiducial_dict['f1'] = {}
fiducial_dict['f2'] = {}
fiducial_dict['f1']['x'] = 18.975
fiducial_dict['f1']['y'] = 0
fiducial_dict['f1']['z'] = 0
fiducial_dict['f2']['x'] = 0
fiducial_dict['f2']['y'] = 21.375
fiducial_dict['f2']['z'] = 0
#1mm / counts per nanometer (give cts/mm)
scale = 10000
#d1, d2 = fiducial_positions
f1 = open('/localhome/local/Documents/sacla/parameter_files/fiducial_1.txt','r')
f1_lines = f1.readlines()
    f1_lines_x = f1_lines[0].rstrip('\n')
    f1_lines_y = f1_lines[1].rstrip('\n')
    f1_lines_z = f1_lines[2].rstrip('\n')
f1_x = float(f1_lines_x)
f1_y = float(f1_lines_y)
f1_z = float(f1_lines_z)
f2 = open('/localhome/local/Documents/sacla/parameter_files/fiducial_2.txt','r')
f2_lines = f2.readlines()
    f2_lines_x = f2_lines[0].rstrip('\n')
    f2_lines_y = f2_lines[1].rstrip('\n')
    f2_lines_z = f2_lines[2].rstrip('\n')
f2_x = float(f2_lines_x)
f2_y = float(f2_lines_y)
f2_z = float(f2_lines_z)
#Evaluate numbers
x1factor = (f1_x / fiducial_dict['f1']['x']) *scale
y1factor = (f1_y / f1_x) *scale
z1factor = (f1_z / f1_x) *scale
x2factor = (f2_x / f2_y) *scale
y2factor = (f2_y / fiducial_dict['f2']['y']) *scale
z2factor = (f2_z / f2_y) *scale
z3factor = scale
cs1 = "#1->%+1.5fX%+1.5fY%+1.5fZ" % (-1*x1factor, y1factor, z1factor)
cs2 = "#2->%+1.5fX%+1.5fY%+1.5fZ" % (x2factor, y2factor, z2factor)
cs3 = "#3->0X+0Y%+fZ" % (z3factor)
print cs1
print cs2
print cs3
caput(pv.me14e_pmac_str, '!x0y0z0')
sleep(1)
caput(pv.me14e_pmac_str, '&2')
sleep(1)
caput(pv.me14e_pmac_str, cs1)
sleep(1)
caput(pv.me14e_pmac_str, cs2)
sleep(1)
caput(pv.me14e_pmac_str, cs3)
sleep(1)
print 'done'
def main(args):
if args[1] == 'moveto':
moveto(args[2])
elif args[1] == 'fiducial':
fiducial(args[2])
elif args[1] == 'cs_maker':
cs_maker()
elif args[1] == 'setdcparameters':
setdcparameters()
elif args[1] == 'initialise':
initialise()
else:
print 'Unknown Command'
pass
if __name__ == '__main__':
main(sys.argv)
| 32 | 88 | 0.597239 |
73c55c9984fbc04b2959467813c814ef1fbe2a62 | 45,540 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/operations/_disks_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/operations/_disks_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/operations/_disks_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
disk_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskName": _SERIALIZER.url("disk_name", disk_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
disk_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskName": _SERIALIZER.url("disk_name", disk_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskName": _SERIALIZER.url("disk_name", disk_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-11-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskName": _SERIALIZER.url("disk_name", disk_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_grant_access_request_initial(
subscription_id: str,
resource_group_name: str,
disk_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskName": _SERIALIZER.url("disk_name", disk_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_revoke_access_request_initial(
subscription_id: str,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-11-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskName": _SERIALIZER.url("disk_name", disk_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
**kwargs
)
class DisksOperations(object):
"""DisksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.Disk",
**kwargs: Any
) -> "_models.Disk":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk, 'Disk')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Disk', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.Disk",
**kwargs: Any
) -> LROPoller["_models.Disk"]:
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk operation.
:type disk: ~azure.mgmt.compute.v2019_11_01.models.Disk
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_11_01.models.Disk]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.DiskUpdate",
**kwargs: Any
) -> "_models.Disk":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk, 'DiskUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Disk', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.DiskUpdate",
**kwargs: Any
) -> LROPoller["_models.Disk"]:
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk operation.
:type disk: ~azure.mgmt.compute.v2019_11_01.models.DiskUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_11_01.models.Disk]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> "_models.Disk":
"""Gets information about a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Disk, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_11_01.models.Disk
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.DiskList"]:
"""Lists all the disks under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_11_01.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.DiskList"]:
"""Lists all the disks under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_11_01.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'} # type: ignore
def _grant_access_initial(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs: Any
) -> Optional["_models.AccessUri"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AccessUri"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(grant_access_data, 'GrantAccessData')
request = build_grant_access_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
content_type=content_type,
json=_json,
template_url=self._grant_access_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} # type: ignore
@distributed_trace
def begin_grant_access(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs: Any
) -> LROPoller["_models.AccessUri"]:
"""Grants access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param grant_access_data: Access data object supplied in the body of the get disk access
operation.
:type grant_access_data: ~azure.mgmt.compute.v2019_11_01.models.GrantAccessData
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either AccessUri or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_11_01.models.AccessUri]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessUri"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._grant_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
grant_access_data=grant_access_data,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} # type: ignore
def _revoke_access_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_revoke_access_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
template_url=self._revoke_access_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
@distributed_trace
def begin_revoke_access(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Revokes access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._revoke_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
| 42.049861 | 190 | 0.660277 |
73c560f1ad8a08a1f88c8d0d474492215b878fcd | 982 | py | Python | cron/models.py | lukaszlacinski/dashboard | e84131d141f64f2ae41aa3726ff201ef9a626184 | [
"Apache-2.0"
] | null | null | null | cron/models.py | lukaszlacinski/dashboard | e84131d141f64f2ae41aa3726ff201ef9a626184 | [
"Apache-2.0"
] | null | null | null | cron/models.py | lukaszlacinski/dashboard | e84131d141f64f2ae41aa3726ff201ef9a626184 | [
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from sqlalchemy import Column, Integer, BigInteger, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
DBSession = scoped_session(sessionmaker())
Base = declarative_base()
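# The session and metadata are left unbound in this module; a sketch of how the rest of the
# application presumably wires them up (the engine URL is a placeholder assumption):
#   engine = create_engine("postgresql://user:pass@localhost/dashboard")
#   DBSession.configure(bind=engine)
#   Base.metadata.create_all(engine)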
class TransferModel(Base):
__tablename__ = "transfer"
id = Column(Integer, primary_key=True)
uuid = Column(String)
tstamp = Column(DateTime, default=datetime.utcnow)
set = Column(String)
source = Column(String)
destination = Column(String)
dataset = Column(String)
status = Column(Integer)
rate = Column(BigInteger)
message = Column(String)
faults = Column(Integer)
def __repr__(self):
return "Transfer(id={}, uuid={}, tstamp={}, source={}, destination={}, dataset={}, status={}, rate={}, faults={})".format(
self.id, self.uuid, self.tstamp, self.source, self.destination, self.dataset, self.status, self.rate, self.faults)
| 33.862069 | 130 | 0.706721 |
73c57060bcb145021eb5b710e1bcfcb21f2648f0 | 1,067 | py | Python | Courses/Udacity/CS101/Lesson_4_Problem_Set/04-Better_Splitting/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | [
"Unlicense"
] | 1 | 2019-02-13T12:02:26.000Z | 2019-02-13T12:02:26.000Z | Courses/Udacity/CS101/Lesson_4_Problem_Set/04-Better_Splitting/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | [
"Unlicense"
] | 1 | 2018-08-13T15:58:33.000Z | 2018-08-13T15:58:33.000Z | Courses/Udacity/CS101/Lesson_4_Problem_Set/04-Better_Splitting/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | [
"Unlicense"
] | 2 | 2017-08-10T20:01:29.000Z | 2021-07-01T08:39:13.000Z |
# 1 Gold Star
# The built-in <string>.split() procedure works
# okay, but fails to find all the words on a page
# because it only uses whitespace to split the
# string. To do better, we should also use punctuation
# marks to split the page into words.
# Define a procedure, split_string, that takes two
# inputs: the string to split and a string containing
# all of the characters considered separators. The
# procedure should return a list of strings that break
# the source string up by the characters in the
# splitlist.
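# One straightforward approach (used in the implementation below): scan the source string
# character by character and start a new word whenever the previous character was a separator.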
def split_string(source,splitlist):
    output = []
    at_split = True
    for char in source:
        if char in splitlist:
            at_split = True
        else:
            if at_split:
                output.append(char)
                at_split = False
            else:
                output[-1] = output[-1] + char
    return output
#out = split_string("This is a test-of the,string separation-code!"," ,!-")
#print out
#>>> ['This', 'is', 'a', 'test', 'of', 'the', 'string', 'separation', 'code']
#out = split_string("After the flood ... all the colors came out.", " .")
#print out
#>>> ['After', 'the', 'flood', 'all', 'the', 'colors', 'came', 'out']
#out = split_string("First Name,Last Name,Street Address,City,State,Zip Code",",")
#print out
#>>>['First Name', 'Last Name', 'Street Address', 'City', 'State', 'Zip Code']
| 32.333333 | 82 | 0.68135 |
73c5853a6ea3d141b03350a4361f2462d761ad05 | 1,453 | py | Python | stable_baselines/deepq/experiments/train_cartpole.py | jfsantos/stable-baselines | 5bd4ffa98e364b9e8e8b4e64bc2d1be9b6e4897a | [
"MIT"
] | null | null | null | stable_baselines/deepq/experiments/train_cartpole.py | jfsantos/stable-baselines | 5bd4ffa98e364b9e8e8b4e64bc2d1be9b6e4897a | [
"MIT"
] | null | null | null | stable_baselines/deepq/experiments/train_cartpole.py | jfsantos/stable-baselines | 5bd4ffa98e364b9e8e8b4e64bc2d1be9b6e4897a | [
"MIT"
] | 1 | 2019-12-25T16:45:54.000Z | 2019-12-25T16:45:54.000Z |
import argparse
import gym
import numpy as np
from stable_baselines.deepq import DeepQ, models as deepq_models
def callback(lcl, _glb):
"""
the callback function for logging and saving
:param lcl: (dict) the local variables
:param _glb: (dict) the global variables
:return: (bool) is solved
"""
# stop training if reward exceeds 199
if len(lcl['episode_rewards'][-101:-1]) == 0:
mean_100ep_reward = -np.inf
else:
mean_100ep_reward = round(float(np.mean(lcl['episode_rewards'][-101:-1])), 1)
is_solved = lcl['step'] > 100 and mean_100ep_reward >= 199
return is_solved
def main(args):
"""
train and save the DeepQ model, for the cartpole problem
:param args: (ArgumentParser) the input arguments
"""
env = gym.make("CartPole-v0")
q_func = deepq_models.mlp([64])
model = DeepQ(
env=env,
policy=q_func,
learning_rate=1e-3,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
)
model.learn(total_timesteps=args.max_timesteps, callback=callback)
print("Saving model to cartpole_model.pkl")
model.save("cartpole_model.pkl")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train DQN on cartpole")
parser.add_argument('--max-timesteps', default=100000, type=int, help="Maximum number of timesteps")
args = parser.parse_args()
main(args)
| 27.415094 | 104 | 0.668273 |
73c596404520949ea5c7aec7682dccacedb1313d | 2,047 | py | Python | linux_proc_extras/tests/test_linux_proc_extras.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2020-08-08T02:01:01.000Z | 2020-08-08T02:01:01.000Z | linux_proc_extras/tests/test_linux_proc_extras.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:50:17.000Z | 2018-08-15T05:50:17.000Z | linux_proc_extras/tests/test_linux_proc_extras.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2019-03-06T14:30:52.000Z | 2019-03-06T14:30:52.000Z |
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from mock import mock_open, patch
import pytest
from datadog_checks.linux_proc_extras import MoreUnixCheck
CHECK_NAME = 'linux_proc_extras'
HERE = os.path.abspath(os.path.dirname(__file__))
FIXTURE_DIR = os.path.join(HERE, "fixtures")
INODE_GAUGES = [
'system.inodes.total',
'system.inodes.used'
]
PROC_COUNTS = [
'system.linux.context_switches',
'system.linux.processes_created',
'system.linux.interrupts'
]
ENTROPY_GAUGES = [
'system.entropy.available'
]
PROCESS_STATS_GAUGES = [
'system.processes.states',
'system.processes.priorities'
]
@pytest.fixture
def aggregator():
from datadog_checks.stubs import aggregator
aggregator.reset()
return aggregator
@pytest.fixture
def check():
return MoreUnixCheck(CHECK_NAME, {}, {})
# Really a basic check to see if all metrics are there
def test_check(aggregator, check):
check.tags = []
check.set_paths()
with open(os.path.join(FIXTURE_DIR, "entropy_avail")) as f:
m = mock_open(read_data=f.read())
with patch('__builtin__.open', m):
check.get_entropy_info()
with open(os.path.join(FIXTURE_DIR, "inode-nr")) as f:
m = mock_open(read_data=f.read())
with patch('__builtin__.open', m):
check.get_inode_info()
with open(os.path.join(FIXTURE_DIR, "proc-stat")) as f:
m = mock_open(read_data=f.read())
with patch('__builtin__.open', m):
check.get_stat_info()
with open(os.path.join(FIXTURE_DIR, "process_stats")) as f:
with patch(
'datadog_checks.linux_proc_extras.linux_proc_extras.get_subprocess_output',
return_value=(f.read(), "", 0)
):
check.get_process_states()
# Assert metrics
for metric in PROC_COUNTS + INODE_GAUGES + ENTROPY_GAUGES + PROCESS_STATS_GAUGES:
aggregator.assert_metric(metric)
aggregator.assert_all_metrics_covered()
| 24.369048 | 87 | 0.679043 |
73c5b2feb2840b35e91916a6a1e748a3f7a3a9fe | 17,779 | py | Python | src/lazycluster/cluster/runtime_cluster.py | prototypefund/lazycluster | e6fbd69dbd73ec9bf101a502f25f7afdf0579f66 | [
"Apache-2.0"
] | 44 | 2019-08-07T12:01:07.000Z | 2021-09-02T16:50:51.000Z | src/lazycluster/cluster/runtime_cluster.py | prototypefund/lazycluster | e6fbd69dbd73ec9bf101a502f25f7afdf0579f66 | [
"Apache-2.0"
] | 9 | 2020-10-26T13:08:32.000Z | 2021-09-16T02:13:58.000Z | src/lazycluster/cluster/runtime_cluster.py | prototypefund/lazycluster | e6fbd69dbd73ec9bf101a502f25f7afdf0579f66 | [
"Apache-2.0"
] | 9 | 2019-09-18T07:52:09.000Z | 2022-02-11T13:48:19.000Z |
"""Module comprising the abstract RuntimeCluster class with its related `launcher strategy` classes.
Note: The design of the launcher classes follows the strategy pattern.
"""
import atexit
import logging
from subprocess import Popen
from typing import Dict, List, Optional, Union
from lazycluster.exceptions import LazyclusterError
from lazycluster.runtime_mgmt import RuntimeGroup
class MasterLauncher(object):
"""Abstract class for implementing the strategy for launching the master instance of the cluster."""
def __init__(self, runtime_group: RuntimeGroup):
"""Initialization method.
Args:
runtime_group: The group where the workers will be started.
"""
# Create the Logger
self.log = logging.getLogger(__name__)
self._group = runtime_group
self._port: Union[int, None] = None # Needs to be set in self.start()
self._process: Optional[Popen] = None # Needs to be set in self.start()
self.log.debug("MasterLauncher initialized.")
@property
def port(self) -> Optional[int]:
"""The port where the master instance is started on. Will be None if not yet started.
Returns:
Optional[int]: The master port.
"""
return self._port
@property
def process(self) -> Optional[Popen]:
"""The process object where the master instance was started in.
Returns:
Optional[Popen]: The process object or None if not yet started.
"""
return self._process
def start(
self, ports: Union[List[int], int], timeout: int = 3, debug: bool = False
) -> List[int]:
"""Launch a master instance.
Note:
If you create a custom subclass of MasterLauncher which will not start the master instance on localhost
then you should pass the debug flag on to `execute_task()` of the `RuntimeGroup` or `Runtime` so that you
can benefit from the debug feature of `RuntimeTask.execute()`.
Args:
ports: Port where the master should be started. If a list is given then the first port that is free in the
`RuntimeGroup` will be used. The actual chosen port can requested via the property `port`.
timeout: Timeout (s) after which an MasterStartError is raised if master instance not started yet.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If, `False` then
the stdout/stderr will be added to python logger with level debug after each task step. Defaults to
`False`.
Returns:
List[int]: In case a port list was given the updated port list will be returned. Otherwise an empty list.
Raises:
PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.
NoPortsLeftError: If a port list was given and none of the ports is actually free in the `RuntimeGroup`.
MasterStartError: If master was not started after the specified `timeout`.
"""
# The actual values for the following instance attributes need to be set within the concrete implementations:
# - self._port
# - self._process (In case the master is started in a process managed by the MasterLauncher instance )
raise NotImplementedError
def cleanup(self) -> None:
"""Release all resources."""
self.log.debug("Cleaning up MasterLauncher ...")
if self._process:
self._process.terminate()
class WorkerLauncher(object):
"""Abstract class for implementing the strategy for launching worker instances within a RuntimeGroup.
In order to implement a new concrete `WorkerLauncher` subclass you need to implement the start method. Please
consider the comments of the start method because some internal variables need to be set in the concrete
implementation.
Moreover, the `setup_worker_ssh_tunnels()` method can be used to setup ssh tunnels so that all entities can talk to
each other.
"""
def __init__(self, runtime_group: RuntimeGroup):
"""Initialization method.
Args:
runtime_group: The group where the workers will be started in.
"""
# Create the Logger
self.log = logging.getLogger(__name__)
self._group = runtime_group
self._ports_per_host: Dict[
str, List[int]
] = {} # Needs to be set in `self.start()`
self.log.debug("Worker launcher initialized")
def __repr__(self) -> str:
return "%s(%r)" % (self.__class__, self.__dict__)
@property
def ports_per_host(self) -> Dict[str, List[int]]:
"""Dictionary with the host as key and a port list as value. The list contains all ports where a worker instance is reachable on the respective host.
Returns:
Dict[str, List[int]]: The ports per host as a dictionary.
"""
return self._ports_per_host
def start(
self, worker_count: int, master_port: int, ports: List[int], debug: bool = False
) -> List[int]:
"""Launches the worker instances in the `RuntimeGroup`.
Args:
worker_count: The number of worker instances to be started in the group.
master_port: The port of the master instance.
ports: The ports to be used for starting the workers. Only ports from the list will be chosen that are
actually free.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then
the stdout/stderr will be added to the python logger with level debug after each task step. Defaults to
`False`.
Returns:
List[int]: The updated port list after starting the workers, i.e. the used ones were removed.
Raises:
NoPortsLeftError: If there are not enough free ports for starting all workers.
"""
# 1. The actual values for the following instance attributes need to be set within the concrete implementations:
# - self._ports_per_host
# 2. Raise `NoPortsLeftError` if there were not sufficient free ports in the list
raise NotImplementedError
def setup_worker_ssh_tunnels(self) -> None:
"""Set up ssh tunnel for workers such that all communication is routed over the local machine and all entities can talk to each other on localhost.
Note:
This method needs to be called if the communication between the worker instances is necessary, e.g. in case
of DASK or Apache Flink, where data needs to be shuffled between the different entities.
Raises:
ValueError: If host is not contained.
PortInUseError: If `group_port` is occupied on the local machine.
NoPortsLeftError: If `group_ports` was given and none of the ports was free.
"""
self.log.info("Setting up ssh tunnel for inter worker communication.")
for host, ports in self.ports_per_host.items():
for worker_port in ports:
self._group.expose_port_from_runtime_to_group(
host, worker_port
) # Raises all errors
def cleanup(self) -> None:
"""Release all resources."""
self.log.debug("Cleaning up WorkerLauncher ...")
self.log.debug("No WorkerLauncher resources to be released")
class RuntimeCluster(object):
"""Abstract cluster class.
All further cluster implementations should inherit from this class either directly (e.g. the abstract class
`MasterWorkerCluster`) or indirectly (e.g. the DaskCluster which is a concrete implementation of the
`MasterWorkerCluster`).
"""
def __repr__(self) -> str:
return "%s(%r)" % (self.__class__, self.__dict__)
class MasterWorkerCluster(RuntimeCluster):
"""Class for clusters following a master-worker architecture.
Usually you want to inherit from this class and do not want to use it directly. It is recommended to treat this
class as an abstract class or an interface.
Examples:
Create a cluster with all `Runtimes` detected by the `RuntimeManager`.
```python
from lazycluster import RuntimeManager
cluster = MyMasterWorkerClusterImpl(RuntimeManager().create_group())
cluster.start()
```
Use different strategies for launching the master and the worker instances than the default ones by providing
custom implementations of `MasterLauncher` and `WorkerLauncher`.
```python
cluster = MyMasterWorkerClusterImpl(RuntimeManager().create_group(),
MyMasterLauncherImpl(),
MyWorkerLauncherImpl())
cluster.start()
```
"""
DEFAULT_MASTER_PORT = 60000
DEFAULT_PORT_RANGE_START = 60001 # Can be overwritten in subclasses
DEFAULT_PORT_RANGE_END = 60200 # Can be overwritten in subclasses
def __init__(
self,
runtime_group: RuntimeGroup,
ports: Optional[List[int]] = None,
master_launcher: Optional[MasterLauncher] = None,
worker_launcher: Optional[WorkerLauncher] = None,
):
"""Initialization method.
Args:
runtime_group: The `RuntimeGroup` contains all `Runtimes` which can be used for starting the cluster
entities.
ports: The list of ports which will be used to instantiate a cluster. Defaults to
`list(range(self.DEFAULT_PORT_RANGE_START, self.DEFAULT_PORT_RANGE_END))`.
master_launcher: Optionally, an instance implementing the `MasterLauncher` interface can be given, which
implements the strategy for launching the master instances in the cluster. If None, then
the default of the concrete cluster implementation will be chosen.
worker_launcher: Optionally, an instance implementing the `WorkerLauncher` interface can be given, which
implements the strategy for launching the worker instances. If None, then the default of
the concrete cluster implementation will be chosen.
"""
# Create the Logger
self.log = logging.getLogger(__name__)
self._group = runtime_group
self._ports = (
ports
if ports
else list(range(self.DEFAULT_PORT_RANGE_START, self.DEFAULT_PORT_RANGE_END))
)
self._master_launcher = master_launcher
self._worker_launcher = worker_launcher
# Cleanup will be done atexit since usage of destructor may lead to exceptions
atexit.register(self.cleanup)
self.log.debug("MasterWorkerCluster initialized.")
def __str__(self) -> str:
return type(self).__name__ + " with " + str(self._group)
@property
def master_port(self) -> Optional[int]:
"""The port where the master instance was started. None, if not yet started.
Returns:
Optional[int]: The master port.
"""
return self._master_launcher.port if self._master_launcher else None
@property
def runtime_group(self) -> RuntimeGroup:
"""The RuntimeGroup.
Returns:
RuntimeGroup: The used group.
"""
return self._group
def start(
self,
worker_count: Optional[int] = None,
master_port: Optional[int] = None,
debug: bool = False,
) -> None:
"""Convenient method for launching the cluster.
Internally, `self.start_master()` and `self.start_workers()` will be called.
Args:
master_port: Port of the cluster master. Will be passed on to `self.start_master()`, hence see the
respective method for further details.
worker_count: The number of worker instances to be started in the cluster. Will be passed on to
`self.start_workers()`, hence see the respective method for further details.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then
the stdout/stderr will be added to the python logger with level debug after each `RuntimeTask` step. Defaults
to `False`.
"""
self.log.info("Starting the cluster ...")
self.start_master(master_port, debug=debug)
self.start_workers(worker_count, debug=debug)
def start_master(
self, master_port: Optional[int] = None, timeout: int = 3, debug: bool = False
) -> None:
"""Start the master instance.
Note:
How the master is actually started is determined by the actual `MasterLauncher` implementation. Another
implementation adhering to the `MasterLauncher` interface can be provided in the constructor of the cluster
class.
Args:
master_port: Port of the master instance. Defaults to `self.DEFAULT_MASTER_PORT`, but another one is chosen if
the port is not free within the group. The actual chosen port can be requested via
self.master_port.
timeout: Timeout (s) after which a MasterStartError is raised if the master instance has not started yet.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. Has no effect
if the master instance is started locally, which is what default MasterLauncher implementations usually do.
Raises:
PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.
NoPortsLeftError: If there are no free ports left in the port list for instantiating the master.
MasterStartError: If master was not started after the specified `timeout`.
"""
if not self._master_launcher:
raise LazyclusterError("Master Launcher does not exist.")
# 1. Determine a port or port list
overwrite_port_list = False
if master_port:
ports: Union[int, List[int]] = master_port
elif self._group.has_free_port(self.DEFAULT_MASTER_PORT):
ports = self.DEFAULT_MASTER_PORT
else:
ports = self._ports
overwrite_port_list = True
# 2. Trigger the actual logic for starting the master instance
ports = self._master_launcher.start(
ports, timeout, debug
) # Raises the possible exceptions
if overwrite_port_list:
self._ports = ports
# Some attributes must be set in the given MasterLauncher implementation after
# starting the master to ensure correct behavior of MasterWorkerCluster
# => indicates a wrong implementation of the given launcher class
assert self._master_launcher.port
if not self._master_launcher.process:
self.log.debug(
"No self._master_launcher.process is set after starting the cluster master. If the master "
"instance was not started as a daemon, then this could indicate a buggy implementation. The "
"variable should be set to be able to eventually shut down the cluster."
)
self.log.info(f"Master instance started on port {str(self.master_port)}.")
def start_workers(self, count: Optional[int] = None, debug: bool = False) -> None:
"""Start the worker instances.
Note:
How workers are actually started is determined by the actual `WorkerLauncher` implementation. Another
implementation adhering to the `WorkerLauncher` interface can be provided in the constructor of the cluster
class.
Args:
count: The number of worker instances to be started in the cluster. Defaults to the number of runtimes in
the cluster.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then
the stdout/stderr will be added to the python logger with level debug after each `RuntimeTask` step. Defaults to
`False`.
Raises:
NoPortsLeftError: If there are no free ports left in the port list for instantiating new worker entities.
"""
if not self._worker_launcher:
raise LazyclusterError("Worker launcher does not exist")
if not count:
count = self._group.runtime_count
assert self.master_port
self._ports = self._worker_launcher.start(
count, self.master_port, self._ports, debug
)
# Some attributes must be set in the given WorkerLauncher implementation after
# starting the workers to ensure correct behavior of MasterWorkerCluster
# => indicates a wrong implementation of the given launcher class
# e.g. for DASK -> assert self._worker_launcher.ports_per_host
self.log.info("Worker instances started.")
def print_log(self) -> None:
"""Print the execution log.
Note:
This method is a convenient wrapper for the equivalent method of the contained `RuntimeGroup`.
"""
self.runtime_group.print_log()
def cleanup(self) -> None:
"""Release all resources."""
self.log.info("Shutting down the MasterWorkerCluster ...")
if self._worker_launcher:
self._worker_launcher.cleanup()
if self._master_launcher:
self._master_launcher.cleanup()
self._group.cleanup()
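# ---------------------------------------------------------------------------
# Illustrative sketch (not part of lazycluster): a minimal pair of concrete
# launcher implementations showing how the abstract classes above are meant to
# be subclassed. The bodies only satisfy the documented contract (setting
# `self._port` / `self._ports_per_host`); a real implementation would launch
# actual master and worker processes, e.g. via RuntimeTasks executed in the
# RuntimeGroup.
class _SketchMasterLauncher(MasterLauncher):
    def start(self, ports, timeout=3, debug=False):
        # Pick the single port or the first port of the list. A real launcher
        # should verify that the chosen port is actually free in the group and
        # start the master process before returning.
        port = ports if isinstance(ports, int) else ports[0]
        self._port = port  # Required by MasterWorkerCluster.start_master()
        return [] if isinstance(ports, int) else [p for p in ports if p != port]


class _SketchWorkerLauncher(WorkerLauncher):
    def start(self, worker_count, master_port, ports, debug=False):
        # A real launcher would start `worker_count` workers connecting to
        # `master_port` and record the ports they listen on per host, so that
        # `setup_worker_ssh_tunnels()` can expose them afterwards.
        self._ports_per_host = {}
        return ports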
| 42.431981 | 157 | 0.648293 |
73c5c5b84d97be6ef48795971abd6d3bf6eabde2 | 5,260 | py | Python | obsplan/tests/test_graphics.py | zachariahmilby/keck-aurora-observation-planning | 9f37e4f7782a3c3a2eb388bb0fac8fcb05ca1882 | [
"MIT"
] | null | null | null | obsplan/tests/test_graphics.py | zachariahmilby/keck-aurora-observation-planning | 9f37e4f7782a3c3a2eb388bb0fac8fcb05ca1882 | [
"MIT"
] | null | null | null | obsplan/tests/test_graphics.py | zachariahmilby/keck-aurora-observation-planning | 9f37e4f7782a3c3a2eb388bb0fac8fcb05ca1882 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from obsplan.graphics import color_dict, _keck_one_alt_az_axis, \
_format_axis_date_labels
class TestColorDictionary:
@pytest.fixture
def red(self):
yield '#D62728'
@pytest.fixture
def orange(self):
yield '#FF7F0E'
@pytest.fixture
def yellow(self):
yield '#FDB813'
@pytest.fixture
def green(self):
yield '#2CA02C'
@pytest.fixture
def blue(self):
yield '#0079C1'
@pytest.fixture
def violet(self):
yield '#9467BD'
@pytest.fixture
def cyan(self):
yield '#17BECF'
@pytest.fixture
def magenta(self):
yield '#D64ECF'
@pytest.fixture
def brown(self):
yield '#8C564B'
@pytest.fixture
def darkgrey(self):
yield '#3F3F3F'
@pytest.fixture
def grey(self):
yield '#7F7F7F'
@pytest.fixture
def lightgrey(self):
yield '#BFBFBF'
def test_red_hex_value(self, red):
assert color_dict['red'] == red
def test_orange_hex_value(self, orange):
assert color_dict['orange'] == orange
def test_yellow_hex_value(self, yellow):
assert color_dict['yellow'] == yellow
def test_green_hex_value(self, green):
assert color_dict['green'] == green
def test_blue_hex_value(self, blue):
assert color_dict['blue'] == blue
def test_violet_hex_value(self, violet):
assert color_dict['violet'] == violet
def test_cyan_hex_value(self, cyan):
assert color_dict['cyan'] == cyan
def test_magenta_hex_value(self, magenta):
assert color_dict['magenta'] == magenta
def test_brown_hex_value(self, brown):
assert color_dict['brown'] == brown
def test_darkgrey_hex_value(self, darkgrey):
assert color_dict['darkgrey'] == darkgrey
def test_grey_hex_value(self, grey):
assert color_dict['grey'] == grey
def test_lightgrey_hex_value(self, lightgrey):
assert color_dict['lightgrey'] == lightgrey
class TestKeckOneAltAxAxis:
def test_if_return_type_is_axis(self):
fig, axis = plt.subplots(subplot_kw={'projection': 'polar'})
assert isinstance(_keck_one_alt_az_axis(axis), plt.Axes) is True
def test_failure_with_non_polar_axis(self):
with pytest.raises(AttributeError):
fig, axis = plt.subplots()
_keck_one_alt_az_axis(axis)
def test_theta_direction_value_is_negative(self):
fig, axis = plt.subplots(subplot_kw={'projection': 'polar'})
axis = _keck_one_alt_az_axis(axis)
assert axis.get_theta_direction() == -1
def test_theta_zero_location_is_north(self):
fig, axis = plt.subplots(subplot_kw={'projection': 'polar'})
axis = _keck_one_alt_az_axis(axis)
assert axis.get_theta_offset() == np.pi/2
def test_rmin_is_0(self):
fig, axis = plt.subplots(subplot_kw={'projection': 'polar'})
axis = _keck_one_alt_az_axis(axis)
assert axis.get_rmin() == 0
def test_rmax_is_90(self):
fig, axis = plt.subplots(subplot_kw={'projection': 'polar'})
axis = _keck_one_alt_az_axis(axis)
assert axis.get_rmax() == 90
class TestFormatAxisDateLabels:
@pytest.fixture
def xticks(self):
yield np.array([18786.0, 18786.041666666668, 18786.083333333332,
18786.125])
@pytest.fixture
def utc_times(self):
yield np.array(['00:00', '01:00', '02:00', '03:00'])
@pytest.fixture
def california_times(self):
yield np.array(['17:00', '18:00', '19:00', '20:00'])
def test_utc_axis_positions(self, xticks):
fig, axis = plt.subplots()
time = pd.date_range(start='2021-06-08', periods=180, freq='min')
axis.plot(time, np.ones_like(time))
_format_axis_date_labels(axis)
assert assert_array_equal(axis.get_xticks(), xticks) is None
def test_utc_axis_labels(self, utc_times):
fig, axis = plt.subplots()
time = pd.date_range(start='2021-06-08', periods=180, freq='min')
axis.plot(time, np.ones_like(time))
_format_axis_date_labels(axis)
fig.canvas.draw()
labels = np.array([label.get_text()
for label in axis.get_xticklabels()])
assert assert_array_equal(labels, utc_times) is None
def test_california_axis_positions(self, xticks):
fig, axis = plt.subplots()
time = pd.date_range(start='2021-06-08', periods=180, freq='min')
axis.plot(time, np.ones_like(time))
axis = _format_axis_date_labels(axis)
assert assert_array_equal(axis.get_xticks(), xticks) is None
def test_california_axis_labels(self, california_times):
fig, axis = plt.subplots()
time = pd.date_range(start='2021-06-08', periods=180, freq='min')
axis.plot(time, np.ones_like(time))
axis = _format_axis_date_labels(axis)
fig.canvas.draw()
labels = np.array([label.get_text()
for label in axis.get_xticklabels()])
assert assert_array_equal(labels, california_times) is None
| 29.550562 | 73 | 0.642395 |
73c60c9b2a9d4a6d490e76e0e11784f6261481af | 2,273 | py | Python | 2019-02-04/islands.py | asthajn6/archive | 975fc4cf8acb9b0c9dc971f8b31f8905edf6031f | [
"Unlicense"
] | 2 | 2018-10-16T03:17:53.000Z | 2018-10-16T17:49:10.000Z | 2019-02-04/islands.py | asthajn6/archive | 975fc4cf8acb9b0c9dc971f8b31f8905edf6031f | [
"Unlicense"
] | null | null | null | 2019-02-04/islands.py | asthajn6/archive | 975fc4cf8acb9b0c9dc971f8b31f8905edf6031f | [
"Unlicense"
] | null | null | null | def beachfront_cell(map, i, j):
'''Returns the amount of beachfront for the cell at `map[i][j]`.
The map is a grid of 1s and 0s where 1 means land and 0 means water.
Beachfront is defined as any point where water borders land to the top,
bottom, left, or right. Only land cells have beachfront (otherwise the
same beachfront would be counted twice). Cells beyond the border of the map
are always considered water.
Arguments:
map (2D rectangular list):
The map of the island.
i (int):
The row coordinate of the cell.
j (int):
The column coordinate of the cell.
Returns:
beachfront (int):
The amount of beachfront at `map[i][j]`.
'''
n_rows = len(map)
n_cols = len(map[0])
first_row = 0
first_col = 0
last_row = n_rows - 1
last_col = n_cols - 1
center = map[i][j]
t = map[i - 1][j] if first_row < i else 0
b = map[i + 1][j] if i < last_row else 0
l = map[i][j - 1] if first_col < j else 0
r = map[i][j + 1] if j < last_col else 0
if center == 0:
return 0
else:
beachfront = 0
if t == 0: beachfront += 1
if b == 0: beachfront += 1
if l == 0: beachfront += 1
if r == 0: beachfront += 1
return beachfront
def beachfront_island(map):
'''Returns the amount of beachfront for the island described by the map.
The map is a grid of 1s and 0s where 1 means land and 0 means water.
Beachfront is defined as any point where water borders land to the top,
bottom, left, or right. Only land cells have beachfront (otherwise the
same beachfront would be counted twice). Cells beyond the border of the map
are always considered water.
Arguments:
map (2D rectangular list):
The map of the island.
Returns:
beachfront (int):
The amount of beachfront on the entire map.
'''
perimeter = 0
for i, row in enumerate(map):
for j, _ in enumerate(row):
perimeter += beachfront_cell(map, i, j)
return perimeter
def test():
assert beachfront_island([
[0, 1, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 0],
[1, 1, 0, 0],
]) == 16
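# Illustrative entry point (an addition, not part of the original exercise):
# run the sanity check above and print the beachfront of the example island.
if __name__ == '__main__':
    test()
    example_map = [
        [0, 1, 0, 0],
        [1, 1, 1, 0],
        [0, 1, 0, 0],
        [1, 1, 0, 0],
    ]
    # The seven land cells expose 16 water-facing edges in total.
    print(beachfront_island(example_map))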
| 28.772152 | 79 | 0.58645 |
73c621d2cc505fdeeb9c2b4ad7fc3f201ef7a374 | 20,139 | py | Python | tests/druid_tests.py | rodrigoguariento/incubator-superset | b2633a51d43faaca74751349b96fc32784d4b377 | [
"Apache-2.0"
] | 1 | 2019-12-12T16:18:45.000Z | 2019-12-12T16:18:45.000Z | tests/druid_tests.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 59 | 2019-10-29T10:43:54.000Z | 2020-01-13T20:28:00.000Z | tests/druid_tests.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 3 | 2020-04-15T16:34:09.000Z | 2020-06-22T17:26:45.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
import json
import unittest
from datetime import datetime
from unittest.mock import Mock, patch
from superset import db, security_manager
from tests.test_app import app
from .base_tests import SupersetTestCase
try:
from superset.connectors.druid.models import (
DruidCluster,
DruidColumn,
DruidDatasource,
DruidMetric,
)
except ImportError:
pass
class PickableMock(Mock):
def __reduce__(self):
return (Mock, ())
SEGMENT_METADATA = [
{
"id": "some_id",
"intervals": ["2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z"],
"columns": {
"__time": {
"type": "LONG",
"hasMultipleValues": False,
"size": 407240380,
"cardinality": None,
"errorMessage": None,
},
"dim1": {
"type": "STRING",
"hasMultipleValues": False,
"size": 100000,
"cardinality": 1944,
"errorMessage": None,
},
"dim2": {
"type": "STRING",
"hasMultipleValues": True,
"size": 100000,
"cardinality": 1504,
"errorMessage": None,
},
"metric1": {
"type": "FLOAT",
"hasMultipleValues": False,
"size": 100000,
"cardinality": None,
"errorMessage": None,
},
},
"aggregators": {
"metric1": {"type": "longSum", "name": "metric1", "fieldName": "metric1"}
},
"size": 300000,
"numRows": 5000000,
}
]
GB_RESULT_SET = [
{
"version": "v1",
"timestamp": "2012-01-01T00:00:00.000Z",
"event": {"dim1": "Canada", "dim2": "boy", "metric1": 12345678},
},
{
"version": "v1",
"timestamp": "2012-01-01T00:00:00.000Z",
"event": {"dim1": "USA", "dim2": "girl", "metric1": 12345678 / 2},
},
]
DruidCluster.get_druid_version = lambda _: "0.9.1" # type: ignore
class DruidTests(SupersetTestCase):
"""Testing interactions with Druid"""
@classmethod
def setUpClass(cls):
cls.create_druid_test_objects()
def get_test_cluster_obj(self):
return DruidCluster(
cluster_name="test_cluster",
broker_host="localhost",
broker_port=7980,
broker_endpoint="druid/v2",
metadata_last_refreshed=datetime.now(),
)
def get_cluster(self, PyDruid):
instance = PyDruid.return_value
instance.time_boundary.return_value = [{"result": {"maxTime": "2016-01-01"}}]
instance.segment_metadata.return_value = SEGMENT_METADATA
cluster = (
db.session.query(DruidCluster)
.filter_by(cluster_name="test_cluster")
.first()
)
if cluster:
for datasource in (
db.session.query(DruidDatasource)
.filter_by(cluster_name=cluster.cluster_name)
.all()
):
db.session.delete(datasource)
db.session.delete(cluster)
db.session.commit()
cluster = self.get_test_cluster_obj()
db.session.add(cluster)
cluster.get_datasources = PickableMock(return_value=["test_datasource"])
return cluster
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_client(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
cluster.refresh_datasources(merge_flag=True)
datasource_id = cluster.datasources[0].id
db.session.commit()
nres = [
list(v["event"].items()) + [("timestamp", v["timestamp"])]
for v in GB_RESULT_SET
]
nres = [dict(v) for v in nres]
import pandas as pd
df = pd.DataFrame(nres)
instance = PyDruid.return_value
instance.export_pandas.return_value = df
instance.query_dict = {}
instance.query_builder.last_query.query_dict = {}
resp = self.get_resp("/superset/explore/druid/{}/".format(datasource_id))
self.assertIn("test_datasource", resp)
form_data = {
"viz_type": "table",
"granularity": "one+day",
"druid_time_origin": "",
"since": "7+days+ago",
"until": "now",
"row_limit": 5000,
"include_search": "false",
"metrics": ["count"],
"groupby": ["dim1"],
"force": "true",
}
# One groupby
url = "/superset/explore_json/druid/{}/".format(datasource_id)
resp = self.get_json_resp(url, {"form_data": json.dumps(form_data)})
self.assertEqual("Canada", resp["data"]["records"][0]["dim1"])
form_data = {
"viz_type": "table",
"granularity": "one+day",
"druid_time_origin": "",
"since": "7+days+ago",
"until": "now",
"row_limit": 5000,
"include_search": "false",
"metrics": ["count"],
"groupby": ["dim1", "dim2"],
"force": "true",
}
# two groupby
url = "/superset/explore_json/druid/{}/".format(datasource_id)
resp = self.get_json_resp(url, {"form_data": json.dumps(form_data)})
self.assertEqual("Canada", resp["data"]["records"][0]["dim1"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_druid_sync_from_config(self):
CLUSTER_NAME = "new_druid"
self.login()
cluster = self.get_or_create(
DruidCluster, {"cluster_name": CLUSTER_NAME}, db.session
)
db.session.merge(cluster)
db.session.commit()
ds = (
db.session.query(DruidDatasource)
.filter_by(datasource_name="test_click")
.first()
)
if ds:
db.session.delete(ds)
db.session.commit()
cfg = {
"user": "admin",
"cluster": CLUSTER_NAME,
"config": {
"name": "test_click",
"dimensions": ["affiliate_id", "campaign", "first_seen"],
"metrics_spec": [
{"type": "count", "name": "count"},
{"type": "sum", "name": "sum"},
],
"batch_ingestion": {
"sql": "SELECT * FROM clicks WHERE d='{{ ds }}'",
"ts_column": "d",
"sources": [{"table": "clicks", "partition": "d='{{ ds }}'"}],
},
},
}
def check():
resp = self.client.post("/superset/sync_druid/", data=json.dumps(cfg))
druid_ds = (
db.session.query(DruidDatasource)
.filter_by(datasource_name="test_click")
.one()
)
col_names = set([c.column_name for c in druid_ds.columns])
assert {"affiliate_id", "campaign", "first_seen"} == col_names
metric_names = {m.metric_name for m in druid_ds.metrics}
assert {"count", "sum"} == metric_names
assert resp.status_code == 201
check()
# checking twice to make sure a second sync yields the same results
check()
# datasource exists, add new metrics and dimensions
cfg = {
"user": "admin",
"cluster": CLUSTER_NAME,
"config": {
"name": "test_click",
"dimensions": ["affiliate_id", "second_seen"],
"metrics_spec": [
{"type": "bla", "name": "sum"},
{"type": "unique", "name": "unique"},
],
},
}
resp = self.client.post("/superset/sync_druid/", data=json.dumps(cfg))
druid_ds = (
db.session.query(DruidDatasource)
.filter_by(datasource_name="test_click")
.one()
)
# columns and metrics are not deleted if config is changed as
# user could define his own dimensions / metrics and want to keep them
assert set([c.column_name for c in druid_ds.columns]) == set(
["affiliate_id", "campaign", "first_seen", "second_seen"]
)
assert set([m.metric_name for m in druid_ds.metrics]) == set(
["count", "sum", "unique"]
)
# metric type will not be overridden, sum stays instead of bla
assert set([m.metric_type for m in druid_ds.metrics]) == set(
["longSum", "sum", "unique"]
)
assert resp.status_code == 201
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@unittest.skipUnless(app.config["DRUID_IS_ACTIVE"], "DRUID_IS_ACTIVE is false")
def test_filter_druid_datasource(self):
CLUSTER_NAME = "new_druid"
cluster = self.get_or_create(
DruidCluster, {"cluster_name": CLUSTER_NAME}, db.session
)
db.session.merge(cluster)
gamma_ds = self.get_or_create(
DruidDatasource,
{"datasource_name": "datasource_for_gamma", "cluster": cluster},
db.session,
)
gamma_ds.cluster = cluster
db.session.merge(gamma_ds)
no_gamma_ds = self.get_or_create(
DruidDatasource,
{"datasource_name": "datasource_not_for_gamma", "cluster": cluster},
db.session,
)
no_gamma_ds.cluster = cluster
db.session.merge(no_gamma_ds)
db.session.commit()
security_manager.add_permission_view_menu("datasource_access", gamma_ds.perm)
security_manager.add_permission_view_menu("datasource_access", no_gamma_ds.perm)
perm = security_manager.find_permission_view_menu(
"datasource_access", gamma_ds.get_perm()
)
security_manager.add_permission_role(security_manager.find_role("Gamma"), perm)
security_manager.get_session.commit()
self.login(username="gamma")
url = "/druiddatasourcemodelview/list/"
resp = self.get_resp(url)
self.assertIn("datasource_for_gamma", resp)
self.assertNotIn("datasource_not_for_gamma", resp)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_sync_druid_perm(self, PyDruid):
self.login(username="admin")
instance = PyDruid.return_value
instance.time_boundary.return_value = [{"result": {"maxTime": "2016-01-01"}}]
instance.segment_metadata.return_value = SEGMENT_METADATA
cluster = (
db.session.query(DruidCluster)
.filter_by(cluster_name="test_cluster")
.first()
)
if cluster:
for datasource in (
db.session.query(DruidDatasource)
.filter_by(cluster_name=cluster.cluster_name)
.all()
):
db.session.delete(datasource)
db.session.delete(cluster)
db.session.commit()
cluster = DruidCluster(
cluster_name="test_cluster",
broker_host="localhost",
broker_port=7980,
metadata_last_refreshed=datetime.now(),
)
db.session.add(cluster)
cluster.get_datasources = PickableMock(return_value=["test_datasource"])
cluster.refresh_datasources()
cluster.datasources[0].merge_flag = True
metadata = cluster.datasources[0].latest_metadata()
self.assertEqual(len(metadata), 4)
db.session.commit()
view_menu_name = cluster.datasources[0].get_perm()
view_menu = security_manager.find_view_menu(view_menu_name)
permission = security_manager.find_permission("datasource_access")
pv = (
security_manager.get_session.query(security_manager.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
assert pv is not None
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_refresh_metadata(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
datasource = cluster.datasources[0]
cols = db.session.query(DruidColumn).filter(
DruidColumn.datasource_id == datasource.id
)
for col in cols:
self.assertIn(col.column_name, SEGMENT_METADATA[0]["columns"].keys())
metrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.like("%__metric1"))
)
for metric in metrics:
agg, _ = metric.metric_name.split("__")
self.assertEqual(
json.loads(metric.json)["type"], "double{}".format(agg.capitalize())
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_refresh_metadata_augment_type(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
metadata = SEGMENT_METADATA[:]
metadata[0]["columns"]["metric1"]["type"] = "LONG"
instance = PyDruid.return_value
instance.segment_metadata.return_value = metadata
cluster.refresh_datasources()
datasource = cluster.datasources[0]
column = (
db.session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name == "metric1")
).one()
self.assertEqual(column.type, "LONG")
metrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.like("%__metric1"))
)
for metric in metrics:
agg, _ = metric.metric_name.split("__")
self.assertEqual(metric.json_obj["type"], "long{}".format(agg.capitalize()))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_refresh_metadata_augment_verbose_name(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
datasource = cluster.datasources[0]
metrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.like("%__metric1"))
)
for metric in metrics:
metric.verbose_name = metric.metric_name
db.session.commit()
# The verbose name should not change during a refresh.
cluster.refresh_datasources()
datasource = cluster.datasources[0]
metrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.like("%__metric1"))
)
for metric in metrics:
self.assertEqual(metric.verbose_name, metric.metric_name)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_urls(self):
cluster = self.get_test_cluster_obj()
self.assertEqual(
cluster.get_base_url("localhost", "9999"), "http://localhost:9999"
)
self.assertEqual(
cluster.get_base_url("http://localhost", "9999"), "http://localhost:9999"
)
self.assertEqual(
cluster.get_base_url("https://localhost", "9999"), "https://localhost:9999"
)
self.assertEqual(
cluster.get_base_broker_url(), "http://localhost:7980/druid/v2"
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_druid_time_granularities(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
cluster.refresh_datasources(merge_flag=True)
datasource_id = cluster.datasources[0].id
db.session.commit()
nres = [
list(v["event"].items()) + [("timestamp", v["timestamp"])]
for v in GB_RESULT_SET
]
nres = [dict(v) for v in nres]
import pandas as pd
df = pd.DataFrame(nres)
instance = PyDruid.return_value
instance.export_pandas.return_value = df
instance.query_dict = {}
instance.query_builder.last_query.query_dict = {}
form_data = {
"viz_type": "table",
"since": "7+days+ago",
"until": "now",
"metrics": ["count"],
"groupby": [],
"include_time": "true",
}
granularity_map = {
"5 seconds": "PT5S",
"30 seconds": "PT30S",
"1 minute": "PT1M",
"5 minutes": "PT5M",
"1 hour": "PT1H",
"6 hour": "PT6H",
"one day": "P1D",
"1 day": "P1D",
"7 days": "P7D",
"week": "P1W",
"week_starting_sunday": "P1W",
"week_ending_saturday": "P1W",
"month": "P1M",
"quarter": "P3M",
"year": "P1Y",
}
url = "/superset/explore_json/druid/{}/".format(datasource_id)
for granularity_mapping in granularity_map:
form_data["granularity"] = granularity_mapping
self.get_json_resp(url, {"form_data": json.dumps(form_data)})
self.assertEqual(
granularity_map[granularity_mapping],
instance.timeseries.call_args[1]["granularity"]["period"],
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_external_metadata(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
datasource = cluster.datasources[0]
url = "/datasource/external_metadata/druid/{}/".format(datasource.id)
resp = self.get_json_resp(url)
col_names = {o.get("name") for o in resp}
self.assertEqual(col_names, {"__time", "dim1", "dim2", "metric1"})
if __name__ == "__main__":
unittest.main()
| 34.076142 | 88 | 0.576394 |
73c62c1f11b2d75426516f5c6dac4f78a5583228 | 512 | py | Python | setup.py | ninadpage/contact-book-python | 5247a7d13df0c952da59fb3c0875ab4002a3fc14 | [
"MIT"
] | null | null | null | setup.py | ninadpage/contact-book-python | 5247a7d13df0c952da59fb3c0875ab4002a3fc14 | [
"MIT"
] | null | null | null | setup.py | ninadpage/contact-book-python | 5247a7d13df0c952da59fb3c0875ab4002a3fc14 | [
"MIT"
] | null | null | null | # encoding=utf-8
# Author: ninadpage
from setuptools import setup
setup(name='contactbook',
version='0.0.1',
description='A simple Python implementation of a personal address book',
url='https://github.com/ninadpage/contact-book-python',
author='Ninad Page',
license='MIT',
packages=['contactbook'],
package_dir={
'': 'src',
},
test_suite='tests',
install_requires=[
'SQLAlchemy==1.0.14',
'PyTrie==0.2',
]
)
| 23.272727 | 78 | 0.578125 |
73c65c0825ac3e66c86ddfbfa8c6bd324c05d2b1 | 118 | py | Python | papermerge/core/ocr/__init__.py | w-michal/papermerge | 14703c3316deea06696da041b7adc4bd0b15270b | [
"Apache-2.0"
] | 1 | 2020-09-28T06:04:38.000Z | 2020-09-28T06:04:38.000Z | papermerge/core/ocr/__init__.py | w-michal/papermerge | 14703c3316deea06696da041b7adc4bd0b15270b | [
"Apache-2.0"
] | 1 | 2021-02-12T02:28:00.000Z | 2021-02-24T04:08:34.000Z | papermerge/core/ocr/__init__.py | w-michal/papermerge | 14703c3316deea06696da041b7adc4bd0b15270b | [
"Apache-2.0"
] | 2 | 2021-02-11T23:10:29.000Z | 2021-02-13T09:06:49.000Z | from papermerge.core.ocr.page import ocr_page # noqa
from papermerge.core.ocr.page import (COMPLETE, STARTED) # noqa | 59 | 64 | 0.788136 |
73c698c17e56f20c5d5f2569656abbc455f52c35 | 7,063 | py | Python | pymtv/ABI/DatabaseABI.py | MultiversumBlockchain/PyMTV | 3e91d8376ee08c2da2f1ab3eeb6873d2165af54e | [
"MIT"
] | null | null | null | pymtv/ABI/DatabaseABI.py | MultiversumBlockchain/PyMTV | 3e91d8376ee08c2da2f1ab3eeb6873d2165af54e | [
"MIT"
] | null | null | null | pymtv/ABI/DatabaseABI.py | MultiversumBlockchain/PyMTV | 3e91d8376ee08c2da2f1ab3eeb6873d2165af54e | [
"MIT"
] | null | null | null |
DatabaseABI = [
{
"inputs": [
{
"internalType": "address",
"name": "factoryAddress",
"type": "address"
},
{
"internalType": "address",
"name": "creatorAddress",
"type": "address"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"internalType": "address",
"name": "previousOwner",
"type": "address"
},
{
"indexed": True,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "uint256",
"name": "index",
"type": "uint256"
}
],
"name": "RowCreated",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "uint256",
"name": "index",
"type": "uint256"
}
],
"name": "RowDeleted",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "uint256",
"name": "index",
"type": "uint256"
}
],
"name": "RowUpdated",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "uint256",
"name": "index",
"type": "uint256"
},
{
"indexed": False,
"internalType": "bytes32",
"name": "name",
"type": "bytes32"
}
],
"name": "TableCreated",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "uint256",
"name": "index",
"type": "uint256"
}
],
"name": "TableDropped",
"type": "event"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_name",
"type": "bytes32"
},
{
"internalType": "bytes32[]",
"name": "_columns",
"type": "bytes32[]"
}
],
"name": "createTable",
"outputs": [
{
"internalType": "uint256",
"name": "_index",
"type": "uint256"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_table",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_index",
"type": "uint256"
}
],
"name": "deleteDirect",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_table",
"type": "uint256"
}
],
"name": "desc",
"outputs": [
{
"internalType": "bytes32[]",
"name": "_columns",
"type": "bytes32[]"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_table",
"type": "uint256"
}
],
"name": "dropTable",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "factory",
"outputs": [
{
"internalType": "contract DatabaseFactory",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_table",
"type": "uint256"
},
{
"internalType": "string[]",
"name": "_values",
"type": "string[]"
}
],
"name": "insert",
"outputs": [
{
"internalType": "uint256",
"name": "_index",
"type": "uint256"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_table",
"type": "uint256"
}
],
"name": "rowsCount",
"outputs": [
{
"internalType": "uint256",
"name": "_count",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_table",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_offset",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_limit",
"type": "uint256"
}
],
"name": "selectAll",
"outputs": [
{
"internalType": "string[][]",
"name": "_rows",
"type": "string[][]"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "showTables",
"outputs": [
{
"internalType": "bytes32[]",
"name": "_tables",
"type": "bytes32[]"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_table",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_index",
"type": "uint256"
},
{
"internalType": "uint256[]",
"name": "_columns",
"type": "uint256[]"
},
{
"internalType": "string[]",
"name": "_values",
"type": "string[]"
}
],
"name": "updateDirect",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
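# Illustrative usage sketch (assumptions: a web3.py `Web3` instance connected to a
# compatible RPC endpoint and the address of a deployed Database contract; neither
# the endpoint URL nor the address below is real):
#
#   from web3 import Web3
#   w3 = Web3(Web3.HTTPProvider("https://rpc.example.org"))
#   db = w3.eth.contract(address="0x0000000000000000000000000000000000000000", abi=DatabaseABI)
#   tables = db.functions.showTables().call()  # read-only call, no transaction needed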
| 20.591837 | 53 | 0.374204 |
73c71f2d4d722adee1ba282d9dc6cc64a7dd663e | 3,528 | py | Python | laserAtomTrap/scripts/main.py | statisdisc/modellingAndSimulation | 1458e7b7a527a4835c5a545f5899b6f1e4d80f92 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | laserAtomTrap/scripts/main.py | statisdisc/modellingAndSimulation | 1458e7b7a527a4835c5a545f5899b6f1e4d80f92 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | laserAtomTrap/scripts/main.py | statisdisc/modellingAndSimulation | 1458e7b7a527a4835c5a545f5899b6f1e4d80f92 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | '''
Script for modelling and plotting the forces on neutral Rb-87 atoms in a magneto optical trap
with a single laser and reflection gratings.
If modelling the fields (computationally expensive) has been done previously, use
plotFields.py instead for faster computation.
Use this script if generating the data for the first time.
!---WARNING---!
300MB of RAM recommended for resolution=100
10GB of RAM recommended for resolution=300
Resource usage scales as resolution^3
'''
import os
import sys
import time
import numpy as np
# User-made modules and functions
from calculateFields import calculateFields
from plotFields import plotFields
def main(
id = "default",
gratingType = "triangle",
beamRadius = 1.2,
gaussian = True,
imperfection = False,
resolution = 100,
rangeX = [-0.55, 0.55],
rangeY = [-0.55, 0.55],
rangeZ = [0., 1.1],
precisionCoords = 4,
precisionData = 2
):
'''
Generate the acceleration and radiation profiles for a neutral atom in a
Magneto-Optical Trap with one laser pointed down on a horizontal surface where
a reflection grating is located.
Args
id: Data id used for the naming of output files
gratingType: Shape of grating etches/grooves. Valid parameters are "triangle" and "square"
beamRadius: The incident laser beam radius in cm.
gaussian: Is the beam profile Gaussian or uniform? Boolean only.
imperfection: If True, some of the laser beam will be diffracted to 0th order (reflection)
resolution: Resolution of the data in all 3 axes. resolution x 2 = computation x 8.
rangeX: Range of x values to be evaluated in cm.
rangeY: Range of y values to be evaluated in cm.
rangeZ: Range of z values to be evaluated in cm.
precisionCoords: Precision of coordinate data when written to output file.
precisionData: Precision of field data when written to output file.
'''
print(f"\nProcessing {id}")
# Calculate the acceleration and radiation pressure, save to file in root/outputs/id/
calculateFields(
id = id,
gratingType = gratingType,
beamRadius = beamRadius,
gaussian = gaussian,
imperfection = imperfection,
resolution = resolution,
rangeX = rangeX,
rangeY = rangeY,
rangeZ = rangeZ,
precisionCoords = precisionCoords,
precisionData = precisionData
)
# Plot fields and save to root/outputs/id/
plotFields(id=id)
if __name__ == "__main__":
timeInit = time.time()
main(id="triangleGaussian")
main(id="triangle", gaussian=False)
main(id="squareGaussian", gratingType="square")
main(id="square", gratingType="square", gaussian=False)
main(id="triangleGaussianZoomOut", rangeX = [-1.55, 1.55], rangeY = [-1.55, 1.55], rangeZ = [0., 3.1])
main(id="triangleZoomOut", gaussian=False, rangeX = [-1.55, 1.55], rangeY = [-1.55, 1.55], rangeZ = [0., 3.1])
main(id="squareGaussianZoomOut", gratingType="square", rangeX = [-1.55, 1.55], rangeY = [-1.55, 1.55], rangeZ = [0., 3.1])
main(id="squareZoomOut", gratingType="square", gaussian=False, rangeX = [-1.55, 1.55], rangeY = [-1.55, 1.55], rangeZ = [0., 3.1])
timeElapsed = time.time() - timeInit
print(f"Elapsed time: {timeElapsed:.2f}s") | 38.347826 | 135 | 0.636338 |
73c72060a23e792a218bc8f69f12a3b033e9d322 | 3,024 | py | Python | playx/playlist/spotify.py | Saul-Dickson/playx | 48f3dc0d2cb68cfb6b0ed3c362c86ed3ed382ebe | [
"MIT"
] | 221 | 2017-07-21T03:46:50.000Z | 2021-12-20T22:45:25.000Z | playx/playlist/spotify.py | Saul-Dickson/playx | 48f3dc0d2cb68cfb6b0ed3c362c86ed3ed382ebe | [
"MIT"
] | 98 | 2018-07-14T12:56:28.000Z | 2021-03-04T16:48:24.000Z | playx/playlist/spotify.py | Saul-Dickson/playx | 48f3dc0d2cb68cfb6b0ed3c362c86ed3ed382ebe | [
"MIT"
] | 35 | 2018-05-15T19:30:08.000Z | 2021-07-24T14:43:33.000Z | from requests import get
from bs4 import BeautifulSoup
import re
from playx.playlist.playlistbase import PlaylistBase, SongMetadataBase
from playx.logger import Logger
# Setup logger
logger = Logger("Spotify")
class SpotifySong(SongMetadataBase):
"""Spotify songs container."""
def __init__(self, title="", artist="", album=""):
super().__init__()
self.title = title
self.artist = artist
self.album = album
self._create_search_query()
self._remove_duplicates()
def _create_search_query(self):
"""
Create a search query.
"""
self.search_query = self.title + " " + self.artist
class SpotifyIE(PlaylistBase):
"""Spotify playlist data extractor."""
def __init__(self, URL, pl_start=None, pl_end=None):
super().__init__(pl_start, pl_end)
self.URL = URL
self.list_content_tuple = []
self.playlist_name = ""
def get_data(self):
r = get(self.URL)
soup = BeautifulSoup(r.text, "html.parser")
text = "Spotify is currently not available in your country."
if text in str(soup):
raise ValueError(text)
s = soup.findAll(attrs={"class": "track-name-wrapper"})
name = soup.findAll(attrs={"class": "media-bd"})
name = re.sub(
r"<span.*?>|</span>",
"",
re.findall(r'<span dir="auto">.*?</span>', str(name))[0],
)
self.playlist_name = name
for i in s:
title = re.sub(
r'class="track-name".*?>|</span>',
"",
re.findall(r'class="track-name".*?</span>', str(i))[0],
)
# Some spotify playlists (mostly the ones by spotify) have one or
# more videos in the playlist. In that case we will skip the
# extraction of artist and album.
try:
artist = re.sub(
r'a href="/artist.*?<span dir=".*?>|</span>|</a>',
"",
re.findall(r'a href="/artist.*?</a>', str(i))[0],
)
except IndexError:
artist = ""
try:
album = re.sub(
r'a href="/album.*?<span dir=".*?>|</span>|</a>',
"",
re.findall(r'a href="/album.*?</a>', str(i))[0],
)
except IndexError:
album = ""
self.list_content_tuple.append(SpotifySong(title, artist, album))
self.strip_to_start_end()
def get_data(URL, pl_start, pl_end):
"""Generic function. Should be called only after
it has been verified that the URL is a Spotify playlist.
Returns a tuple containing the songs and name of
the playlist.
"""
logger.debug("Extracting Playlist Contents")
spotify_playlist = SpotifyIE(URL, pl_start, pl_end)
spotify_playlist.get_data()
return spotify_playlist.list_content_tuple, spotify_playlist.playlist_name
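# Illustrative usage sketch (the playlist URL below is a placeholder, not a real playlist):
#
#   songs, playlist_name = get_data(
#       "https://open.spotify.com/playlist/<playlist-id>", pl_start=None, pl_end=None
#   )
#   for song in songs:
#       print(song.search_query)  # "<title> <artist>", ready to be searched on YouTube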
| 31.5 | 78 | 0.54828 |
73c7216499bbd887e8b741cbf4905c24a5bee460 | 633 | py | Python | supriya/ugens/PauseSelf.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/ugens/PauseSelf.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/ugens/PauseSelf.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class PauseSelf(UGen):
"""
Pauses the enclosing synth when triggered by `trigger`.
::
>>> trigger = supriya.ugens.Impulse.kr(frequency=1.0)
>>> pause_self = supriya.ugens.PauseSelf.kr(
... trigger=trigger,
... )
>>> pause_self
PauseSelf.kr()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Envelope Utility UGens"
_ordered_input_names = collections.OrderedDict([("trigger", None)])
_valid_calculation_rates = (CalculationRate.CONTROL,)
| 21.827586 | 71 | 0.64297 |
73c799d1e4e35a1b5d6898fb31a4e1c21a684c8f | 2,746 | py | Python | scrapy_projects/Hoaxlyspiders/spiders/correctiv_org.py | hoaxly/hoaxly-spiderbreeder | 066ff4072e1e0e7f5d7c495d3048909256127a5f | [
"MIT"
] | null | null | null | scrapy_projects/Hoaxlyspiders/spiders/correctiv_org.py | hoaxly/hoaxly-spiderbreeder | 066ff4072e1e0e7f5d7c495d3048909256127a5f | [
"MIT"
] | null | null | null | scrapy_projects/Hoaxlyspiders/spiders/correctiv_org.py | hoaxly/hoaxly-spiderbreeder | 066ff4072e1e0e7f5d7c495d3048909256127a5f | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from scrapy import Request
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Identity
from scrapy.spiders import Rule
from ..utils.spiders import BasePortiaSpider
from ..utils.starturls import FeedGenerator, FragmentGenerator
from ..utils.processors import Item, Field, Text, Number, Price, Date, Url, Image, Regex
from ..items import HoaxlyinboxschemaItem, PortiaItem
class CorrectivOrg(BasePortiaSpider):
name = "correctiv.org"
allowed_domains = ['correctiv.org']
start_urls = [
'https://correctiv.org/echtjetzt/']
rules = [
Rule(
LinkExtractor(
allow=('^https\\:\\/\\/correctiv\\.org\\/echtjetzt\\/artikel\\/'),
deny=()),
callback='parse_item',
follow=True)]
items = [
[
Item(
HoaxlyinboxschemaItem,
None,
'#entry-2578',
[
Field(
'factoidHeadline',
'.article-header > .article-header__headline-group > .article-header__headline *::text',
[]),
Field(
'claimReviewers',
'.article-body > .article-body__aside > .vcard > .fn::attr(href)',
[]),
Field(
'factoidPubdate',
'.article-body > .article-body__aside > .article-body__publishing-date *::text',
[]),
Field(
'factoidContent',
'.article-body > .article-body__main *::text',
[]),
Field(
'itemReviewed',
'.article-body > .article-body__main > p:nth-child(1) > a::attr(href)',
[]),
Field(
'factoidClaim',
'.article-body__main > p:nth-child(2) *::text',
[]),
Field(
'factoidVerdict',
'.article-body > .article-body__claimreview > .claimreview__title *::text',
[]),
Field(
'factoidRating',
'.article-body > .article-body__claimreview > .claimreview__picture::attr(src)',
[]),
Field(
'factoidSourceUrls',
'.article-body__main > p:nth-child(4) > a::attr(href)',
[])])]]
| 39.228571 | 112 | 0.458121 |
73c7b94565a8ed4c2db388f7b29a67a154b523d5 | 603 | py | Python | src/rpi/gpio/examples/dc_motor_with_button_relay.py | MatthewGerber/rpi | 29246e7eceb95299ee6fa337d812b8f8c0165001 | [
"MIT"
] | null | null | null | src/rpi/gpio/examples/dc_motor_with_button_relay.py | MatthewGerber/rpi | 29246e7eceb95299ee6fa337d812b8f8c0165001 | [
"MIT"
] | null | null | null | src/rpi/gpio/examples/dc_motor_with_button_relay.py | MatthewGerber/rpi | 29246e7eceb95299ee6fa337d812b8f8c0165001 | [
"MIT"
] | null | null | null | import time
import RPi.GPIO as gpio
from rpi.gpio import CkPin, setup, cleanup
from rpi.gpio.controls import TwoPoleButton
def main():
"""
This example drives a DC motor as shown on page 176 of the tutorial.
"""
setup()
transistor_base_pin = CkPin.GPIO17
gpio.setup(transistor_base_pin, gpio.OUT)
button = TwoPoleButton(CkPin.GPIO18, 300)
button.event(lambda s: gpio.output(transistor_base_pin, gpio.HIGH if s.pressed else gpio.LOW))
print('You have 20 seconds to press the button...')
time.sleep(20)
cleanup()
if __name__ == '__main__':
main()
| 20.793103 | 98 | 0.693201 |
73c7ce12922da9c68863e541bbdb80f15064cccc | 270 | py | Python | manage.py | goobes/box | 42e136d98b2a25ffd8717b3c8c9d97654cc15e54 | [
"MIT"
] | null | null | null | manage.py | goobes/box | 42e136d98b2a25ffd8717b3c8c9d97654cc15e54 | [
"MIT"
] | 21 | 2018-02-21T06:53:36.000Z | 2022-03-11T23:14:47.000Z | manage.py | goobes/box | 42e136d98b2a25ffd8717b3c8c9d97654cc15e54 | [
"MIT"
] | 1 | 2018-03-01T11:01:20.000Z | 2018-03-01T11:01:20.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "subscriptionbox.settings.development")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 24.545455 | 91 | 0.785185 |
73c7d2606cc017abd1c6576f9d7723e7b4252c49 | 112 | py | Python | thompson_sampling/contextual_bandits.py | wooohoooo/thompson_sampling | 259f4ba5d7ec023c5c48fa51e9d6c2ba0ee19fcc | [
"Apache-2.0"
] | 1 | 2021-07-28T16:35:58.000Z | 2021-07-28T16:35:58.000Z | thompson_sampling/contextual_bandits.py | wooohoooo/thompson_sampling | 259f4ba5d7ec023c5c48fa51e9d6c2ba0ee19fcc | [
"Apache-2.0"
] | 2 | 2020-04-24T12:58:34.000Z | 2022-02-26T07:03:05.000Z | thompson_sampling/contextual_bandits.py | wooohoooo/thompson_sampling | 259f4ba5d7ec023c5c48fa51e9d6c2ba0ee19fcc | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: contextual_bandits.ipynb (unless otherwise specified).
__all__ = [] | 37.333333 | 98 | 0.767857 |
73c843ffc11ffcc0b1d09e2a6759704689d21fbc | 5,524 | py | Python | app/celery/provider_tasks.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | app/celery/provider_tasks.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | app/celery/provider_tasks.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | from flask import current_app
from notifications_utils.recipients import InvalidEmailError
from notifications_utils.statsd_decorators import statsd
from sqlalchemy.orm.exc import NoResultFound
from app import notify_celery
from app.config import QueueNames
from app.dao import notifications_dao
from app.dao.notifications_dao import update_notification_status_by_id
from app.delivery import send_to_providers
from app.exceptions import (
InvalidUrlException,
MalwarePendingException,
NotificationTechnicalFailureException,
)
from app.models import NOTIFICATION_TECHNICAL_FAILURE
from app.notifications.callbacks import _check_and_queue_callback_task
# Celery rate limits are per worker instance and not a global rate limit.
# https://docs.celeryproject.org/en/stable/userguide/tasks.html#Task.rate_limit
# This task is dispatched through the `send-throttled-sms-tasks` queue.
# This queue is consumed by 1 Celery instance with 1 worker, the SMS Celery pod.
# The maximum throughput is therefore 1 instance * 1 worker = 1 task per rate limit.
# We therefore set rate_limit="30/m" on the Celery task to have 1 task per 2 seconds.
@notify_celery.task(
bind=True,
name="deliver_throttled_sms",
max_retries=48,
default_retry_delay=300,
rate_limit="30/m",
)
@statsd(namespace="tasks")
def deliver_throttled_sms(self, notification_id):
_deliver_sms(self, notification_id)
# Celery rate limits are per worker instance and not a global rate limit.
# https://docs.celeryproject.org/en/stable/userguide/tasks.html#Task.rate_limit
# This task is dispatched through the `send-sms-tasks` queue.
# This queue is consumed by 6 Celery instances with 4 workers in production.
# The maximum throughput is therefore 6 instances * 4 workers = 24 tasks per second
# if we set rate_limit="1/s" on the Celery task
@notify_celery.task(
bind=True,
name="deliver_sms",
max_retries=48,
default_retry_delay=300,
rate_limit="1/s",
)
@statsd(namespace="tasks")
def deliver_sms(self, notification_id):
_deliver_sms(self, notification_id)
@notify_celery.task(bind=True, name="deliver_email", max_retries=48, default_retry_delay=300)
@statsd(namespace="tasks")
def deliver_email(self, notification_id):
try:
current_app.logger.info("Start sending email for notification id: {}".format(notification_id))
notification = notifications_dao.get_notification_by_id(notification_id)
if not notification:
raise NoResultFound()
send_to_providers.send_email_to_provider(notification)
except InvalidEmailError as e:
current_app.logger.info(f"Cannot send notification {notification_id}, got an invalid email address: {str(e)}.")
update_notification_status_by_id(notification_id, NOTIFICATION_TECHNICAL_FAILURE)
_check_and_queue_callback_task(notification)
except InvalidUrlException:
current_app.logger.error(f"Cannot send notification {notification_id}, got an invalid direct file url.")
update_notification_status_by_id(notification_id, NOTIFICATION_TECHNICAL_FAILURE)
_check_and_queue_callback_task(notification)
except MalwarePendingException:
current_app.logger.info("RETRY: Email notification {} is pending malware scans".format(notification_id))
self.retry(queue=QueueNames.RETRY, countdown=60)
except Exception:
try:
current_app.logger.exception("RETRY: Email notification {} failed".format(notification_id))
self.retry(queue=QueueNames.RETRY)
except self.MaxRetriesExceededError:
message = (
"RETRY FAILED: Max retries reached. "
"The task send_email_to_provider failed for notification {}. "
"Notification has been updated to technical-failure".format(notification_id)
)
update_notification_status_by_id(notification_id, NOTIFICATION_TECHNICAL_FAILURE)
_check_and_queue_callback_task(notification)
raise NotificationTechnicalFailureException(message)
def _deliver_sms(self, notification_id):
try:
current_app.logger.info("Start sending SMS for notification id: {}".format(notification_id))
notification = notifications_dao.get_notification_by_id(notification_id)
if not notification:
raise NoResultFound()
send_to_providers.send_sms_to_provider(notification)
except InvalidUrlException:
current_app.logger.error(f"Cannot send notification {notification_id}, got an invalid direct file url.")
update_notification_status_by_id(notification_id, NOTIFICATION_TECHNICAL_FAILURE)
_check_and_queue_callback_task(notification)
except Exception:
try:
current_app.logger.exception("SMS notification delivery for id: {} failed".format(notification_id))
if self.request.retries == 0:
self.retry(queue=QueueNames.RETRY, countdown=0)
else:
self.retry(queue=QueueNames.RETRY)
except self.MaxRetriesExceededError:
message = (
"RETRY FAILED: Max retries reached. The task send_sms_to_provider failed for notification {}. "
"Notification has been updated to technical-failure".format(notification_id)
)
update_notification_status_by_id(notification_id, NOTIFICATION_TECHNICAL_FAILURE)
_check_and_queue_callback_task(notification)
raise NotificationTechnicalFailureException(message)
| 47.213675 | 119 | 0.745474 |
73c8627e2e61fbbc92b1969077a79136fb3526b8 | 5,604 | py | Python | Chapter05/chapter_05_example_01.py | pesader/hands-on-music-generation-with-magenta | 0e2364e10dc76a90d5ba09f42122cc9555950798 | [
"MIT"
] | 123 | 2019-08-25T11:01:04.000Z | 2022-03-10T19:23:53.000Z | Chapter05/chapter_05_example_01.py | pesader/hands-on-music-generation-with-magenta | 0e2364e10dc76a90d5ba09f42122cc9555950798 | [
"MIT"
] | 17 | 2020-02-20T18:17:49.000Z | 2021-05-28T06:17:51.000Z | Chapter05/chapter_05_example_01.py | pesader/hands-on-music-generation-with-magenta | 0e2364e10dc76a90d5ba09f42122cc9555950798 | [
"MIT"
] | 45 | 2019-07-26T15:17:13.000Z | 2022-03-22T15:58:05.000Z | """
This example shows how to use NSynth to interpolate between pairs of sounds.
VERSION: Magenta 1.1.7
"""
import os
import tarfile
from typing import List, Tuple
import numpy as np
import tensorflow as tf
from magenta.models.nsynth import utils
from magenta.models.nsynth.wavenet import fastgen
from six.moves import urllib
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
"log", "WARN",
"The threshold for what messages will be logged. DEBUG, INFO, WARN, ERROR, "
"or FATAL.")
WAV_FILENAMES = ["83249__zgump__bass-0205__crop.wav",
"160045__jorickhoofd__metal-hit-with-metal-bar-resonance"
"__crop.wav",
"412017__skymary__cat-meow-short__crop.wav",
"427567__maria-mannone__flute__crop.wav"]
def download_checkpoint(checkpoint_name: str,
target_dir: str = "checkpoints") -> None:
"""
Downloads a Magenta checkpoint to target directory and extracts it.
Target directory target_dir will be created if it does not already exist.
:param checkpoint_name: magenta checkpoint name to download,
one of "baseline-ckpt" or "wavenet-ckpt"
:param target_dir: local directory in which to write the checkpoint
"""
tf.gfile.MakeDirs(target_dir)
checkpoint_target = os.path.join(target_dir, f"{checkpoint_name}.tar")
if not os.path.exists(checkpoint_target):
response = urllib.request.urlopen(
f"http://download.magenta.tensorflow.org/"
f"models/nsynth/{checkpoint_name}.tar")
data = response.read()
local_file = open(checkpoint_target, 'wb')
local_file.write(data)
local_file.close()
tar = tarfile.open(checkpoint_target)
tar.extractall(target_dir)
tar.close()
def encode(wav_filenames: List[str],
checkpoint: str = "checkpoints/wavenet-ckpt/model.ckpt-200000",
sample_length: int = 16000,
sample_rate: int = 16000) -> List[np.ndarray]:
"""
  Encodes the list of filenames into encodings by loading the wav files,
encoding them using fastgen, and returning the result.
:param wav_filenames: the list of filenames to encode, they need to be
present in the "sound" folder
:param checkpoint: the checkpoint folder
:param sample_length: the sample length, can be calculated by multiplying
the desired number of seconds by 16000
:param sample_rate: the sample rate, should be 16000
"""
if not wav_filenames:
return []
  # Loads the audio for each filename
audios = []
for wav_filename in wav_filenames:
audio = utils.load_audio(os.path.join("sounds", wav_filename),
sample_length=sample_length,
sr=sample_rate)
audios.append(audio)
# Encodes the audio for each new wav
audios = np.array(audios)
encodings = fastgen.encode(audios, checkpoint, sample_length)
return encodings
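# Illustrative usage sketch (not executed by this script): per the docstring
# above, a 2-second clip would be encoded with sample_length = 2 * 16000.
# "my_clip.wav" is a placeholder filename assumed to exist under "sounds/".
#   two_second_encodings = encode(["my_clip.wav"], sample_length=2 * 16000)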
def mix_encoding_pairs(encodings: List[np.ndarray],
encodings_name: List[str]) \
-> Tuple[np.ndarray, List[str]]:
"""
  Mixes the encodings pairwise (two by two), by averaging each pair of encodings
  and returning the mixes together with their resulting mixed filenames.
:param encodings: the list of encodings
:param encodings_name: the list of encodings names
"""
encodings_mix = []
encodings_mix_name = []
# Takes the pair of encodings two by two
for encoding1, encoding1_name in zip(encodings, encodings_name):
for encoding2, encoding2_name in zip(encodings, encodings_name):
if encoding1_name == encoding2_name:
continue
      # Averages the two encodings together (parenthesized so both terms are halved)
      encoding_mix = (encoding1 + encoding2) / 2.0
encodings_mix.append(encoding_mix)
# Merges the beginning of the track names
if "_" in encoding1_name and "_" in encoding2_name:
encoding_name = (f"{encoding1_name.split('_', 1)[0]}_"
f"{encoding2_name.split('_', 1)[0]}")
else:
encoding_name = f"{encoding1_name}_{encoding2_name}"
encodings_mix_name.append(encoding_name)
return np.array(encodings_mix), encodings_mix_name
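# Tiny illustrative example of the pairwise averaging performed above, using
# made-up 1x2 arrays in place of real NSynth encodings:
#   >>> a, b = np.ones((1, 2)), np.zeros((1, 2))
#   >>> (a + b) / 2.0
#   array([[0.5, 0.5]])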
def synthesize(encodings_mix: np.ndarray,
encodings_mix_name: List[str],
checkpoint: str = "checkpoints/wavenet-ckpt/model.ckpt-200000") \
-> None:
"""
  Synthesizes the list of encodings and saves them under the list of names.
  This might take a long time on commodity hardware (~15 minutes).
  :param encodings_mix: the list of encodings to synthesize
  :param encodings_mix_name: the list of encoding names used for the output files
:param checkpoint: the checkpoint folder
"""
os.makedirs(os.path.join("output", "nsynth"), exist_ok=True)
encodings_mix_name = [os.path.join("output", "nsynth",
encoding_mix_name + ".wav")
for encoding_mix_name in encodings_mix_name]
fastgen.synthesize(encodings_mix,
checkpoint_path=checkpoint,
save_paths=encodings_mix_name)
def app(unused_argv):
# Downloads and extracts the checkpoint to "checkpoints/wavenet-ckpt"
download_checkpoint("wavenet-ckpt")
  # Encodes the wav files into 4 encodings
encodings = encode(WAV_FILENAMES)
  # Mixes the 4 encodings pairwise into 12 mixed encodings
encodings_mix, encodings_mix_name = mix_encoding_pairs(encodings,
WAV_FILENAMES)
# Synthesize the 12 encodings into wavs
synthesize(encodings_mix, encodings_mix_name)
if __name__ == "__main__":
tf.logging.set_verbosity(FLAGS.log)
tf.app.run(app)
| 35.245283 | 80 | 0.687545 |
73c8774e9b18b4742353e9fffc733949d0db7ff7 | 3,068 | py | Python | sdk/translation/azure-ai-translation-document/samples/async_samples/sample_begin_translation_async.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/translation/azure-ai-translation-document/samples/async_samples/sample_begin_translation_async.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/translation/azure-ai-translation-document/samples/async_samples/sample_begin_translation_async.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_begin_translation_async.py
DESCRIPTION:
This sample demonstrates how to translate documents in your Azure Blob Storage container. To translate a
specific document only, or to translate documents under a folder, see sample_begin_translation_with_filters_async.py.
To set up your containers for translation and generate SAS tokens to your containers (or files)
with the appropriate permissions, see the README.
USAGE:
python sample_begin_translation_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_DOCUMENT_TRANSLATION_ENDPOINT - the endpoint to your Document Translation resource.
2) AZURE_DOCUMENT_TRANSLATION_KEY - your Document Translation API key.
3) AZURE_SOURCE_CONTAINER_URL - the container SAS URL to your source container which has the documents
to be translated.
4) AZURE_TARGET_CONTAINER_URL - the container SAS URL to your target container where the translated documents
will be written.
"""
import asyncio
async def sample_translation_async():
# [START begin_translation_async]
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.translation.document.aio import DocumentTranslationClient
endpoint = os.environ["AZURE_DOCUMENT_TRANSLATION_ENDPOINT"]
key = os.environ["AZURE_DOCUMENT_TRANSLATION_KEY"]
source_container_url = os.environ["AZURE_SOURCE_CONTAINER_URL"]
target_container_url = os.environ["AZURE_TARGET_CONTAINER_URL"]
client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
async with client:
poller = await client.begin_translation(source_container_url, target_container_url, "fr")
result = await poller.result()
print(f"Status: {poller.status()}")
print(f"Created on: {poller.details.created_on}")
print(f"Last updated on: {poller.details.last_updated_on}")
print(f"Total number of translations on documents: {poller.details.documents_total_count}")
print("\nOf total documents...")
print(f"{poller.details.documents_failed_count} failed")
print(f"{poller.details.documents_succeeded_count} succeeded")
async for document in result:
print(f"Document ID: {document.id}")
print(f"Document status: {document.status}")
if document.status == "Succeeded":
print(f"Source document location: {document.source_document_url}")
print(f"Translated document location: {document.translated_document_url}")
print(f"Translated to language: {document.translated_to}\n")
else:
print(f"Error Code: {document.error.code}, Message: {document.error.message}\n")
# [END begin_translation_async]
async def main():
await sample_translation_async()
if __name__ == '__main__':
asyncio.run(main())
| 40.906667 | 121 | 0.711213 |
73c8afac260281cbf368d7247892d66c05588dd2 | 2,882 | py | Python | examples/torch_br_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
] | 38 | 2019-06-10T04:19:42.000Z | 2022-02-15T05:21:23.000Z | examples/torch_br_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
] | 4 | 2019-07-30T19:00:23.000Z | 2019-09-26T01:35:05.000Z | examples/torch_br_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
] | 10 | 2019-06-10T05:45:33.000Z | 2021-04-22T08:33:28.000Z | """ Test case for Torch """
from __future__ import absolute_import
import torch
import torchvision.models as models
import numpy as np
from perceptron.models.classification.pytorch import PyTorchModel
from perceptron.utils.image import imagenet_example
from perceptron.benchmarks.brightness import BrightnessMetric
from perceptron.utils.criteria.classification import Misclassification
from perceptron.utils.tools import plot_image
from perceptron.utils.tools import bcolors
# instantiate the model
resnet18 = models.resnet18(pretrained=True).eval()
if torch.cuda.is_available():
resnet18 = resnet18.cuda()
# initialize the PyTorchModel
mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
fmodel = PyTorchModel(
resnet18, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
# get the source image
image, _ = imagenet_example(data_format='channels_first')
image = image / 255. # because our model expects values in [0, 1]
# set the type of noise which will be used to generate the adversarial examples
metric = BrightnessMetric(fmodel, criterion=Misclassification())
# set the label as the predicted one
label = np.argmax(fmodel.predictions(image))
print(bcolors.BOLD + 'Process start' + bcolors.ENDC)
adversary = metric(image, label, verify=True, unpack=False)  # set 'unpack' to False so we can access the adversary's detailed info
print(bcolors.BOLD + 'Process finished' + bcolors.ENDC)
if adversary.image is None:
print(bcolors.WARNING + 'Warning: Cannot find an adversary!' + bcolors.ENDC)
exit(-1)
################### print summary info #####################################
keywords = ['PyTorch', 'ResNet18', 'Misclassification', 'Brightness']
true_label = np.argmax(fmodel.predictions(image))
fake_label = np.argmax(fmodel.predictions(adversary.image))
# interpret the label as human language
with open('perceptron/utils/labels.txt') as info:
imagenet_dict = eval(info.read())
print(bcolors.HEADER + bcolors.UNDERLINE + 'Summary:' + bcolors.ENDC)
print('Configuration:' + bcolors.CYAN + ' --framework %s '
'--model %s --criterion %s '
'--metric %s' % tuple(keywords) + bcolors.ENDC)
print('The predicted label of original image is '
+ bcolors.GREEN + imagenet_dict[true_label] + bcolors.ENDC)
print('The predicted label of adversary image is '
+ bcolors.RED + imagenet_dict[fake_label] + bcolors.ENDC)
print('Minimum perturbation required: %s' % bcolors.BLUE
+ str(adversary.distance) + bcolors.ENDC)
print('Verifiable bound: %s' % bcolors.BLUE
+ str(adversary.verifiable_bounds) + bcolors.ENDC)
print('\n')
plot_image(adversary,
title=', '.join(keywords),
figname='examples/images/%s.png' % '_'.join(keywords))
| 39.479452 | 132 | 0.705066 |
73c8b17b7874def49d81bd4d016d060fb222ae09 | 121,817 | py | Python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_app_service_plans_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_app_service_plans_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_app_service_plans_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
*,
detailed: Optional[bool] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Web/serverfarms')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if detailed is not None:
query_parameters['detailed'] = _SERIALIZER.query("detailed", detailed, 'bool')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_capabilities_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/capabilities')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_hybrid_connection_request(
resource_group_name: str,
name: str,
namespace_name: str,
relay_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionNamespaces/{namespaceName}/relays/{relayName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"namespaceName": _SERIALIZER.url("namespace_name", namespace_name, 'str'),
"relayName": _SERIALIZER.url("relay_name", relay_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_hybrid_connection_request(
resource_group_name: str,
name: str,
namespace_name: str,
relay_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionNamespaces/{namespaceName}/relays/{relayName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"namespaceName": _SERIALIZER.url("namespace_name", namespace_name, 'str'),
"relayName": _SERIALIZER.url("relay_name", relay_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_hybrid_connection_keys_request(
resource_group_name: str,
name: str,
namespace_name: str,
relay_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionNamespaces/{namespaceName}/relays/{relayName}/listKeys')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"namespaceName": _SERIALIZER.url("namespace_name", namespace_name, 'str'),
"relayName": _SERIALIZER.url("relay_name", relay_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_web_apps_by_hybrid_connection_request(
resource_group_name: str,
name: str,
namespace_name: str,
relay_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionNamespaces/{namespaceName}/relays/{relayName}/sites')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"namespaceName": _SERIALIZER.url("namespace_name", namespace_name, 'str'),
"relayName": _SERIALIZER.url("relay_name", relay_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_hybrid_connection_plan_limit_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionPlanLimits/limit')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_hybrid_connections_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionRelays')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_restart_web_apps_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
soft_restart: Optional[bool] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/restartSites')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if soft_restart is not None:
query_parameters['softRestart'] = _SERIALIZER.query("soft_restart", soft_restart, 'bool')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_web_apps_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
skip_token: Optional[str] = None,
filter: Optional[str] = None,
top: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/sites')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if skip_token is not None:
query_parameters['$skipToken'] = _SERIALIZER.query("skip_token", skip_token, 'str')
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_server_farm_skus_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/skus')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_usages_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/usages')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_vnets_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_vnet_from_server_farm_request(
resource_group_name: str,
name: str,
vnet_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"vnetName": _SERIALIZER.url("vnet_name", vnet_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_vnet_gateway_request(
resource_group_name: str,
name: str,
vnet_name: str,
gateway_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"vnetName": _SERIALIZER.url("vnet_name", vnet_name, 'str'),
"gatewayName": _SERIALIZER.url("gateway_name", gateway_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_vnet_gateway_request(
resource_group_name: str,
name: str,
vnet_name: str,
gateway_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"vnetName": _SERIALIZER.url("vnet_name", vnet_name, 'str'),
"gatewayName": _SERIALIZER.url("gateway_name", gateway_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_routes_for_vnet_request(
resource_group_name: str,
name: str,
vnet_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"vnetName": _SERIALIZER.url("vnet_name", vnet_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_route_for_vnet_request(
resource_group_name: str,
name: str,
vnet_name: str,
route_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"vnetName": _SERIALIZER.url("vnet_name", vnet_name, 'str'),
"routeName": _SERIALIZER.url("route_name", route_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_vnet_route_request(
resource_group_name: str,
name: str,
vnet_name: str,
route_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"vnetName": _SERIALIZER.url("vnet_name", vnet_name, 'str'),
"routeName": _SERIALIZER.url("route_name", route_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_vnet_route_request(
resource_group_name: str,
name: str,
vnet_name: str,
route_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"vnetName": _SERIALIZER.url("vnet_name", vnet_name, 'str'),
"routeName": _SERIALIZER.url("route_name", route_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_vnet_route_request(
resource_group_name: str,
name: str,
vnet_name: str,
route_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"vnetName": _SERIALIZER.url("vnet_name", vnet_name, 'str'),
"routeName": _SERIALIZER.url("route_name", route_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_reboot_worker_request(
resource_group_name: str,
name: str,
worker_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/workers/{workerName}/reboot')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"workerName": _SERIALIZER.url("worker_name", worker_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class AppServicePlansOperations(object):
"""AppServicePlansOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
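    # Illustrative usage sketch (not part of the generated code; the credential
    # and subscription id below are placeholders): this operations group is
    # normally reached through the service client rather than instantiated
    # directly, e.g.
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.web import WebSiteManagementClient
    #   client = WebSiteManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   for plan in client.app_service_plans.list():
    #       print(plan.name)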
@distributed_trace
def list(
self,
detailed: Optional[bool] = None,
**kwargs: Any
) -> Iterable["_models.AppServicePlanCollection"]:
"""Get all App Service plans for a subscription.
Description for Get all App Service plans for a subscription.
:param detailed: Specify :code:`<code>true</code>` to return all App Service plan properties.
The default is :code:`<code>false</code>`, which returns a subset of the properties.
Retrieval of all properties may increase the API latency.
:type detailed: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServicePlanCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.AppServicePlanCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServicePlanCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
detailed=detailed,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
detailed=detailed,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AppServicePlanCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/serverfarms'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.AppServicePlanCollection"]:
"""Get all App Service plans in a resource group.
Description for Get all App Service plans in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServicePlanCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.AppServicePlanCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServicePlanCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AppServicePlanCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Optional["_models.AppServicePlan"]:
"""Get an App Service plan.
Description for Get an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServicePlan, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.AppServicePlan or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AppServicePlan"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AppServicePlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}'} # type: ignore
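# Usage sketch (hypothetical; names are placeholders): fetching a single plan. Note that the
# operation above treats HTTP 404 as a non-error and returns None in that case.
#
#   plan = client.app_service_plans.get("<resource-group>", "<plan-name>")
#   if plan is None:
#       print("plan not found")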
def _create_or_update_initial(
self,
resource_group_name: str,
name: str,
app_service_plan: "_models.AppServicePlan",
**kwargs: Any
) -> "_models.AppServicePlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServicePlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(app_service_plan, 'AppServicePlan')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServicePlan', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AppServicePlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
name: str,
app_service_plan: "_models.AppServicePlan",
**kwargs: Any
) -> LROPoller["_models.AppServicePlan"]:
"""Creates or updates an App Service Plan.
Description for Creates or updates an App Service Plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param app_service_plan: Details of the App Service plan.
:type app_service_plan: ~azure.mgmt.web.v2020_09_01.models.AppServicePlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either AppServicePlan or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.web.v2020_09_01.models.AppServicePlan]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServicePlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
name=name,
app_service_plan=app_service_plan,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AppServicePlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}'} # type: ignore
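# Usage sketch (hypothetical; the model values are assumptions): creating a plan with the
# long-running-operation poller returned above and blocking until provisioning completes.
#
#   from azure.mgmt.web.models import AppServicePlan, SkuDescription
#
#   poller = client.app_service_plans.begin_create_or_update(
#       "<resource-group>", "<plan-name>",
#       AppServicePlan(location="westus", sku=SkuDescription(name="P1v2", tier="PremiumV2")),
#   )
#   plan = poller.result()  # waits for the LRO and returns the deserialized AppServicePlan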
@distributed_trace
def delete(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete an App Service plan.
Description for Delete an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
name: str,
app_service_plan: "_models.AppServicePlanPatchResource",
**kwargs: Any
) -> "_models.AppServicePlan":
"""Creates or updates an App Service Plan.
Description for Creates or updates an App Service Plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param app_service_plan: Details of the App Service plan.
:type app_service_plan: ~azure.mgmt.web.v2020_09_01.models.AppServicePlanPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServicePlan, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.AppServicePlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServicePlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(app_service_plan, 'AppServicePlanPatchResource')
request = build_update_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServicePlan', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AppServicePlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}'} # type: ignore
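# Usage sketch (hypothetical; the patched property is an assumption): a PATCH-style update using
# AppServicePlanPatchResource instead of the full AppServicePlan payload.
#
#   from azure.mgmt.web.models import AppServicePlanPatchResource
#
#   plan = client.app_service_plans.update(
#       "<resource-group>", "<plan-name>",
#       AppServicePlanPatchResource(per_site_scaling=True),
#   )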
@distributed_trace
def list_capabilities(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> List["_models.Capability"]:
"""List all capabilities of an App Service plan.
Description for List all capabilities of an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of Capability, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2020_09_01.models.Capability]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.Capability"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_capabilities_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.list_capabilities.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[Capability]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_capabilities.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/capabilities'} # type: ignore
@distributed_trace
def get_hybrid_connection(
self,
resource_group_name: str,
name: str,
namespace_name: str,
relay_name: str,
**kwargs: Any
) -> "_models.HybridConnection":
"""Retrieve a Hybrid Connection in use in an App Service plan.
Description for Retrieve a Hybrid Connection in use in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param namespace_name: Name of the Service Bus namespace.
:type namespace_name: str
:param relay_name: Name of the Service Bus relay.
:type relay_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HybridConnection, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.HybridConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HybridConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_hybrid_connection_request(
resource_group_name=resource_group_name,
name=name,
namespace_name=namespace_name,
relay_name=relay_name,
subscription_id=self._config.subscription_id,
template_url=self.get_hybrid_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HybridConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_hybrid_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionNamespaces/{namespaceName}/relays/{relayName}'} # type: ignore
@distributed_trace
def delete_hybrid_connection(
self,
resource_group_name: str,
name: str,
namespace_name: str,
relay_name: str,
**kwargs: Any
) -> None:
"""Delete a Hybrid Connection in use in an App Service plan.
Description for Delete a Hybrid Connection in use in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param namespace_name: Name of the Service Bus namespace.
:type namespace_name: str
:param relay_name: Name of the Service Bus relay.
:type relay_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_hybrid_connection_request(
resource_group_name=resource_group_name,
name=name,
namespace_name=namespace_name,
relay_name=relay_name,
subscription_id=self._config.subscription_id,
template_url=self.delete_hybrid_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_hybrid_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionNamespaces/{namespaceName}/relays/{relayName}'} # type: ignore
@distributed_trace
def list_hybrid_connection_keys(
self,
resource_group_name: str,
name: str,
namespace_name: str,
relay_name: str,
**kwargs: Any
) -> "_models.HybridConnectionKey":
"""Get the send key name and value of a Hybrid Connection.
Description for Get the send key name and value of a Hybrid Connection.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param namespace_name: The name of the Service Bus namespace.
:type namespace_name: str
:param relay_name: The name of the Service Bus relay.
:type relay_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HybridConnectionKey, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.HybridConnectionKey
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HybridConnectionKey"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_hybrid_connection_keys_request(
resource_group_name=resource_group_name,
name=name,
namespace_name=namespace_name,
relay_name=relay_name,
subscription_id=self._config.subscription_id,
template_url=self.list_hybrid_connection_keys.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HybridConnectionKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_hybrid_connection_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionNamespaces/{namespaceName}/relays/{relayName}/listKeys'} # type: ignore
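# Usage sketch (hypothetical; names are placeholders): reading the send key of a Hybrid
# Connection relay referenced by the plan.
#
#   keys = client.app_service_plans.list_hybrid_connection_keys(
#       "<resource-group>", "<plan-name>", "<namespace>", "<relay>")
#   print(keys.send_key_name, keys.send_key_value)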
@distributed_trace
def list_web_apps_by_hybrid_connection(
self,
resource_group_name: str,
name: str,
namespace_name: str,
relay_name: str,
**kwargs: Any
) -> Iterable["_models.ResourceCollection"]:
"""Get all apps that use a Hybrid Connection in an App Service Plan.
Description for Get all apps that use a Hybrid Connection in an App Service Plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param namespace_name: Name of the Hybrid Connection namespace.
:type namespace_name: str
:param relay_name: Name of the Hybrid Connection relay.
:type relay_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.ResourceCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_web_apps_by_hybrid_connection_request(
resource_group_name=resource_group_name,
name=name,
namespace_name=namespace_name,
relay_name=relay_name,
subscription_id=self._config.subscription_id,
template_url=self.list_web_apps_by_hybrid_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_web_apps_by_hybrid_connection_request(
resource_group_name=resource_group_name,
name=name,
namespace_name=namespace_name,
relay_name=relay_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_web_apps_by_hybrid_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionNamespaces/{namespaceName}/relays/{relayName}/sites'} # type: ignore
@distributed_trace
def get_hybrid_connection_plan_limit(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.HybridConnectionLimits":
"""Get the maximum number of Hybrid Connections allowed in an App Service plan.
Description for Get the maximum number of Hybrid Connections allowed in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HybridConnectionLimits, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.HybridConnectionLimits
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HybridConnectionLimits"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_hybrid_connection_plan_limit_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get_hybrid_connection_plan_limit.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HybridConnectionLimits', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_hybrid_connection_plan_limit.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionPlanLimits/limit'} # type: ignore
@distributed_trace
def list_hybrid_connections(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable["_models.HybridConnectionCollection"]:
"""Retrieve all Hybrid Connections in use in an App Service plan.
Description for Retrieve all Hybrid Connections in use in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either HybridConnectionCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.HybridConnectionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HybridConnectionCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_hybrid_connections_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.list_hybrid_connections.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_hybrid_connections_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("HybridConnectionCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_hybrid_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/hybridConnectionRelays'} # type: ignore
@distributed_trace
def restart_web_apps(
self,
resource_group_name: str,
name: str,
soft_restart: Optional[bool] = None,
**kwargs: Any
) -> None:
"""Restart all apps in an App Service plan.
Description for Restart all apps in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param soft_restart: Specify :code:`<code>true</code>` to perform a soft restart, which applies
the configuration settings and restarts the apps only if necessary. The default is
:code:`<code>false</code>`, which always restarts and reprovisions the apps.
:type soft_restart: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_restart_web_apps_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
soft_restart=soft_restart,
template_url=self.restart_web_apps.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
restart_web_apps.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/restartSites'} # type: ignore
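# Usage sketch (hypothetical; names are placeholders): soft-restarting every app in the plan.
# The operation returns None on success (HTTP 204).
#
#   client.app_service_plans.restart_web_apps("<resource-group>", "<plan-name>", soft_restart=True)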
@distributed_trace
def list_web_apps(
self,
resource_group_name: str,
name: str,
skip_token: Optional[str] = None,
filter: Optional[str] = None,
top: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.WebAppCollection"]:
"""Get all apps associated with an App Service plan.
Description for Get all apps associated with an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param skip_token: Skip to a web app in the list of web apps associated with the App Service
plan. If specified, the resulting list will contain web apps starting from (and including) the
skipToken. Otherwise, the resulting list contains web apps from the start of the list.
:type skip_token: str
:param filter: Supported filter: $filter=state eq running. Returns only web apps that are
currently running.
:type filter: str
:param top: List page size. If specified, results are paged.
:type top: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebAppCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.WebAppCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_web_apps_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
skip_token=skip_token,
filter=filter,
top=top,
template_url=self.list_web_apps.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_web_apps_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
skip_token=skip_token,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WebAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_web_apps.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/sites'} # type: ignore
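# Usage sketch (hypothetical; the filter string is an assumption based on the docstring above):
# paging through only the currently running apps in the plan.
#
#   for site in client.app_service_plans.list_web_apps(
#           "<resource-group>", "<plan-name>", filter="state eq running"):
#       print(site.name, site.state)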
@distributed_trace
def get_server_farm_skus(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Any:
"""Gets all selectable SKUs for a given App Service Plan.
Description for Gets all selectable SKUs for a given App Service Plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of App Service Plan.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_server_farm_skus_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get_server_farm_skus.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_server_farm_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/skus'} # type: ignore
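# Usage sketch (hypothetical; names are placeholders): the response is untyped (deserialized as
# 'object' above), so treat the result as raw JSON-like data.
#
#   skus = client.app_service_plans.get_server_farm_skus("<resource-group>", "<plan-name>")
#   print(skus)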
@distributed_trace
def list_usages(
self,
resource_group_name: str,
name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.CsmUsageQuotaCollection"]:
"""Gets server farm usage information.
Description for Gets server farm usage information.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of App Service Plan.
:type name: str
:param filter: Return only usages/metrics specified in the filter. The filter conforms to OData
syntax. Example: $filter=(name.value eq 'Metric1' or name.value eq 'Metric2').
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CsmUsageQuotaCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.CsmUsageQuotaCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CsmUsageQuotaCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_usages_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_usages.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_usages_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CsmUsageQuotaCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_usages.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/usages'} # type: ignore
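# Usage sketch (hypothetical; the filter follows the docstring example above): iterating usage
# quotas for the plan.
#
#   for quota in client.app_service_plans.list_usages(
#           "<resource-group>", "<plan-name>",
#           filter="(name.value eq 'Metric1' or name.value eq 'Metric2')"):
#       print(quota.name.value, quota.current_value, quota.limit)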
@distributed_trace
def list_vnets(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> List["_models.VnetInfo"]:
"""Get all Virtual Networks associated with an App Service plan.
Description for Get all Virtual Networks associated with an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VnetInfo, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2020_09_01.models.VnetInfo]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VnetInfo"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_vnets_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.list_vnets.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VnetInfo]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_vnets.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections'} # type: ignore
@distributed_trace
def get_vnet_from_server_farm(
self,
resource_group_name: str,
name: str,
vnet_name: str,
**kwargs: Any
) -> Optional["_models.VnetInfo"]:
"""Get a Virtual Network associated with an App Service plan.
Description for Get a Virtual Network associated with an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param vnet_name: Name of the Virtual Network.
:type vnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VnetInfo, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.VnetInfo or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VnetInfo"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_vnet_from_server_farm_request(
resource_group_name=resource_group_name,
name=name,
vnet_name=vnet_name,
subscription_id=self._config.subscription_id,
template_url=self.get_vnet_from_server_farm.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VnetInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_vnet_from_server_farm.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}'} # type: ignore
@distributed_trace
def get_vnet_gateway(
self,
resource_group_name: str,
name: str,
vnet_name: str,
gateway_name: str,
**kwargs: Any
) -> "_models.VnetGateway":
"""Get a Virtual Network gateway.
Description for Get a Virtual Network gateway.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param vnet_name: Name of the Virtual Network.
:type vnet_name: str
:param gateway_name: Name of the gateway. Only the 'primary' gateway is supported.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VnetGateway, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.VnetGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VnetGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_vnet_gateway_request(
resource_group_name=resource_group_name,
name=name,
vnet_name=vnet_name,
gateway_name=gateway_name,
subscription_id=self._config.subscription_id,
template_url=self.get_vnet_gateway.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VnetGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_vnet_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}'} # type: ignore
@distributed_trace
def update_vnet_gateway(
self,
resource_group_name: str,
name: str,
vnet_name: str,
gateway_name: str,
connection_envelope: "_models.VnetGateway",
**kwargs: Any
) -> "_models.VnetGateway":
"""Update a Virtual Network gateway.
Description for Update a Virtual Network gateway.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param vnet_name: Name of the Virtual Network.
:type vnet_name: str
:param gateway_name: Name of the gateway. Only the 'primary' gateway is supported.
:type gateway_name: str
:param connection_envelope: Definition of the gateway.
:type connection_envelope: ~azure.mgmt.web.v2020_09_01.models.VnetGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VnetGateway, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.VnetGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VnetGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(connection_envelope, 'VnetGateway')
request = build_update_vnet_gateway_request(
resource_group_name=resource_group_name,
name=name,
vnet_name=vnet_name,
gateway_name=gateway_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update_vnet_gateway.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VnetGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_vnet_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}'} # type: ignore
@distributed_trace
def list_routes_for_vnet(
self,
resource_group_name: str,
name: str,
vnet_name: str,
**kwargs: Any
) -> List["_models.VnetRoute"]:
"""Get all routes that are associated with a Virtual Network in an App Service plan.
Description for Get all routes that are associated with a Virtual Network in an App Service
plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param vnet_name: Name of the Virtual Network.
:type vnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VnetRoute, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2020_09_01.models.VnetRoute]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VnetRoute"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_routes_for_vnet_request(
resource_group_name=resource_group_name,
name=name,
vnet_name=vnet_name,
subscription_id=self._config.subscription_id,
template_url=self.list_routes_for_vnet.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VnetRoute]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_routes_for_vnet.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes'} # type: ignore
@distributed_trace
def get_route_for_vnet(
self,
resource_group_name: str,
name: str,
vnet_name: str,
route_name: str,
**kwargs: Any
) -> Optional[List["_models.VnetRoute"]]:
"""Get a Virtual Network route in an App Service plan.
Description for Get a Virtual Network route in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param vnet_name: Name of the Virtual Network.
:type vnet_name: str
:param route_name: Name of the Virtual Network route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VnetRoute, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2020_09_01.models.VnetRoute] or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.VnetRoute"]]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_route_for_vnet_request(
resource_group_name=resource_group_name,
name=name,
vnet_name=vnet_name,
route_name=route_name,
subscription_id=self._config.subscription_id,
template_url=self.get_route_for_vnet.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VnetRoute]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_route_for_vnet.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}'} # type: ignore
@distributed_trace
def create_or_update_vnet_route(
self,
resource_group_name: str,
name: str,
vnet_name: str,
route_name: str,
route: "_models.VnetRoute",
**kwargs: Any
) -> Optional["_models.VnetRoute"]:
"""Create or update a Virtual Network route in an App Service plan.
Description for Create or update a Virtual Network route in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param vnet_name: Name of the Virtual Network.
:type vnet_name: str
:param route_name: Name of the Virtual Network route.
:type route_name: str
:param route: Definition of the Virtual Network route.
:type route: ~azure.mgmt.web.v2020_09_01.models.VnetRoute
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VnetRoute, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.VnetRoute or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VnetRoute"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(route, 'VnetRoute')
request = build_create_or_update_vnet_route_request(
resource_group_name=resource_group_name,
name=name,
vnet_name=vnet_name,
route_name=route_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_vnet_route.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 400, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VnetRoute', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_vnet_route.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}'} # type: ignore
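# Usage sketch (hypothetical; the route name and addresses are assumptions): defining a static
# route on a Virtual Network connection of the plan.
#
#   from azure.mgmt.web.models import VnetRoute
#
#   route = client.app_service_plans.create_or_update_vnet_route(
#       "<resource-group>", "<plan-name>", "<vnet-name>", "my-route",
#       VnetRoute(start_address="10.0.0.0", end_address="10.0.0.255", route_type="STATIC"),
#   )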
@distributed_trace
def delete_vnet_route(
self,
resource_group_name: str,
name: str,
vnet_name: str,
route_name: str,
**kwargs: Any
) -> None:
"""Delete a Virtual Network route in an App Service plan.
Description for Delete a Virtual Network route in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param vnet_name: Name of the Virtual Network.
:type vnet_name: str
:param route_name: Name of the Virtual Network route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_vnet_route_request(
resource_group_name=resource_group_name,
name=name,
vnet_name=vnet_name,
route_name=route_name,
subscription_id=self._config.subscription_id,
template_url=self.delete_vnet_route.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_vnet_route.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}'} # type: ignore
@distributed_trace
def update_vnet_route(
self,
resource_group_name: str,
name: str,
vnet_name: str,
route_name: str,
route: "_models.VnetRoute",
**kwargs: Any
) -> Optional["_models.VnetRoute"]:
"""Create or update a Virtual Network route in an App Service plan.
Description for Create or update a Virtual Network route in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param vnet_name: Name of the Virtual Network.
:type vnet_name: str
:param route_name: Name of the Virtual Network route.
:type route_name: str
:param route: Definition of the Virtual Network route.
:type route: ~azure.mgmt.web.v2020_09_01.models.VnetRoute
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VnetRoute, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.VnetRoute or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VnetRoute"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(route, 'VnetRoute')
request = build_update_vnet_route_request(
resource_group_name=resource_group_name,
name=name,
vnet_name=vnet_name,
route_name=route_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update_vnet_route.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 400, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VnetRoute', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_vnet_route.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}'} # type: ignore
@distributed_trace
def reboot_worker(
self,
resource_group_name: str,
name: str,
worker_name: str,
**kwargs: Any
) -> None:
"""Reboot a worker machine in an App Service plan.
Description for Reboot a worker machine in an App Service plan.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service plan.
:type name: str
:param worker_name: Name of worker machine, which typically starts with RD.
:type worker_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_reboot_worker_request(
resource_group_name=resource_group_name,
name=name,
worker_name=worker_name,
subscription_id=self._config.subscription_id,
template_url=self.reboot_worker.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
reboot_worker.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/workers/{workerName}/reboot'} # type: ignore
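# --- Illustrative usage (not part of the generated module) -----------------
# A sketch of rebooting a single worker instance in a plan via the same
# client as in the earlier sketch; names are placeholders. Per the docstring
# above, worker instance names typically start with "RD". A successful call
# returns None; any status other than 204 raises HttpResponseError.
#
#     client.app_service_plans.reboot_worker(
#         resource_group_name="example-rg",
#         name="example-plan",
#         worker_name="RD00155D000000",
#     )
# ----------------------------------------------------------------------------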
| 41.8184 | 254 | 0.665441 |
73c8d3d774359132f95f20250f913fed36a1af64 | 636 | py | Python | Python3/Books/Douson/chapter03/losing_battle-bad.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | ["MIT"] | null | null | null | Python3/Books/Douson/chapter03/losing_battle-bad.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | ["MIT"] | null | null | null | Python3/Books/Douson/chapter03/losing_battle-bad.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | ["MIT"] | null | null | null |
# Losing Battle
# Demonstrates the dreaded infinite loop
print("Your lone hero is surrounded by a massive army of trolls.")
print("Their decaying green bodies stretch out, melting into the horizon.")
print("Your hero unsheathes his sword for the last fight of his life.\n")
health = 10
trolls = 0
damage = 3
while health != 0:
trolls += 1
health -= damage
print("Your hero swings and defeats an evil troll, " \
"but takes", damage, "damage points.\n")
print("Your hero fought valiantly and defeated", trolls, "trolls.")
print("But alas, your hero is no more.")
input("\n\nPress the enter key to exit.")
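# Why this version never ends: health starts at 10 and drops by 3 each pass
# (10 -> 7 -> 4 -> 1 -> -2 ...), so it never equals 0 and the loop condition
# `health != 0` stays true forever; the closing print() calls and the input()
# above are never reached. One possible fix (a sketch, not part of the
# original book listing) is to loop while the hero is still standing:
#
#     while health > 0:
#         trolls += 1
#         health -= damage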
| 26.5 | 75 | 0.693396 |
73c8d863116c864c6222063260e28596691d886a | 23,324 | py | Python | tests/h/views/api/groups_test.py | Manuelinux/kubeh | a549f0d1c09619843290f9b78bce7668ed90853a | ["BSD-2-Clause"] | null | null | null | tests/h/views/api/groups_test.py | Manuelinux/kubeh | a549f0d1c09619843290f9b78bce7668ed90853a | ["BSD-2-Clause"] | 4 | 2020-03-24T17:38:24.000Z | 2022-03-02T05:45:01.000Z | tests/h/views/api/groups_test.py | Manuelinux/kubeh | a549f0d1c09619843290f9b78bce7668ed90853a | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import mock
import pytest
from pyramid.httpexceptions import (
HTTPNoContent,
HTTPBadRequest,
HTTPNotFound,
HTTPConflict,
)
from h.views.api import groups as views
from h.services.group_list import GroupListService
from h.services.group import GroupService
from h.services.group_create import GroupCreateService
from h.services.group_update import GroupUpdateService
from h.services.group_members import GroupMembersService
from h.services.user import UserService
from h.services.group_links import GroupLinksService
from h import traversal
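# Module-level mark: pytest applies the GroupsJSONPresenter patch fixture
# (defined near the bottom of this file) to every test in this module.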
pytestmark = pytest.mark.usefixtures("GroupsJSONPresenter")
@pytest.mark.usefixtures("group_list_service", "group_links_service")
class TestGetGroups:
def test_proxies_to_list_service(self, anonymous_request, group_list_service):
views.groups(anonymous_request)
group_list_service.request_groups.assert_called_once_with(
user=None, authority=None, document_uri=None
)
def test_proxies_request_params(self, anonymous_request, group_list_service):
anonymous_request.params["document_uri"] = "http://example.com/thisthing.html"
anonymous_request.params["authority"] = "foo.com"
views.groups(anonymous_request)
group_list_service.request_groups.assert_called_once_with(
user=None,
authority="foo.com",
document_uri="http://example.com/thisthing.html",
)
def test_converts_groups_to_resources(
self, GroupContext, anonymous_request, open_groups, group_list_service
):
group_list_service.request_groups.return_value = open_groups
views.groups(anonymous_request)
GroupContext.assert_has_calls(
[
mock.call(open_groups[0], anonymous_request),
mock.call(open_groups[1], anonymous_request),
]
)
def test_uses_presenter_for_formatting(
self,
group_links_service,
open_groups,
group_list_service,
GroupsJSONPresenter,
anonymous_request,
):
group_list_service.request_groups.return_value = open_groups
views.groups(anonymous_request)
GroupsJSONPresenter.assert_called_once()
def test_returns_dicts_from_presenter(
self, anonymous_request, open_groups, group_list_service, GroupsJSONPresenter
):
group_list_service.request_groups.return_value = open_groups
result = views.groups(anonymous_request)
assert result == GroupsJSONPresenter(open_groups).asdicts.return_value
def test_proxies_expand_to_presenter(
self, anonymous_request, open_groups, group_list_service, GroupsJSONPresenter
):
anonymous_request.params["expand"] = "organization"
group_list_service.request_groups.return_value = open_groups
views.groups(anonymous_request)
GroupsJSONPresenter(open_groups).asdicts.assert_called_once_with(
expand=["organization"]
)
def test_passes_multiple_expand_to_presenter(
self, anonymous_request, open_groups, group_list_service, GroupsJSONPresenter
):
anonymous_request.GET.add("expand", "organization")
anonymous_request.GET.add("expand", "foobars")
group_list_service.request_groups.return_value = open_groups
views.groups(anonymous_request)
GroupsJSONPresenter(open_groups).asdicts.assert_called_once_with(
expand=["organization", "foobars"]
)
@pytest.fixture
def open_groups(self, factories):
return [factories.OpenGroup(), factories.OpenGroup()]
@pytest.fixture
def authenticated_request(self, pyramid_request, factories):
pyramid_request.user = factories.User()
return pyramid_request
@pytest.mark.usefixtures(
"CreateGroupAPISchema",
"group_service",
"group_create_service",
"GroupContext",
"GroupJSONPresenter",
)
class TestCreateGroup:
def test_it_inits_group_create_schema(self, pyramid_request, CreateGroupAPISchema):
views.create(pyramid_request)
CreateGroupAPISchema.return_value.validate.assert_called_once_with({})
# @TODO Move this test once _json_payload() has been moved to a reusable util module
def test_it_raises_if_json_parsing_fails(self, pyramid_request):
"""It raises PayloadError if parsing of the request body fails."""
# Make accessing the request.json_body property raise ValueError.
type(pyramid_request).json_body = {}
with mock.patch.object(
type(pyramid_request), "json_body", new_callable=mock.PropertyMock
) as json_body:
json_body.side_effect = ValueError()
with pytest.raises(views.PayloadError):
views.create(pyramid_request)
def test_it_passes_request_params_to_group_create_service(
self, pyramid_request, CreateGroupAPISchema, group_create_service
):
CreateGroupAPISchema.return_value.validate.return_value = {
"name": "My Group",
"description": "How about that?",
}
views.create(pyramid_request)
group_create_service.create_private_group.assert_called_once_with(
"My Group",
pyramid_request.user.userid,
description="How about that?",
groupid=None,
)
def test_it_passes_groupid_to_group_create_as_authority_provided_id(
self, pyramid_request, CreateGroupAPISchema, group_create_service
):
# Note that CreateGroupAPISchema and its methods are mocked here, so
# ``groupid`` passes validation even though the request is not third party
# Tests for that are handled directly in the CreateGroupAPISchema unit tests
# and through functional tests
CreateGroupAPISchema.return_value.validate.return_value = {
"name": "My Group",
"description": "How about that?",
"groupid": "group:something@example.com",
}
views.create(pyramid_request)
group_create_service.create_private_group.assert_called_once_with(
"My Group",
pyramid_request.user.userid,
description="How about that?",
groupid="group:something@example.com",
)
def test_it_sets_description_to_none_if_not_present(
self, pyramid_request, CreateGroupAPISchema, group_create_service
):
CreateGroupAPISchema.return_value.validate.return_value = {"name": "My Group"}
views.create(pyramid_request)
group_create_service.create_private_group.assert_called_once_with(
"My Group", pyramid_request.user.userid, description=None, groupid=None
)
def test_it_raises_HTTPConflict_on_duplicate(
self, pyramid_request, CreateGroupAPISchema, group_service, factories
):
group = factories.Group(
authority_provided_id="something", authority="example.com"
)
group_service.fetch.return_value = group
with pytest.raises(HTTPConflict, match="group with groupid.*already exists"):
views.create(pyramid_request)
def test_it_creates_group_context_from_created_group(
self, pyramid_request, GroupContext, group_create_service
):
my_group = mock.Mock()
group_create_service.create_private_group.return_value = my_group
views.create(pyramid_request)
GroupContext.assert_called_with(my_group, pyramid_request)
def test_it_returns_new_group_formatted_with_presenter(
self, pyramid_request, GroupContext, GroupJSONPresenter
):
views.create(pyramid_request)
GroupJSONPresenter.assert_called_once_with(GroupContext.return_value)
GroupJSONPresenter.return_value.asdict.assert_called_once_with(
expand=["organization", "scopes"]
)
@pytest.fixture
def pyramid_request(self, pyramid_request, factories):
# Add a nominal json_body so that _json_payload() parsing of
# it doesn't raise
pyramid_request.json_body = {}
pyramid_request.user = factories.User()
return pyramid_request
@pytest.mark.usefixtures("GroupJSONPresenter", "GroupContext")
class TestReadGroup:
def test_it_creates_group_context_from_group_model(
self, GroupContext, factories, pyramid_request
):
group = factories.Group()
views.read(group, pyramid_request)
GroupContext.assert_called_once_with(group, pyramid_request)
def test_it_forwards_expand_param_to_presenter(
self, GroupJSONPresenter, factories, pyramid_request
):
pyramid_request.params["expand"] = "organization"
group = factories.Group()
views.read(group, pyramid_request)
GroupJSONPresenter.return_value.asdict.assert_called_once_with(["organization"])
def test_it_returns_presented_group(
self, GroupJSONPresenter, factories, pyramid_request
):
pyramid_request.params["expand"] = "organization"
group = factories.Group()
presented = views.read(group, pyramid_request)
assert presented == GroupJSONPresenter.return_value.asdict.return_value
@pytest.mark.usefixtures(
"UpdateGroupAPISchema",
"group_service",
"group_update_service",
"GroupContext",
"GroupJSONPresenter",
)
class TestUpdateGroup:
def test_it_inits_group_update_schema(
self, pyramid_request, group, UpdateGroupAPISchema
):
views.update(group, pyramid_request)
UpdateGroupAPISchema.return_value.validate.assert_called_once_with({})
def test_it_passes_request_params_to_group_update_service(
self, group, pyramid_request, UpdateGroupAPISchema, group_update_service
):
patch_payload = {"name": "My Group", "description": "How about that?"}
UpdateGroupAPISchema.return_value.validate.return_value = patch_payload
views.update(group, pyramid_request)
group_update_service.update.assert_called_once_with(group, **patch_payload)
def test_it_raises_HTTPConflict_on_duplicate(
self, pyramid_request, UpdateGroupAPISchema, group_service, factories
):
pre_existing_group = factories.Group(
authority_provided_id="something", authority="example.com"
)
group = factories.Group(
authority_provided_id="something_else", authority="example.com"
)
group_service.fetch.return_value = pre_existing_group
with pytest.raises(HTTPConflict, match="group with groupid.*already exists"):
views.update(group, pyramid_request)
def test_it_does_not_raise_HTTPConflict_if_duplicate_is_same_group(
self, pyramid_request, UpdateGroupAPISchema, group_service, factories
):
group = factories.Group(
authority_provided_id="something_else", authority="example.com"
)
group_service.fetch.return_value = group
views.update(group, pyramid_request)
def test_it_creates_group_context_from_updated_group(
self, pyramid_request, GroupContext, group_update_service
):
my_group = mock.Mock()
group_update_service.update.return_value = my_group
views.update(my_group, pyramid_request)
GroupContext.assert_called_with(my_group, pyramid_request)
def test_it_returns_updated_group_formatted_with_presenter(
self, pyramid_request, GroupContext, GroupJSONPresenter, group
):
views.update(group, pyramid_request)
GroupJSONPresenter.assert_called_once_with(GroupContext.return_value)
GroupJSONPresenter.return_value.asdict.assert_called_once_with(
expand=["organization", "scopes"]
)
@pytest.fixture
def pyramid_request(self, pyramid_request, factories):
# Add a nominal json_body so that _json_payload() parsing of
# it doesn't raise
pyramid_request.json_body = {}
pyramid_request.user = factories.User()
return pyramid_request
@pytest.fixture
def group(self, factories):
return factories.Group(authority="example.com")
@pytest.mark.usefixtures(
"create",
"CreateGroupAPISchema",
"group_service",
"group_update_service",
"GroupContext",
"GroupJSONPresenter",
)
class TestUpsertGroup:
def test_it_proxies_to_create_if_group_empty(
self, pyramid_request, create, GroupUpsertContext
):
context = GroupUpsertContext(None, pyramid_request)
res = views.upsert(context, pyramid_request)
create.assert_called_once_with(pyramid_request)
assert res == create.return_value
def test_it_does_not_proxy_to_create_if_group_extant(
self, pyramid_request, create, group, GroupUpsertContext
):
context = GroupUpsertContext(group, pyramid_request)
views.upsert(context, pyramid_request)
assert create.call_count == 0
def test_it_validates_against_group_update_schema_if_group_extant(
self, pyramid_request, group, GroupUpsertContext, CreateGroupAPISchema
):
context = GroupUpsertContext(group, pyramid_request)
pyramid_request.json_body = {"name": "Rename Group"}
views.upsert(context, pyramid_request)
CreateGroupAPISchema.return_value.validate.assert_called_once_with(
{"name": "Rename Group"}
)
def test_it_raises_HTTPConflict_on_duplicate(
self, pyramid_request, group_service, factories, GroupUpsertContext
):
pre_existing_group = factories.Group(
authority_provided_id="something", authority="example.com"
)
group = factories.Group(
authority_provided_id="something_else", authority="example.com"
)
context = GroupUpsertContext(group, pyramid_request)
group_service.fetch.return_value = pre_existing_group
with pytest.raises(HTTPConflict, match="group with groupid.*already exists"):
views.upsert(context, pyramid_request)
def test_it_does_not_raise_HTTPConflict_if_duplicate_is_same_group(
self, pyramid_request, group_service, factories, GroupUpsertContext
):
group = factories.Group(
authority_provided_id="something_else", authority="example.com"
)
context = GroupUpsertContext(group, pyramid_request)
group_service.fetch.return_value = group
views.upsert(context, pyramid_request)
def test_it_proxies_to_update_service_with_injected_defaults(
self,
pyramid_request,
group_update_service,
CreateGroupAPISchema,
GroupUpsertContext,
group,
):
context = GroupUpsertContext(group, pyramid_request)
CreateGroupAPISchema.return_value.validate.return_value = {"name": "Dingdong"}
views.upsert(context, pyramid_request)
group_update_service.update.assert_called_once_with(
group, **{"name": "Dingdong", "description": "", "groupid": None}
)
def test_it_creates_group_context_from_updated_group(
self,
pyramid_request,
GroupContext,
group_update_service,
group,
GroupUpsertContext,
):
context = GroupUpsertContext(group, pyramid_request)
group_update_service.update.return_value = group
views.upsert(context, pyramid_request)
GroupContext.assert_called_with(group, pyramid_request)
def test_it_returns_updated_group_formatted_with_presenter(
self,
pyramid_request,
GroupContext,
GroupJSONPresenter,
group,
GroupUpsertContext,
):
context = GroupUpsertContext(group, pyramid_request)
views.upsert(context, pyramid_request)
GroupJSONPresenter.assert_called_once_with(GroupContext.return_value)
GroupJSONPresenter.return_value.asdict.assert_called_once_with(
expand=["organization", "scopes"]
)
@pytest.fixture
def create(self, patch):
return patch("h.views.api.groups.create")
@pytest.fixture
def group_user(self, factories):
return factories.User()
@pytest.fixture
def group(self, factories, group_user):
return factories.Group(creator=group_user)
@pytest.fixture
def pyramid_request(self, pyramid_request):
# Add a nominal json_body so that _json_payload() parsing of
# it doesn't raise
pyramid_request.json_body = {}
return pyramid_request
@pytest.fixture
def GroupUpsertContext(self):
def context_factory(group, request):
return mock.create_autospec(
traversal.GroupUpsertContext, instance=True, group=group
)
return context_factory
class TestReadMembers:
def test_it_returns_formatted_users_from_group(
self, factories, pyramid_request, UserJSONPresenter
):
group = factories.Group.build()
group.members = [
factories.User.build(),
factories.User.build(),
factories.User.build(),
]
views.read_members(group, pyramid_request)
assert UserJSONPresenter.call_count == len(group.members)
@pytest.mark.usefixtures("group_members_service", "user_service")
class TestAddMember:
def test_it_adds_user_from_request_params_to_group(
self, group, user, pyramid_request, group_members_service
):
views.add_member(group, pyramid_request)
group_members_service.member_join.assert_called_once_with(group, user.userid)
def test_it_returns_HTTPNoContent_when_add_member_is_successful(
self, group, pyramid_request
):
resp = views.add_member(group, pyramid_request)
assert isinstance(resp, HTTPNoContent)
def test_it_raises_HTTPNotFound_with_mismatched_user_and_group_authorities(
self, factories, pyramid_request
):
group = factories.Group(authority="different_authority.com")
with pytest.raises(HTTPNotFound):
views.add_member(group, pyramid_request)
def test_it_raises_HTTPNotFound_with_non_existent_user(
self, group, pyramid_request, user_service
):
user_service.fetch.return_value = None
pyramid_request.matchdict["userid"] = "some_user"
with pytest.raises(HTTPNotFound):
views.add_member(group, pyramid_request)
def test_it_raises_HTTPNotFound_if_userid_malformed(
self, group, pyramid_request, user_service
):
user_service.fetch.side_effect = ValueError("nope")
pyramid_request.matchdict["userid"] = "invalidformat@wherever"
with pytest.raises(HTTPNotFound): # view handles ValueError and raises NotFound
views.add_member(group, pyramid_request)
def test_it_fetches_user_from_the_request_params(
self, group, user, pyramid_request, user_service
):
views.add_member(group, pyramid_request)
user_service.fetch.assert_called_once_with(user.userid)
@pytest.fixture
def group(self, factories):
return factories.Group(authority="example.com")
@pytest.fixture
def user(self, factories):
return factories.User(authority="example.com")
@pytest.fixture
def pyramid_request(self, pyramid_request, group, user):
pyramid_request.matchdict["userid"] = user.userid
pyramid_request.matchdict["pubid"] = group.pubid
return pyramid_request
@pytest.fixture
def user_service(self, pyramid_config, user):
service = mock.create_autospec(UserService, spec_set=True, instance=True)
service.fetch.return_value = user
pyramid_config.register_service(service, name="user")
return service
@pytest.mark.usefixtures("authenticated_userid", "group_members_service")
class TestRemoveMember:
def test_it_removes_current_user(
self, shorthand_request, authenticated_userid, group_members_service
):
group = mock.sentinel.group
views.remove_member(group, shorthand_request)
group_members_service.member_leave.assert_called_once_with(
group, authenticated_userid
)
def test_it_returns_no_content(self, shorthand_request):
group = mock.sentinel.group
response = views.remove_member(group, shorthand_request)
assert isinstance(response, HTTPNoContent)
def test_it_fails_with_username(self, username_request):
group = mock.sentinel.group
with pytest.raises(HTTPBadRequest):
views.remove_member(group, username_request)
@pytest.fixture
def shorthand_request(self, pyramid_request):
pyramid_request.matchdict["userid"] = "me"
return pyramid_request
@pytest.fixture
def username_request(self, pyramid_request):
pyramid_request.matchdict["userid"] = "bob"
return pyramid_request
@pytest.fixture
def authenticated_userid(self, pyramid_config):
userid = "acct:bob@example.org"
pyramid_config.testing_securitypolicy(userid)
return userid
@pytest.fixture
def anonymous_request(pyramid_request):
pyramid_request.user = None
return pyramid_request
@pytest.fixture
def GroupJSONPresenter(patch):
return patch("h.views.api.groups.GroupJSONPresenter")
@pytest.fixture
def GroupsJSONPresenter(patch):
return patch("h.views.api.groups.GroupsJSONPresenter")
@pytest.fixture
def UserJSONPresenter(patch):
return patch("h.views.api.groups.UserJSONPresenter")
@pytest.fixture
def GroupContext(patch):
return patch("h.views.api.groups.GroupContext")
@pytest.fixture
def CreateGroupAPISchema(patch):
return patch("h.views.api.groups.CreateGroupAPISchema")
@pytest.fixture
def UpdateGroupAPISchema(patch):
return patch("h.views.api.groups.UpdateGroupAPISchema")
@pytest.fixture
def group_create_service(pyramid_config):
service = mock.create_autospec(GroupCreateService, spec_set=True, instance=True)
pyramid_config.register_service(service, name="group_create")
return service
@pytest.fixture
def group_update_service(pyramid_config):
service = mock.create_autospec(GroupUpdateService, spec_set=True, instance=True)
pyramid_config.register_service(service, name="group_update")
return service
@pytest.fixture
def group_service(pyramid_config):
service = mock.create_autospec(GroupService, spec_set=True, instance=True)
service.fetch.return_value = None
pyramid_config.register_service(service, name="group")
return service
@pytest.fixture
def group_members_service(pyramid_config):
service = mock.create_autospec(GroupMembersService, spec_set=True, instance=True)
pyramid_config.register_service(service, name="group_members")
return service
@pytest.fixture
def group_links_service(pyramid_config):
svc = mock.create_autospec(GroupLinksService, spec_set=True, instance=True)
pyramid_config.register_service(svc, name="group_links")
return svc
@pytest.fixture
def group_list_service(pyramid_config):
svc = mock.create_autospec(GroupListService, spec_set=True, instance=True)
pyramid_config.register_service(svc, name="group_list")
return svc
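# The fixtures above all repeat the same create-an-autospec-mock-and-register
# pattern. A hypothetical helper (not part of the original module) could
# collapse that boilerplate; sketched here purely as an illustration:
def _register_autospec_service(pyramid_config, service_cls, name):
    """Build a spec'd mock of ``service_cls`` and register it under ``name``."""
    service = mock.create_autospec(service_cls, spec_set=True, instance=True)
    pyramid_config.register_service(service, name=name)
    return service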
| 33.036827 | 88 | 0.712228 |