code (stringlengths 10–805k) | def_use_chains (sequencelengths 0–667)
---|---
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
# from crispy_forms.helper import FormHelper
# from crispy_forms.layout import Submit
import sys
import os
from .models import User_Profile
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from orders.models import Location, Location_Info
from orders.locations import get_location_choices
class UserCreationForm(BaseUserCreationForm):
name = forms.CharField(label="Full Name")
location_id = forms.ChoiceField(
label="Hospital Location:",
help_text="Select an option from the menu above.",
choices=get_location_choices())
email = forms.EmailField(
required=False,
label="Email",
help_text = "(not required)" )
phone = forms.CharField(
required=False,
max_length=12,
label="Mobile Number",
help_text="(not required)")
class Meta:
model = User
fields = [
"name",
"username",
"password1",
"password2",
"location_id",
"email",
"phone"
]
def save(self, commit=True, *args, **kwargs):
user = super(UserCreationForm, self).save(commit=False, *args, **kwargs)
name = self.cleaned_data["name"]
if len(name.split()) >= 2:
user.first_name, user.last_name = (name.split()[0].title(), name.split()[-1].title())
elif len(name.split()) == 1:
user.first_name = name.title()
user.last_name = ""
user.set_password(self.cleaned_data["password1"])
user.email = self.cleaned_data["email"]
if commit:
user.save()
user.profile.phone = self.cleaned_data["phone"]
location_id = int(self.cleaned_data["location_id"])
loc = Location(
username = user.username,
location_id = location_id,
info = Location_Info.objects.filter(pk=location_id).first()
)
loc.save()
user.profile.location = loc
user.profile.save()
user.save()
return user
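# Hedged usage sketch (not part of the original app): how UserCreationForm's
# custom save() might be driven from a signup view. The view name, template
# path and the "login" URL name are hypothetical.
def example_register_view(request):
    from django.shortcuts import redirect, render  # local import: sketch only
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            # Creates the User, splits "name" into first/last, stores the phone,
            # and attaches a freshly created Location to the user's profile.
            form.save()
            return redirect("login")
    else:
        form = UserCreationForm()
    return render(request, "users/register.html", {"form": form})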
class UserUpdateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
email = kwargs.get("instance").email
super(UserUpdateForm, self).__init__(*args, **kwargs)
self.initial["email"] = email
# self.helper = FormHelper(self)
# self.helper.add_input(Submit("submit", "Submit", css_class="btn btn-outline-info"))
# self.helper.form_method = "POST"
email = forms.EmailField(
required=False,
label="Email",
help_text = "(not required)")
class Meta:
model = User
fields = ["username", "email"]
class ProfileUpdateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
location_id = kwargs.get("instance").location.location_id
phone = kwargs.get("instance").phone
super(ProfileUpdateForm, self).__init__(*args, **kwargs)
self.initial["location_id"] = location_id
self.initial["phone"] = phone
# self.helper = FormHelper(self)
# self.helper.add_input(Submit("submit", "Submit", css_class="btn btn-outline-info"))
# self.helper.form_method = "POST"
location_id = forms.ChoiceField(
label="Hospital Location:",
help_text="Select an option from the menu above.",
choices=get_location_choices())
phone = forms.CharField(
required=False,
max_length=12,
label="Mobile Number",
help_text="(not required)")
class Meta:
model = User_Profile
fields = ["image", "location_id", "phone"]
def save(self, commit=True, *args, **kwargs):
profile = super(ProfileUpdateForm, self).save(commit=False, *args, **kwargs)
if commit:
profile.save()
profile.phone = self.cleaned_data["phone"]
new_location_id = int(self.cleaned_data["location_id"])
profile.location.delete()
            new_location = Location(
                username = self.instance.user.username,
                location_id = new_location_id,
                info = Location_Info.objects.filter(pk=new_location_id).first()
            )
            # Model.save() returns None, so save first and then assign the instance.
            new_location.save()
            profile.location = new_location
profile.save()
return profile
| [
[
[
19,
24
],
[
525,
530
],
[
578,
583
],
[
745,
750
],
[
862,
867
],
[
2280,
2285
],
[
2675,
2680
],
[
2880,
2885
],
[
3400,
3405
],
[
3567,
3572
]
],
[
[
64,
68
],
[
1026,
1030
],
[
2811,
2815
]
],
[
[
107,
147
],
[
491,
511
]
],
[
[
241,
244
],
[
289,
292
]
],
[
[
252,
254
],
[
305,
307
],
[
321,
323
],
[
337,
339
]
],
[
[
275,
287
],
[
3731,
3743
]
],
[
[
393,
401
],
[
1933,
1941
],
[
4165,
4173
]
],
[
[
403,
416
],
[
2051,
2064
],
[
4301,
4314
]
],
[
[
446,
466
],
[
708,
728
],
[
3530,
3550
]
],
[
[
474,
490
],
[
1294,
1310
]
],
[
[
2265,
2279
],
[
2398,
2412
]
],
[
[
2862,
2879
],
[
3064,
3081
],
[
3869,
3886
]
]
] |
from flask import render_template, request
from flask_script import Manager, Server
from app import app
from model import Content, Summary, Article
import app.static.summ as summarizationModel
import os, json, logging
@app.route('/', endpoint='ACCESS')
@app.route('/index.html', endpoint='ACCESSFILE')
def index():
try:
all_pairs = Article.objects.all()
return render_template('index.html', history=all_pairs)
except Exception as e:
logging.error(e)
raise e
@app.route('/run_decode', methods=['POST'])
def run_decode():
logging.debug('decode your input by our pretrained model')
try:
        source = request.get_json()['source']  # raw text posted by the frontend
        logging.debug('input: {}'.format(source))  # log the text we received
try:
logging.debug('using the pretrained model.')
sentNums, summary = summarizationModel.decode.run_(source)
        except Exception as e:
            logging.error(e)
            # Return here: the code below references sentNums/summary and
            # results, which are undefined when decoding fails.
            return json.dumps({'message': 'Failed to run the summarization model on the input.'})
else:
logging.debug('The number of sentences is {}'.format(sentNums))
            logging.debug('The abstract is: {}'.format(summary))
results = {'sent_no': sentNums, 'final': summary}
try:
article = Content(text=source)
abstract = Summary(text=summary)
pair = Article(article=article.id, abstract=abstract.id)
article.save()
abstract.save()
pair.save()
except Exception as e:
logging.error(e)
return json.dumps(results)
except:
        message = {'message': 'Failed to read the request data from the client.'}
return json.dumps(message)
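# Hedged client sketch (not from the original repo): /run_decode expects a JSON
# body with a "source" field and, on success, returns {"sent_no": ..., "final": ...}.
# The host/port follow the Server config below (0.0.0.0:5001 by default); the
# `requests` dependency is assumed for illustration only.
def example_client_call(text):
    import requests  # local import: illustration only
    resp = requests.post('http://localhost:5001/run_decode', json={'source': text})
    return resp.json()  # {'sent_no': ..., 'final': ...} on success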
manager = Manager(app)
manager.add_command('runserver', Server(
use_debugger = True,
use_reloader = True,
host = os.getenv('IP', '0.0.0.0'),
port = int(os.getenv('PORT', 5001))
))
if __name__ == "__main__":
manager.run()
| [
[
[
18,
33
],
[
383,
398
]
],
[
[
35,
42
],
[
652,
659
]
],
[
[
68,
75
],
[
1731,
1738
]
],
[
[
77,
83
],
[
1777,
1783
]
],
[
[
101,
104
],
[
221,
224
],
[
256,
259
],
[
502,
505
],
[
1739,
1742
]
],
[
[
123,
130
],
[
1293,
1300
]
],
[
[
132,
139
],
[
1337,
1344
]
],
[
[
141,
148
],
[
346,
353
],
[
1378,
1385
]
],
[
[
156,
193
],
[
925,
943
]
],
[
[
201,
203
],
[
1846,
1848
],
[
1889,
1891
]
],
[
[
205,
209
],
[
1583,
1587
],
[
1700,
1704
]
],
[
[
211,
218
],
[
467,
474
],
[
567,
574
],
[
738,
745
],
[
848,
855
],
[
1050,
1057
],
[
1126,
1133
],
[
1007,
1014
],
[
1550,
1557
]
],
[
[
308,
313
]
],
[
[
549,
559
]
],
[
[
1721,
1728
],
[
1744,
1751
],
[
1956,
1963
]
]
] |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.errors import (
cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError, Codes
)
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.caches import intern_dict
import synapse.metrics
import synapse.events
from canonicaljson import (
encode_canonical_json, encode_pretty_printed_json
)
from twisted.internet import defer
from twisted.web import server, resource
from twisted.web.server import NOT_DONE_YET
from twisted.web.util import redirectTo
import collections
import logging
import urllib
import ujson
logger = logging.getLogger(__name__)
metrics = synapse.metrics.get_metrics_for(__name__)
incoming_requests_counter = metrics.register_counter(
"requests",
labels=["method", "servlet", "tag"],
)
outgoing_responses_counter = metrics.register_counter(
"responses",
labels=["method", "code"],
)
response_timer = metrics.register_distribution(
"response_time",
labels=["method", "servlet", "tag"]
)
response_ru_utime = metrics.register_distribution(
"response_ru_utime", labels=["method", "servlet", "tag"]
)
response_ru_stime = metrics.register_distribution(
"response_ru_stime", labels=["method", "servlet", "tag"]
)
response_db_txn_count = metrics.register_distribution(
"response_db_txn_count", labels=["method", "servlet", "tag"]
)
response_db_txn_duration = metrics.register_distribution(
"response_db_txn_duration", labels=["method", "servlet", "tag"]
)
_next_request_id = 0
def request_handler(report_metrics=True):
"""Decorator for ``wrap_request_handler``"""
return lambda request_handler: wrap_request_handler(request_handler, report_metrics)
def wrap_request_handler(request_handler, report_metrics):
"""Wraps a method that acts as a request handler with the necessary logging
and exception handling.
The method must have a signature of "handle_foo(self, request)". The
argument "self" must have "version_string" and "clock" attributes. The
argument "request" must be a twisted HTTP request.
The method must return a deferred. If the deferred succeeds we assume that
a response has been sent. If the deferred fails with a SynapseError we use
it to send a JSON response with the appropriate HTTP reponse code. If the
deferred fails with any other type of error we send a 500 reponse.
We insert a unique request-id into the logging context for this request and
log the response and duration for this request.
"""
@defer.inlineCallbacks
def wrapped_request_handler(self, request):
global _next_request_id
request_id = "%s-%s" % (request.method, _next_request_id)
_next_request_id += 1
with LoggingContext(request_id) as request_context:
if report_metrics:
request_metrics = RequestMetrics()
request_metrics.start(self.clock)
request_context.request = request_id
with request.processing():
try:
with PreserveLoggingContext(request_context):
yield request_handler(self, request)
except CodeMessageException as e:
code = e.code
if isinstance(e, SynapseError):
logger.info(
"%s SynapseError: %s - %s", request, code, e.msg
)
else:
logger.exception(e)
outgoing_responses_counter.inc(request.method, str(code))
respond_with_json(
request, code, cs_exception(e), send_cors=True,
pretty_print=_request_user_agent_is_curl(request),
version_string=self.version_string,
)
except:
logger.exception(
"Failed handle request %s.%s on %r: %r",
request_handler.__module__,
request_handler.__name__,
self,
request
)
respond_with_json(
request,
500,
{
"error": "Internal server error",
"errcode": Codes.UNKNOWN,
},
send_cors=True
)
finally:
try:
if report_metrics:
request_metrics.stop(
self.clock, request, self.__class__.__name__
)
except:
pass
return wrapped_request_handler
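# Hedged usage sketch (not part of synapse): a handler wrapped by
# ``request_handler()`` must live on an object that exposes ``version_string``
# and ``clock`` and must return a deferred, per the docstring above. The class
# and response body here are hypothetical.
class _ExampleHandler(object):
    def __init__(self, hs):
        self.clock = hs.get_clock()
        self.version_string = hs.version_string

    @request_handler()
    @defer.inlineCallbacks
    def handle_example(self, request):
        # A real handler would do work here; this one just sends a JSON body.
        respond_with_json(
            request, 200, {"example": True},
            send_cors=True, version_string=self.version_string,
        )
        yield defer.succeed(None)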
class HttpServer(object):
""" Interface for registering callbacks on a HTTP server
"""
def register_paths(self, method, path_patterns, callback):
""" Register a callback that gets fired if we receive a http request
with the given method for a path that matches the given regex.
If the regex contains groups these gets passed to the calback via
an unpacked tuple.
Args:
method (str): The method to listen to.
path_patterns (list<SRE_Pattern>): The regex used to match requests.
callback (function): The function to fire if we receive a matched
request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex.
This should return a tuple of (code, response).
"""
pass
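# Hedged registration sketch (not from synapse itself): wiring a callback into a
# JsonResource via register_paths(). The path and callback are hypothetical; the
# callback contract (request first, then named regex groups; returns a
# (code, response) tuple or a deferred of one) follows the docstring above.
def _example_register(json_resource):
    import re  # local import: sketch only
    def on_get_thing(request, thing_id):
        # "thing_id" is filled from the named group in the matched pattern.
        return defer.succeed((200, {"thing_id": thing_id}))
    json_resource.register_paths(
        "GET",
        [re.compile("^/_example/things/(?P<thing_id>[^/]*)$")],
        on_get_thing,
    )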
class JsonResource(HttpServer, resource.Resource):
""" This implements the HttpServer interface and provides JSON support for
Resources.
Register callbacks via register_path()
Callbacks can return a tuple of status code and a dict in which case the
the dict will automatically be sent to the client as a JSON object.
The JsonResource is primarily intended for returning JSON, but callbacks
may send something other than JSON, they may do so by using the methods
on the request object and instead returning None.
"""
isLeaf = True
_PathEntry = collections.namedtuple("_PathEntry", ["pattern", "callback"])
def __init__(self, hs, canonical_json=True):
resource.Resource.__init__(self)
self.canonical_json = canonical_json
self.clock = hs.get_clock()
self.path_regexs = {}
self.version_string = hs.version_string
self.hs = hs
def register_paths(self, method, path_patterns, callback):
for path_pattern in path_patterns:
self.path_regexs.setdefault(method, []).append(
self._PathEntry(path_pattern, callback)
)
def render(self, request):
""" This gets called by twisted every time someone sends us a request.
"""
self._async_render(request)
return server.NOT_DONE_YET
# Disable metric reporting because _async_render does its own metrics.
# It does its own metric reporting because _async_render dispatches to
# a callback and it's the class name of that callback we want to report
# against rather than the JsonResource itself.
@request_handler(report_metrics=False)
@defer.inlineCallbacks
def _async_render(self, request):
""" This gets called from render() every time someone sends us a request.
This checks if anyone has registered a callback for that method and
path.
"""
if request.method == "OPTIONS":
self._send_response(request, 200, {})
return
request_metrics = RequestMetrics()
request_metrics.start(self.clock)
# Loop through all the registered callbacks to check if the method
# and path regex match
for path_entry in self.path_regexs.get(request.method, []):
m = path_entry.pattern.match(request.path)
if not m:
continue
# We found a match! Trigger callback and then return the
# returned response. We pass both the request and any
# matched groups from the regex to the callback.
callback = path_entry.callback
servlet_instance = getattr(callback, "__self__", None)
if servlet_instance is not None:
servlet_classname = servlet_instance.__class__.__name__
else:
servlet_classname = "%r" % callback
kwargs = intern_dict({
name: urllib.unquote(value).decode("UTF-8") if value else value
for name, value in m.groupdict().items()
})
callback_return = yield callback(request, **kwargs)
if callback_return is not None:
code, response = callback_return
self._send_response(request, code, response)
try:
request_metrics.stop(self.clock, request, servlet_classname)
except:
pass
return
# Huh. No one wanted to handle that? Fiiiiiine. Send 400.
raise UnrecognizedRequestError()
def _send_response(self, request, code, response_json_object,
response_code_message=None):
# could alternatively use request.notifyFinish() and flip a flag when
# the Deferred fires, but since the flag is RIGHT THERE it seems like
# a waste.
if request._disconnected:
logger.warn(
"Not sending response to request %s, already disconnected.",
request)
return
outgoing_responses_counter.inc(request.method, str(code))
# TODO: Only enable CORS for the requests that need it.
respond_with_json(
request, code, response_json_object,
send_cors=True,
response_code_message=response_code_message,
pretty_print=_request_user_agent_is_curl(request),
version_string=self.version_string,
canonical_json=self.canonical_json,
)
class RequestMetrics(object):
def start(self, clock):
self.start = clock.time_msec()
self.start_context = LoggingContext.current_context()
def stop(self, clock, request, servlet_classname):
context = LoggingContext.current_context()
tag = ""
if context:
tag = context.tag
if context != self.start_context:
logger.warn(
"Context have unexpectedly changed %r, %r",
context, self.start_context
)
return
incoming_requests_counter.inc(request.method, servlet_classname, tag)
response_timer.inc_by(
clock.time_msec() - self.start, request.method,
servlet_classname, tag
)
ru_utime, ru_stime = context.get_resource_usage()
response_ru_utime.inc_by(
ru_utime, request.method, servlet_classname, tag
)
response_ru_stime.inc_by(
ru_stime, request.method, servlet_classname, tag
)
response_db_txn_count.inc_by(
context.db_txn_count, request.method, servlet_classname, tag
)
response_db_txn_duration.inc_by(
context.db_txn_duration, request.method, servlet_classname, tag
)
class RootRedirect(resource.Resource):
"""Redirects the root '/' path to another path."""
def __init__(self, path):
resource.Resource.__init__(self)
self.url = path
def render_GET(self, request):
return redirectTo(self.url, request)
def getChild(self, name, request):
if len(name) == 0:
return self # select ourselves as the child to render
return resource.Resource.getChild(self, name, request)
def respond_with_json(request, code, json_object, send_cors=False,
response_code_message=None, pretty_print=False,
version_string="", canonical_json=True):
if pretty_print:
json_bytes = encode_pretty_printed_json(json_object) + "\n"
else:
if canonical_json or synapse.events.USE_FROZEN_DICTS:
json_bytes = encode_canonical_json(json_object)
else:
# ujson doesn't like frozen_dicts.
json_bytes = ujson.dumps(json_object, ensure_ascii=False)
return respond_with_json_bytes(
request, code, json_bytes,
send_cors=send_cors,
response_code_message=response_code_message,
version_string=version_string
)
def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
version_string="", response_code_message=None):
"""Sends encoded JSON in response to the given request.
Args:
request (twisted.web.http.Request): The http request to respond to.
code (int): The HTTP response code.
json_bytes (bytes): The json bytes to use as the response body.
send_cors (bool): Whether to send Cross-Origin Resource Sharing headers
http://www.w3.org/TR/cors/
    Returns:
        twisted.web.server.NOT_DONE_YET
    """
request.setResponseCode(code, message=response_code_message)
request.setHeader(b"Content-Type", b"application/json")
request.setHeader(b"Server", version_string)
request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
if send_cors:
request.setHeader("Access-Control-Allow-Origin", "*")
request.setHeader("Access-Control-Allow-Methods",
"GET, POST, PUT, DELETE, OPTIONS")
request.setHeader("Access-Control-Allow-Headers",
"Origin, X-Requested-With, Content-Type, Accept")
request.write(json_bytes)
finish_request(request)
return NOT_DONE_YET
def finish_request(request):
""" Finish writing the response to the request.
    Twisted throws a RuntimeError if the connection closed before the
    response was written, but doesn't provide a convenient or reliable way to
    determine whether the connection was closed. So we catch and log the RuntimeError.
    You might think that ``request.notifyFinish`` could be used to tell if the
    request was finished. However, the deferred it returns won't fire if the
    connection was already closed, meaning we'd have to have called the method
    right at the start of the request. By the time we want to write the response
    it will already be too late.
    """
try:
request.finish()
except RuntimeError as e:
logger.info("Connection disconnected before response was written: %r", e)
def _request_user_agent_is_curl(request):
user_agents = request.requestHeaders.getRawHeaders(
"User-Agent", default=[]
)
for user_agent in user_agents:
if "curl" in user_agent:
return True
return False
| [
[
[
646,
658
],
[
4269,
4281
]
],
[
[
660,
672
],
[
3888,
3900
]
],
[
[
674,
694
],
[
3790,
3810
]
],
[
[
696,
720
],
[
9874,
9898
]
],
[
[
722,
727
],
[
5000,
5005
]
],
[
[
766,
780
],
[
10965,
10979
],
[
11072,
11086
],
[
3351,
3365
]
],
[
[
782,
804
],
[
3665,
3687
]
],
[
[
837,
848
],
[
9252,
9263
]
],
[
[
856,
871
]
],
[
[
879,
893
],
[
1251,
1258
],
[
12936,
12943
]
],
[
[
927,
948
],
[
12994,
13015
]
],
[
[
950,
976
],
[
12850,
12876
]
],
[
[
1009,
1014
],
[
8010,
8015
],
[
3139,
3144
]
],
[
[
1039,
1045
],
[
7664,
7670
]
],
[
[
1047,
1055
],
[
6358,
6366
],
[
12157,
12165
],
[
7039,
7047
],
[
12271,
12279
],
[
12558,
12566
]
],
[
[
1087,
1099
],
[
14594,
14606
]
],
[
[
1129,
1139
],
[
12379,
12389
]
],
[
[
1148,
1159
],
[
6919,
6930
]
],
[
[
1167,
1174
],
[
1212,
1219
]
],
[
[
1182,
1188
],
[
9288,
9294
]
],
[
[
1196,
1201
],
[
13115,
13120
]
],
[
[
1203,
1209
],
[
10241,
10247
],
[
11236,
11242
],
[
15356,
15362
],
[
3927,
3933
],
[
4093,
4099
],
[
4503,
4509
]
],
[
[
1241,
1248
],
[
1322,
1329
],
[
1436,
1443
],
[
1530,
1537
],
[
1645,
1652
],
[
1760,
1767
],
[
1879,
1886
],
[
2005,
2012
]
],
[
[
1294,
1319
],
[
11411,
11436
]
],
[
[
1407,
1433
],
[
10384,
10410
],
[
4133,
4159
]
],
[
[
1513,
1527
],
[
11490,
11504
]
],
[
[
1625,
1642
],
[
11686,
11703
]
],
[
[
1740,
1757
],
[
11791,
11808
]
],
[
[
1855,
1876
],
[
11896,
11917
]
],
[
[
1978,
2002
],
[
12017,
12041
]
],
[
[
2108,
2124
],
[
3289,
3305
],
[
3315,
3331
]
],
[
[
2135,
2150
],
[
7967,
7982
]
],
[
[
2317,
2337
],
[
2257,
2277
]
],
[
[
5453,
5463
],
[
6346,
6356
]
],
[
[
6333,
6345
]
],
[
[
10845,
10859
],
[
8398,
8412
],
[
3463,
3477
]
],
[
[
12144,
12156
]
],
[
[
12612,
12629
],
[
10515,
10532
],
[
4211,
4228
],
[
4792,
4809
]
],
[
[
13364,
13387
],
[
13172,
13195
]
],
[
[
14613,
14627
],
[
14559,
14573
]
],
[
[
15436,
15463
],
[
10693,
10720
],
[
4339,
4366
]
]
] |
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import deque, defaultdict
from sys import stdin
def check_connected(x, connected, adjacency):
stack = deque([x])
while stack:
u = stack.pop()
for v in adjacency[u]:
if v not in connected:
connected[v] = x
stack.append(v)
t = int(stdin.readline())
for _ in range(t):
n, k = map(int, stdin.readline().strip().split())
equalities = defaultdict(set)
inequalities = []
for _ in range(k):
x1, r, x2 = stdin.readline().strip().split()
x1 = int(x1)
x2 = int(x2)
if r == '=':
equalities[x1].add(x2)
equalities[x2].add(x1)
else:
inequalities.append((x1, x2))
connected_components = {}
for i in range(1, n + 1):
if i not in connected_components:
connected_components[i] = i
check_connected(i, connected_components, equalities)
for x1, x2 in inequalities:
if connected_components[x1] == connected_components[x2]:
print('NO')
break
else:
print('YES')
| [
[
[
301,
306
],
[
402,
407
]
],
[
[
308,
319
],
[
703,
714
]
],
[
[
336,
341
],
[
595,
600
],
[
652,
657
],
[
785,
790
]
],
[
[
348,
363
],
[
1157,
1172
]
],
[
[
587,
588
],
[
628,
629
]
],
[
[
617,
618
]
],
[
[
636,
637
],
[
1059,
1060
]
],
[
[
639,
640
],
[
761,
762
]
],
[
[
690,
700
],
[
893,
903
],
[
928,
938
],
[
1198,
1208
]
],
[
[
724,
736
],
[
977,
989
],
[
1228,
1240
]
],
[
[
750,
751
]
],
[
[
773,
775
],
[
835,
837
]
],
[
[
777,
778
],
[
871,
872
]
],
[
[
780,
782
],
[
856,
858
]
],
[
[
826,
828
],
[
904,
906
],
[
947,
949
],
[
998,
1000
]
],
[
[
847,
849
],
[
912,
914
],
[
939,
941
],
[
1002,
1004
]
],
[
[
1011,
1031
],
[
1087,
1107
],
[
1121,
1141
],
[
1176,
1196
],
[
1253,
1273
],
[
1281,
1301
]
],
[
[
1045,
1046
],
[
1078,
1079
],
[
1147,
1148
],
[
1142,
1143
],
[
1173,
1174
]
],
[
[
1218,
1220
],
[
1274,
1276
]
],
[
[
1222,
1224
],
[
1302,
1304
]
]
] |
"""
Django settings for apply project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '--v$_^*0r5(ok1^2sxdm4w_wwskvuv-z0tcop+yf1-m@+7p#5i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gunicorn',
'bootstrapform',
'yard',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'apply.urls'
WSGI_APPLICATION = 'apply.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'yard', # Or path to database file if using sqlite3.
'USER': 'frankie', # Not used with sqlite3.
'PASSWORD': 'frankie', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
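# Hedged alternative (not active in this project): the sqlite3 form of the same
# setting, following the inline hints above, would look roughly like:
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }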
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
DOCS_URL = BASE_DIR + '/yard/static/docs/'
LOGGING = {
'version': 1,
}
LOGIN_REDIRECT_URL="/"
| [
[
[
323,
325
],
[
337,
339
],
[
353,
355
]
],
[
[
326,
334
],
[
2436,
2444
]
],
[
[
585,
595
]
],
[
[
718,
723
]
],
[
[
732,
746
]
],
[
[
755,
768
]
],
[
[
802,
816
]
],
[
[
1050,
1068
]
],
[
[
1419,
1431
]
],
[
[
1448,
1464
]
],
[
[
1571,
1580
]
],
[
[
2204,
2217
]
],
[
[
2229,
2238
]
],
[
[
2248,
2256
]
],
[
[
2265,
2273
]
],
[
[
2282,
2288
]
],
[
[
2400,
2410
]
],
[
[
2425,
2433
]
],
[
[
2470,
2477
]
],
[
[
2503,
2521
]
]
] |
"""
WSGI config for Boats & Joy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.production')
application = get_wsgi_application()
| [
[
[
234,
236
],
[
289,
291
]
],
[
[
267,
287
],
[
382,
402
]
],
[
[
368,
379
]
]
] |
#!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wtscenario, wttest
from wtdataset import SimpleDataSet
# test_lsm01.py
# Test LSM tree configuration options.
class test_lsm01(wttest.WiredTigerTestCase):
K = 1024
M = 1024 * K
G = 1024 * M
uri = "lsm:test_lsm01"
chunk_size_scenarios = wtscenario.quick_scenarios('s_chunk_size',
[1*M,20*M,None], [0.6,0.6,0.6])
merge_max_scenarios = wtscenario.quick_scenarios('s_merge_max',
[2,10,20,None], None)
bloom_scenarios = wtscenario.quick_scenarios('s_bloom',
[True,False,None], None)
bloom_bit_scenarios = wtscenario.quick_scenarios('s_bloom_bit_count',
[2,8,20,None], None)
bloom_hash_scenarios = wtscenario.quick_scenarios('s_bloom_hash_count',
[2,10,20,None], None)
# Occasionally add a lot of records, so that merges (and bloom) happen.
record_count_scenarios = wtscenario.quick_scenarios(
'nrecs', [10, 10000], [0.9, 0.1])
config_vars = [ 'chunk_size', 'merge_max', 'bloom',
'bloom_bit_count', 'bloom_hash_count' ]
scenarios = wtscenario.make_scenarios(
chunk_size_scenarios, merge_max_scenarios, bloom_scenarios,
bloom_bit_scenarios, bloom_hash_scenarios, record_count_scenarios,
prune=100, prunelong=500)
# Test drop of an object.
def test_lsm(self):
args = 'key_format=S'
args += ',lsm=(' # Start the LSM configuration options.
# add names to args, e.g. args += ',session_max=30'
for var in self.config_vars:
value = getattr(self, 's_' + var)
if value != None:
if var == 'verbose':
value = '[' + str(value) + ']'
if value == True:
value = 'true'
if value == False:
value = 'false'
args += ',' + var + '=' + str(value)
args += ')' # Close the LSM configuration option group
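        # For illustration (hypothetical scenario values): with s_chunk_size=20*M,
        # s_merge_max=10, s_bloom=True and the bloom bit/hash scenarios left as
        # None, the loop above builds
        #   'key_format=S,lsm=(,chunk_size=20971520,merge_max=10,bloom=true)'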
self.verbose(3,
'Test LSM with config: ' + args + ' count: ' + str(self.nrecs))
SimpleDataSet(self, self.uri, self.nrecs).populate()
# TODO: Adding an explicit drop here can cause deadlocks, if a merge
# is still happening. See issue #349.
# self.session.drop(self.uri)
if __name__ == '__main__':
wttest.run()
| [
[
[
1306,
1316
]
],
[
[
1318,
1328
],
[
1579,
1589
],
[
1688,
1698
],
[
1782,
1792
],
[
1879,
1889
],
[
1983,
1993
],
[
2167,
2177
],
[
2371,
2381
]
],
[
[
1330,
1336
],
[
1449,
1455
],
[
3596,
3602
]
],
[
[
1359,
1372
],
[
3349,
3362
]
],
[
[
1438,
1448
]
]
] |
# pylint: skip-file
HARDWARE_ITEMS = [
{'attributes': [],
'capacity': '999',
'description': 'Unknown',
'itemCategory': {'categoryCode': 'unknown', 'id': 325},
'keyName': 'UNKNOWN',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 1245172,
"locationGroupId": '',
'itemId': 935954,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '64',
'description': '1 IPv6 Address',
'itemCategory': {'categoryCode': 'pri_ipv6_addresses',
'id': 325},
'keyName': '1_IPV6_ADDRESS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 17129,
"locationGroupId": '',
'itemId': 4097,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '10',
'description': '10 Mbps Public & Private Network Uplinks',
'itemCategory': {'categoryCode': 'port_speed', 'id': 26},
'keyName': '10_MBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 272,
"locationGroupId": '',
'itemId': 186,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 5}]},
{'attributes': [],
'capacity': '0',
'description': 'Ubuntu Linux 14.04 LTS Trusty Tahr (64 bit)',
'itemCategory': {'categoryCode': 'os', 'id': 12},
'keyName': 'OS_UBUNTU_14_04_LTS_TRUSTY_TAHR_64_BIT',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 37650,
"locationGroupId": '',
'itemId': 4702,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 9}],
'softwareDescription': {'id': 1362,
'longDescription': 'Ubuntu / 14.04-64',
'referenceCode': 'UBUNTU_14_64'}},
{'attributes': [],
'capacity': '1',
'description': '1 IP Address',
'itemCategory': {'categoryCode': 'pri_ip_addresses', 'id': 13},
'keyName': '1_IP_ADDRESS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 21,
"locationGroupId": '',
'itemId': 15,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [{'attributeTypeKeyName': 'RECLAIM_BYPASS',
'id': 1014}],
'description': 'Unlimited SSL VPN Users',
'itemCategory': {'categoryCode': 'vpn_management', 'id': 31},
'keyName': 'SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 420,
"locationGroupId": '',
'itemId': 309,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'description': 'Reboot / KVM over IP',
'itemCategory': {'categoryCode': 'remote_management',
'id': 46},
'keyName': 'REBOOT_KVM_OVER_IP',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 906,
"locationGroupId": '',
'itemId': 504,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '0',
'description': '0 GB Bandwidth',
'itemCategory': {'categoryCode': 'bandwidth', 'id': 10},
'keyName': 'BANDWIDTH_0_GB',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'id': 22505,
"locationGroupId": '',
'itemId': 4481,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 98}]},
{'attributes': [],
'capacity': '0',
'description': '0 GB Bandwidth',
'itemCategory': {'categoryCode': 'bandwidth', 'id': 10},
'keyName': 'BANDWIDTH_0_GB_2',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 1800,
"locationGroupId": '',
'itemId': 439,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'setupFee': '0',
'sort': 99}]}]
ENTERPRISE_PACKAGE = {
'categories': [
{'categoryCode': 'storage_service_enterprise'}
],
'id': 240,
'name': 'Endurance',
'items': [
{
'capacity': '0',
'itemCategory': {'categoryCode': 'storage_service_enterprise'},
'keyName': 'CODENAME_PRIME_STORAGE_SERVICE',
'prices': [
{
'categories': [
{'categoryCode': 'storage_service_enterprise'}
],
'id': 45058,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {'categoryCode': 'storage_file'},
'keyName': 'FILE_STORAGE_2',
'prices': [
{
'categories': [
{'categoryCode': 'storage_file'}
],
'id': 45108,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {'categoryCode': 'storage_block'},
'keyName': 'BLOCK_STORAGE_2',
'prices': [
{
'categories': [
{'categoryCode': 'storage_block'}
],
'id': 45098,
'locationGroupId': ''
}
]
}, {
'capacity': '10',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '10_GB_STORAGE_SPACE',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 46160,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '300',
'capacityRestrictionMinimum': '300',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 46170,
'locationGroupId': ''
}
]
}, {
'capacity': '20',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '20_GB_PERFORMANCE_STORAGE_SPACE',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 45860,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_replication'}
],
'id': 46659,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'id': 45128,
'locationGroupId': ''
}
]
}, {
'capacity': '1000',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '1000_GB_PERFORMANCE_STORAGE_SPACE',
'prices': [
{
'capacityRestrictionMaximum': '300',
'capacityRestrictionMinimum': '300',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_replication'}
],
'id': 46789,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '300',
'capacityRestrictionMinimum': '300',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'id': 45318,
'locationGroupId': ''
}
]
}, {
'attributes': [
{'value': '300'}
],
'capacity': '300',
'itemCategory': {'categoryCode': 'storage_tier_level'},
'keyName': 'WRITEHEAVY_TIER',
'prices': [
{
'categories': [
{'categoryCode': 'storage_tier_level'}
],
'id': 45088,
'locationGroupId': ''
}
]
}, {
'attributes': [
{'value': '200'}
],
'capacity': '200',
'itemCategory': {'categoryCode': 'storage_tier_level'},
'keyName': 'READHEAVY_TIER',
'prices': [
{
'categories': [
{'categoryCode': 'storage_tier_level'}
],
'id': 45078,
'locationGroupId': ''
}
]
}
]
}
PERFORMANCE_PACKAGE = {
'categories': [
{'categoryCode': 'performance_storage_iscsi'},
{'categoryCode': 'performance_storage_nfs'}
],
'id': 222,
'name': 'Performance',
'items': [
{
'capacity': '0',
'itemCategory': {'categoryCode': 'performance_storage_iscsi'},
'keyName': 'BLOCK_STORAGE_PERFORMANCE_ISCSI',
'prices': [
{
'categories': [
{'categoryCode': 'performance_storage_iscsi'}
],
'id': 40672,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {'categoryCode': 'performance_storage_nfs'},
'keyName': 'FILE_STORAGE_PERFORMANCE_NFS',
'prices': [
{
'categories': [
{'categoryCode': 'performance_storage_nfs'}
],
'id': 40662,
'locationGroupId': ''
}
]
}, {
'capacity': '20',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '20_GB_PERFORMANCE_STORAGE_SPACE',
'prices': [
{
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'id': 40682,
'locationGroupId': ''
}
]
}, {
'capacity': '1000',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '1000_GB_PERFORMANCE_STORAGE_SPACE',
'prices': [
{
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'id': 40742,
'locationGroupId': ''
}
]
}, {
'capacity': '800',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'keyName': '800_IOPS_4',
'prices': [
{
'capacityRestrictionMaximum': '1000',
'capacityRestrictionMinimum': '100',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 41562,
'locationGroupId': ''
}
]
}, {
'capacity': '1000',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'keyName': '1000_IOPS',
'prices': [
{
'capacityRestrictionMaximum': '20',
'capacityRestrictionMinimum': '20',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 40882,
'locationGroupId': ''
}
]
}
]
}
SAAS_PACKAGE = {
'categories': [
{'categoryCode': 'storage_as_a_service'}
],
'id': 759,
'name': 'Storage As A Service (StaaS)',
'items': [
{
'capacity': '0',
'keyName': '',
'prices': [
{
'id': 189433,
'categories': [
{'categoryCode': 'storage_as_a_service'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'keyName': '',
'prices': [
{
'categories': [
{'categoryCode': 'storage_block'}
],
'id': 189443,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'keyName': '',
'prices': [
{
'categories': [
{'categoryCode': 'storage_file'}
],
'id': 189453,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '999',
'capacityMinimum': '500',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '500_999_GBS',
'prices': [
{
'id': 189993,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '1999',
'capacityMinimum': '1000',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '1000_1999_GBS',
'prices': [
{
'id': 190113,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '12000',
'capacityMinimum': '1',
'keyName': 'STORAGE_SPACE_FOR_2_IOPS_PER_GB',
'prices': [
{
'id': 193433,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '12000',
'capacityMinimum': '1',
'keyName': 'STORAGE_SPACE_FOR_4_IOPS_PER_GB',
'prices': [
{
'id': 194763,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '10000',
'capacityMinimum': '100',
'keyName': '',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'prices': [
{
'capacityRestrictionMaximum': '999',
'capacityRestrictionMinimum': '500',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 190053,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '20000',
'capacityMinimum': '100',
'keyName': '',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'prices': [
{
'capacityRestrictionMaximum': '1999',
'capacityRestrictionMinimum': '1000',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 190173,
'locationGroupId': ''
}
]
}, {
'capacity': '200',
'itemCategory': {'categoryCode': 'storage_tier_level'},
'keyName': '',
'prices': [
{
'id': 193373,
'categories': [
{'categoryCode': 'storage_tier_level'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '300',
'itemCategory': {'categoryCode': 'storage_tier_level'},
'keyName': '',
'prices': [
{
'id': 194703,
'categories': [
{'categoryCode': 'storage_tier_level'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '10',
'keyName': '',
'prices': [
{
'capacityRestrictionMaximum': '48000',
'capacityRestrictionMinimum': '100',
'capacityRestrictionType': 'IOPS',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 191193,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 193613,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '300',
'capacityRestrictionMinimum': '300',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 194943,
'locationGroupId': ''}]
}, {
'capacity': '20',
'keyName': '',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 193853,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {
'categoryCode': 'performance_storage_replication'
},
'keyName': 'REPLICATION_FOR_IOPSBASED_PERFORMANCE',
'prices': [
{
'capacityRestrictionMaximum': '48000',
'capacityRestrictionMinimum': '1',
'capacityRestrictionType': 'IOPS',
'categories': [
{'categoryCode': 'performance_storage_replication'}
],
'id': 192033,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {
'categoryCode': 'performance_storage_replication'
},
'keyName': 'REPLICATION_FOR_TIERBASED_PERFORMANCE',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_replication'}
],
'id': 194693,
'locationGroupId': ''
}
]
}
]
}
SAAS_REST_PACKAGE = {
'categories': [
{'categoryCode': 'storage_as_a_service'}
],
'id': 759,
'name': 'Storage As A Service (StaaS)',
'items': [
{
'capacity': '0',
'keyName': '',
'prices': [
{
'id': 189433,
'categories': [
{'categoryCode': 'storage_as_a_service'}
],
'locationGroupId': None
}
]
}, {
'capacity': '20',
'keyName': '',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 193853,
'locationGroupId': None
}
]
}, {
'capacity': '0',
'capacityMaximum': '1999',
'capacityMinimum': '1000',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '1000_1999_GBS',
'prices': [
{
'id': 190113,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': None
}
]
}, {
'capacity': '0',
'capacityMaximum': '20000',
'capacityMinimum': '100',
'keyName': '',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'prices': [
{
'capacityRestrictionMaximum': '1999',
'capacityRestrictionMinimum': '1000',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 190173,
'locationGroupId': None
}
]
}, {
'capacity': '0',
'keyName': '',
'prices': [
{
'categories': [
{'categoryCode': 'storage_file'}
],
'id': 189453,
'locationGroupId': None
}
]
}
]
}
activePreset1 = {
'description': 'Single Xeon 1270, 8GB Ram, 2x1TB SATA disks, Non-RAID',
'id': 64,
'isActive': '1',
'keyName': 'S1270_8GB_2X1TBSATA_NORAID',
'name': 'S1270 8GB 2X1TBSATA NORAID',
'packageId': 200,
'prices': [
{
"hourlyRecurringFee": "1.18",
"id": 165711,
"locationGroupId": '',
"recurringFee": "780",
}
]
}
activePreset2 = {
'description': 'Dual Xeon Gold, 384GB Ram, 4x960GB SSD, RAID 10',
'id': 65,
'isActive': '1',
'keyName': 'DGOLD_6140_384GB_4X960GB_SSD_SED_RAID_10',
'name': 'DGOLD 6140 384GB 4X960GB SSD SED RAID 10',
'packageId': 200,
'prices': [
{
"hourlyRecurringFee": "1.18",
"id": 165711,
"locationGroupId": '',
"recurringFee": "780",
}
]
}
getAllObjects = [{
'activePresets': [activePreset1],
'accountRestrictedActivePresets': [activePreset2],
'description': 'Bare Metal Server',
'firstOrderStepId': 1,
'id': 200,
'isActive': 1,
'items': HARDWARE_ITEMS,
'name': 'Bare Metal Server',
'regions': [{'description': 'WDC01 - Washington, DC - East Coast U.S.',
'keyname': 'WASHINGTON_DC',
'location': {'location': {'id': 37473,
'longName': 'Washington 1',
'name': 'wdc01'}},
'sortOrder': 10}],
'subDescription': 'Bare Metal Server',
'unitSize': 1,
"itemPrices": [
{
"hourlyRecurringFee": ".027",
"id": 205911,
"laborFee": "0",
"locationGroupId": 505,
"capacityRestrictionMaximum": "40",
"capacityRestrictionMinimum": "40",
"capacityRestrictionType": "CORE",
"item": {
"capacity": "0",
"description": "Load Balancer Uptime",
"id": 10785,
"keyName": "LOAD_BALANCER_UPTIME",
}
},
{
"hourlyRecurringFee": "0",
"id": 199467,
"laborFee": "0",
"locationGroupId": '',
"recurringFee": "0",
"item": {
"capacity": "0",
"description": "Load Balancer Bandwidth",
"id": 10051,
"keyName": "LOAD_BALANCER_BANDWIDTH",
}
},
{
"hourlyRecurringFee": ".028",
"id": 205913,
"laborFee": "0",
"locationGroupId": 507,
"item": {
"capacity": "0",
"description": "Load Balancer Uptime",
"id": 10785,
"keyName": "LOAD_BALANCER_UPTIME",
}
}]
}]
getItems = [
{
'id': 1234,
'keyName': 'KeyName01',
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'softwareDescription': {
'id': 1228,
'longDescription': 'Redhat EL 5.10-64',
'referenceCode': 'REDHAT_5_64'
},
'prices': [{'id': 1122,
'hourlyRecurringFee': 0.10,
'recurringFee': 0.10,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 2233,
'keyName': 'KeyName02',
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'prices': [{'id': 4477,
'hourlyRecurringFee': 0.10,
'recurringFee': 0.10,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 1239,
'keyName': 'KeyName03',
'capacity': '2',
'description': 'RAM',
'itemCategory': {'categoryCode': 'RAM'},
'prices': [{'id': 1133,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 3,
'name': 'RAM',
'categoryCode': 'ram'}]}],
},
{
'id': 1240,
'keyName': 'KeyName014',
'capacity': '4',
'units': 'PRIVATE_CORE',
'description': 'Computing Instance (Dedicated)',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 1007,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 1250,
'keyName': 'KeyName015',
'capacity': '4',
'units': 'CORE',
'description': 'Computing Instance',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 1144,
'locationGroupId': None,
'hourlyRecurringFee': 0.10,
'recurringFee': 0.10,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 112233,
'keyName': 'KeyName016',
'capacity': '55',
'units': 'CORE',
'description': 'Computing Instance',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 332211,
'locationGroupId': 1,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 4439,
'keyName': 'KeyName017',
'capacity': '1',
'description': '1 GB iSCSI Storage',
'itemCategory': {'categoryCode': 'iscsi'},
'prices': [{'id': 2222, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{
'id': 1121,
'keyName': 'KeyName081',
'capacity': '20',
'description': '20 GB iSCSI snapshot',
'itemCategory': {'categoryCode': 'iscsi_snapshot_space'},
'prices': [{'id': 2014, 'hourlyRecurringFee': 0.10}],
},
{
'id': 4440,
'keyName': 'KeyName019',
'capacity': '4',
'description': '4 Portable Public IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_pub'},
'prices': [{'id': 4444, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{
'id': 8880,
'keyName': 'KeyName0199',
'capacity': '8',
'description': '8 Portable Public IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_pub'},
'prices': [{'id': 8888, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{
'id': 44400,
'keyName': 'KeyName0155',
'capacity': '4',
'description': '4 Portable Private IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_priv'},
'prices': [{'id': 44441, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{
'id': 88800,
'keyName': 'KeyName0144',
'capacity': '8',
'description': '8 Portable Private IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_priv'},
'prices': [{'id': 88881, 'hourlyRecurringFee': 0.0, 'recurringFee': 0.0}],
},
{
'id': 10,
'keyName': 'KeyName0341',
'capacity': '0',
'description': 'Global IPv4',
'itemCategory': {'categoryCode': 'global_ipv4'},
'prices': [{'id': 11, 'hourlyRecurringFee': 0.0, 'recurringFee': 0.0}],
},
{
'id': 66464,
'keyName': '1_IPV6_ADDRESS',
'capacity': '64',
'description': '/64 Block Portable Public IPv6 Addresses',
'itemCategory': {'categoryCode': 'static_ipv6_addresses'},
'prices': [{'id': 664641, 'hourlyRecurringFee': '0', 'locationGroupId': '', 'recurringFee': '0'}],
},
{
'id': 610,
'keyName': 'KeyName031',
'capacity': '0',
'description': 'Global IPv6',
'itemCategory': {'categoryCode': 'global_ipv6'},
'prices': [{'id': 611, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{'attributes': [],
'capacity': '0',
'description': '0 GB Bandwidth',
'itemCategory': {'categoryCode': 'bandwidth', 'id': 10},
'keyName': 'BANDWIDTH_0_GB_2',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 1800,
"locationGroupId": '',
'itemId': 439,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'setupFee': '0',
'sort': 99}]},
{'attributes': [],
'capacity': '10',
'description': '10 Mbps Public & Private Network Uplinks',
'itemCategory': {'categoryCode': 'port_speed', 'id': 26},
'keyName': '10_MBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 272,
"locationGroupId": '',
'itemId': 186,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 5}]},
{'attributes': [],
'capacity': '0',
'description': 'Ubuntu Linux 14.04 LTS Trusty Tahr (64 bit)',
'itemCategory': {'categoryCode': 'os', 'id': 12},
'keyName': 'OS_UBUNTU_14_04_LTS_TRUSTY_TAHR_64_BIT',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0.10',
'id': 37650,
"locationGroupId": '',
'itemId': 4702,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0.1',
'setupFee': '0.1',
'sort': 9}],
'softwareDescription': {'id': 1362,
'longDescription': 'Ubuntu / 14.04-64',
'referenceCode': 'UBUNTU_14_64'}}
]
getItemPricesISCSI = [
{
'currentPriceFlag': '',
'id': 2152,
'item': {
'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{'id': 46626,
'itemPriceId': 2152, 'packageId': 0}],
'quantity': '',
'recurringFee': '.35',
'setupFee': '0',
'sort': 0
},
{
'currentPriceFlag': '',
'id': 22501,
'item': {'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{
'id': 252983,
'itemPriceId': 22501, 'packageId': 0
}],
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0
},
{
'currentPriceFlag': '',
'id': 22441,
'item': {
'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547
},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{'id': 250326,
'itemPriceId': 22441, 'packageId': 0}],
'quantity': '',
'recurringFee': '15',
'setupFee': '0',
'sort': 0
}]
getItemsVS = [
{
'id': 1234,
'keyName': 'KeyName01',
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'softwareDescription': {
'id': 1228,
'longDescription': 'Redhat EL 5.10-64',
'referenceCode': 'REDHAT_5_64'
},
'prices': [{'id': 1122,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 2233,
'keyName': 'KeyName02',
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'prices': [{'id': 4477,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 1239,
'keyName': 'KeyName03',
'capacity': '2',
'description': 'RAM',
'itemCategory': {'categoryCode': 'RAM'},
'prices': [{'id': 1133,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 3,
'name': 'RAM',
'categoryCode': 'ram'}]}],
}
]
verifyOrderDH = {
'preTaxSetup': '0',
'storageGroups': [],
'postTaxRecurring': '3.164',
'billingOrderItemId': '',
'presetId': '',
'hardware': [
{
'domain': 't.com',
'hostname': 't',
'bareMetalInstanceFlag': '',
'hardwareStatusId': '',
'primaryBackendNetworkComponent': {
'router': {
'id': 51218
},
'networkVlanId': ''
},
'accountId': ''
}
],
'prices': [
{
'itemId': 10195,
'setupFee': '0',
'recurringFee': '0',
'hourlyRecurringFee': '3.164',
'oneTimeFee': '0',
'id': 200269,
'item': {
'thirdPartyPolicyAssignments': [],
'capacity': '56',
'description': '56 Cores X 242 RAM X 1.2 TB',
'bundle': [
{
'category': {
'categoryCode': 'dedicated_host_ram',
'id': 850,
'name': 'Dedicated Host RAM'
},
'itemPriceId': 200301,
'itemPrice': {
'itemId': 10199,
'setupFee': '0',
'recurringFee': '0',
'hourlyRecurringFee': '0',
'oneTimeFee': '0',
'id': 200301,
'laborFee': '0'
},
'bundleItemId': 10195,
'bundleItem': {
'units': 'CORE',
'keyName': '56_CORES_X_242_RAM_X_1_4_TB',
'capacity': '56',
'description': '56 Cores X 242 RAM X 1.2 TB',
'id': 10195
},
'id': 41763
},
{
'category': {
'categoryCode': 'dedicated_host_disk',
'id': 851,
'name': 'Dedicated Host Disk'
},
'itemPriceId': 200299,
'itemPrice': {
'itemId': 10197,
'setupFee': '0',
'recurringFee': '0',
'hourlyRecurringFee': '0',
'oneTimeFee': '0',
'id': 200299,
'laborFee': '0'
},
'bundleItemId': 10195,
'bundleItem': {
'units': 'CORE',
'keyName': '56_CORES_X_242_RAM_X_1_4_TB',
'capacity': '56',
'description': '56 Cores X 242 RAM X 1.2 TB',
'id': 10195
},
'id': 41761
}
],
'keyName': '56_CORES_X_242_RAM_X_1_4_TB',
'units': 'CORE',
'id': 10195
},
'laborFee': '0',
'categories': [
{
'categoryCode': 'dedicated_virtual_hosts',
'id': 848,
'name': 'Dedicated Host'
}
]
}
],
'sendQuoteEmailFlag': '',
'packageId': 813,
'useHourlyPricing': True,
'preTaxRecurringMonthly': '0',
'message': '',
'preTaxRecurring': '3.164',
'primaryDiskPartitionId': '',
'locationObject': {
'id': 138124,
'name': 'dal05',
'longName': 'Dallas 5'
},
'taxCompletedFlag': False,
'isManagedOrder': '',
'imageTemplateId': '',
'postTaxRecurringMonthly': '0',
'resourceGroupTemplateId': '',
'postTaxSetup': '0',
'sshKeys': [],
'location': '138124',
'stepId': '',
'proratedInitialCharge': '0',
'totalRecurringTax': '0',
'paymentType': '',
'resourceGroupId': '',
'sourceVirtualGuestId': '',
'bigDataOrderFlag': False,
'extendedHardwareTesting': '',
'preTaxRecurringHourly': '3.164',
'postTaxRecurringHourly': '3.164',
'currencyShortName': 'USD',
'containerSplHash': '000000003699c54000007f38ef8b0102',
'proratedOrderTotal': '0',
'serverCoreCount': '',
'privateCloudOrderFlag': False,
'totalSetupTax': '0',
'quantity': 1
}
itemsLoadbal = [
{
"capacity": "0",
"description": "Load Balancer as a Service",
"id": 10043,
"keyName": "LOAD_BALANCER_AS_A_SERVICE",
"itemCategory": {
"categoryCode": "load_balancer_as_a_service",
"id": 1116,
"name": "Load Balancer As A Service",
},
"prices": [
{
"hourlyRecurringFee": "0",
"id": 199447,
"locationGroupId": '',
"recurringFee": "0",
}
]
},
{
"capacity": "0",
"description": "Load Balancer Uptime",
"id": 10785,
"keyName": "LOAD_BALANCER_UPTIME",
"itemCategory": {
"categoryCode": "load_balancer_uptime",
"id": 1119,
"name": "Load Balancer Uptime",
},
"prices": [
{
"hourlyRecurringFee": ".028",
"id": 205913,
"locationGroupId": 507,
}]}
]
regionsLoadbal = [{'description': 'WDC01 - Washington, DC - East Coast U.S.',
'keyname': 'WASHINGTON_DC',
'location': {'location': {'id': 37473,
'longName': 'Washington 1',
'name': 'wdc01',
"groups": [
{
"description": "Location Group 4",
"id": 507,
"locationGroupTypeId": 82,
"name": "Location Group 4",
"locationGroupType": {
"name": "PRICING"
}
},
{
"description": "COS Cross Region - EU",
"id": 1303,
"locationGroupTypeId": 82,
"name": "eu",
"locationGroupType": {
"name": "PRICING"
}
},
{
"description": "COS Regional Frankfurt",
"id": 1783,
"locationGroupTypeId": 82,
"name": "eu-de",
"locationGroupType": {
"name": "PRICING"
}
}
]
}},
'sortOrder': 10}]
getAllObjectsLoadbal = [
{
"id": 805,
"keyName": "LBAAS",
"name": "Load Balancer As A Service (LBaaS)",
"items": itemsLoadbal,
"regions": regionsLoadbal
}
]
getAllObjectsDH = [{
"subDescription": "Dedicated Host",
"name": "Dedicated Host",
"items": [{
"capacity": "56",
"description": "56 Cores X 242 RAM X 1.2 TB",
"bundleItems": [
{
"capacity": "1200",
"keyName": "1_4_TB_LOCAL_STORAGE_DEDICATED_HOST_CAPACITY",
"categories": [{
"categoryCode": "dedicated_host_disk"
}]
},
{
"capacity": "242",
"keyName": "242_GB_RAM",
"categories": [{
"categoryCode": "dedicated_host_ram"
}]
}
],
"prices": [
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2099",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.164",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200269,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": "",
"quantity": ""
},
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2161.97",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.258",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200271,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": 503,
"quantity": ""
}
],
"keyName": "56_CORES_X_242_RAM_X_1_4_TB",
"id": 10195,
"itemCategory": {
"categoryCode": "dedicated_virtual_hosts"
}
}],
"keyName": "DEDICATED_HOST",
"unitSize": "",
"regions": [{
"location": {
"locationPackageDetails": [{
"isAvailable": 1,
"locationId": 138124,
"packageId": 813
}],
"location": {
"statusId": 2,
"priceGroups": [{
"locationGroupTypeId": 82,
"description": "CDN - North America - Akamai",
"locationGroupType": {
"name": "PRICING"
},
"securityLevelId": "",
"id": 1463,
"name": "NORTH-AMERICA-AKAMAI"
}],
"id": 138124,
"name": "dal05",
"longName": "Dallas 5"
}
},
"keyname": "DALLAS05",
"description": "DAL05 - Dallas",
"sortOrder": 12
}],
"firstOrderStepId": "",
"id": 813,
"isActive": 1,
"description": "Dedicated Host"
}]
getAllObjectsDHGpu = [{
"subDescription": "Dedicated Host",
"name": "Dedicated Host",
"items": [{
"capacity": "56",
"description": "56 Cores x 360 RAM x 1.2 TB x 2 GPU P100 [encryption enabled]",
"bundleItems": [
{
"capacity": "1200",
"keyName": "1.2 TB Local Storage (Dedicated Host Capacity)",
"categories": [{
"categoryCode": "dedicated_host_disk"
}]
},
{
"capacity": "242",
"keyName": "2_GPU_P100_DEDICATED",
"hardwareGenericComponentModel": {
"capacity": "16",
"id": 849,
"hardwareComponentType": {
"id": 20,
"keyName": "GPU"
}
},
"categories": [{
"categoryCode": "dedicated_host_ram"
}]
}
],
"prices": [
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2099",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.164",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200269,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": "",
"quantity": ""
},
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2161.97",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.258",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200271,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": 503,
"quantity": ""
}
],
"keyName": "56_CORES_X_484_RAM_X_1_5_TB_X_2_GPU_P100",
"id": 10195,
"itemCategory": {
"categoryCode": "dedicated_virtual_hosts"
}
}],
"keyName": "DEDICATED_HOST",
"unitSize": "",
"regions": [{
"location": {
"locationPackageDetails": [{
"isAvailable": 1,
"locationId": 138124,
"packageId": 813
}],
"location": {
"statusId": 2,
"priceGroups": [{
"locationGroupTypeId": 82,
"description": "CDN - North America - Akamai",
"locationGroupType": {
"name": "PRICING"
},
"securityLevelId": "",
"id": 1463,
"name": "NORTH-AMERICA-AKAMAI"
}],
"id": 138124,
"name": "dal05",
"longName": "Dallas 5"
}
},
"keyname": "DALLAS05",
"description": "DAL05 - Dallas",
"sortOrder": 12
}],
"firstOrderStepId": "",
"id": 813,
"isActive": 1,
"description": "Dedicated Host"
}]
getRegions = [{
"description": "WDC07 - Washington, DC",
"keyname": "WASHINGTON07",
"location": {
"locationId": 2017603,
"location": {
"id": 2017603,
"longName": "Washington 7",
"name": "wdc07",
"priceGroups": [
{
"description": "COS Regional - US East",
"id": 1305,
"locationGroupTypeId": 82,
"name": "us-east",
"locationGroupType": {
"name": "PRICING"
}
}
]
}
},
"locations": [{
"location": {
"euCompliantFlag": False,
"id": 2017603,
"longName": "Washington 7",
"name": "wdc07",
"statusId": 2},
"locationPackageDetails": [{
"isAvailable": 1,
"locationId": 2017603,
"packageId": 46
}]
}]
}]
getItemPrices = [
{
"hourlyRecurringFee": ".093",
"id": 204015,
"recurringFee": "62",
"categories": [
{
"categoryCode": "guest_core"
}
],
"item": {
"description": "4 x 2.0 GHz or higher Cores",
"id": 859,
"keyName": "GUEST_CORES_4",
},
"pricingLocationGroup": {
"id": 503,
"locations": [
{
"id": 449610,
"longName": "Montreal 1",
"name": "mon01",
"statusId": 2,
"regions": [
{
"description": "MON01 - Montreal",
"keyname": "MONTREAL",
"sortOrder": 94
}
]
},
{
"id": 449618,
"longName": "Montreal 2",
"name": "mon02",
"statusId": 2
},
{
"id": 448994,
"longName": "Toronto 1",
"name": "tor01",
"statusId": 2
},
{
"id": 350993,
"longName": "Toronto 2",
"name": "tor02",
"statusId": 2
},
{
"id": 221894,
"longName": "Amsterdam 2",
"name": "ams02",
"statusId": 2,
"regions": [
{
"description": "AMS02 POP - Amsterdam",
"keyname": "AMSTERDAM02",
"sortOrder": 12
}
]
},
{
"id": 265592,
"longName": "Amsterdam 1",
"name": "ams01",
"statusId": 2
},
{
"id": 814994,
"longName": "Amsterdam 3",
"name": "ams03",
"statusId": 2
}
]
}
},
{
"hourlyRecurringFee": ".006",
"id": 204663,
"recurringFee": "4.1",
"item": {
"description": "100 GB (LOCAL)",
"id": 3899,
"keyName": "GUEST_DISK_100_GB_LOCAL_3",
},
"pricingLocationGroup": {
"id": 503,
"locations": [
{
"id": 449610,
"longName": "Montreal 1",
"name": "mon01",
"statusId": 2
},
{
"id": 449618,
"longName": "Montreal 2",
"name": "mon02",
"statusId": 2
},
{
"id": 448994,
"longName": "Toronto 1",
"name": "tor01",
"statusId": 2
},
{
"id": 350993,
"longName": "Toronto 2",
"name": "tor02",
"statusId": 2
},
{
"id": 221894,
"longName": "Amsterdam 2",
"name": "ams02",
"statusId": 2
},
{
"id": 265592,
"longName": "Amsterdam 1",
"name": "ams01",
"statusId": 2
},
{
"id": 814994,
"longName": "Amsterdam 3",
"name": "ams03",
"statusId": 2
}
]
}
},
{
"hourlyRecurringFee": ".217",
"id": 204255,
"recurringFee": "144",
"item": {
"description": "16 GB ",
"id": 1017,
"keyName": "RAM_16_GB",
},
"pricingLocationGroup": {
"id": 503,
"locations": [
{
"id": 449610,
"longName": "Montreal 1",
"name": "mon01",
"statusId": 2
},
{
"id": 449618,
"longName": "Montreal 2",
"name": "mon02",
"statusId": 2
},
{
"id": 448994,
"longName": "Toronto 1",
"name": "tor01",
"statusId": 2
},
{
"id": 350993,
"longName": "Toronto 2",
"name": "tor02",
"statusId": 2
},
{
"id": 221894,
"longName": "Amsterdam 2",
"name": "ams02",
"statusId": 2
},
{
"id": 265592,
"longName": "Amsterdam 1",
"name": "ams01",
"statusId": 2
},
{
"id": 814994,
"longName": "Amsterdam 3",
"name": "ams03",
"statusId": 2
}
]
}
}
]
getActivePresets = [
{
"description": "M1.64x512x25",
"id": 799,
"isActive": "1",
"keyName": "M1_64X512X25",
"name": "M1.64x512x25",
"packageId": 835
},
{
"description": "M1.56x448x100",
"id": 797,
"isActive": "1",
"keyName": "M1_56X448X100",
"name": "M1.56x448x100",
"packageId": 835
},
{
"description": "M1.64x512x100",
"id": 801,
"isActive": "1",
"keyName": "M1_64X512X100",
"name": "M1.64x512x100",
"packageId": 835
}
]
getAccountRestrictedActivePresets = []
RESERVED_CAPACITY = [{"id": 1059}]
getItems_RESERVED_CAPACITY = [
{
'id': 12273,
'keyName': 'B1_1X2_1_YEAR_TERM',
'description': 'B1 1x2 1 year term',
'capacity': 12,
'itemCategory': {
'categoryCode': 'reserved_capacity',
'id': 2060,
'name': 'Reserved Capacity',
'quantityLimit': 20,
'sortOrder': ''
},
'prices': [
{
'currentPriceFlag': '',
'hourlyRecurringFee': '.032',
'id': 217561,
'itemId': 12273,
'laborFee': '0',
'locationGroupId': '',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'setupFee': '0',
'sort': 0,
'tierMinimumThreshold': '',
'categories': [
{
'categoryCode': 'reserved_capacity',
'id': 2060,
'name': 'Reserved Capacity',
'quantityLimit': 20,
'sortOrder': ''
}
]
}
]
}
]
getItems_1_IPV6_ADDRESS = [
{
'id': 4097,
'keyName': '1_IPV6_ADDRESS',
'itemCategory': {
'categoryCode': 'pri_ipv6_addresses',
'id': 325,
'name': 'Primary IPv6 Addresses',
'quantityLimit': 0,
'sortOrder': 34
},
'prices': [
{
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 17129,
'itemId': 4097,
'laborFee': '0',
'locationGroupId': '',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0,
'tierMinimumThreshold': '',
'categories': [
{
'categoryCode': 'pri_ipv6_addresses',
'id': 325,
'name': 'Primary IPv6 Addresses',
'quantityLimit': 0,
'sortOrder': 34
}
]
}
]
}
]
getObject = {
'id': 200,
'regions': [{'description': 'WDC01 - Washington, DC - East Coast U.S.',
'keyname': 'WASHINGTON_DC',
'location': {'location': {'id': 37473,
'longName': 'Washington 1',
'name': 'wdc01'}},
'sortOrder': 10}],
'accountRestrictedActivePresets': [],
'activePresets': [
{
'description': 'AC2.8x60x25',
'id': 861,
'isActive': '1',
'keyName': 'AC2_8X60X25',
'name': 'AC2.8x60x25',
'packageId': 835
},
{
'description': 'AC2.8x60x100',
'id': 863,
'isActive': '1',
'keyName': 'AC2_8X60X100',
'name': 'AC2.8x60x100',
'packageId': 835
}],
"items": [{
"capacity": "56",
"description": "56 Cores x 360 RAM x 1.2 TB x 2 GPU P100 [encryption enabled]",
"bundleItems": [
{
"capacity": "1200",
"keyName": "1.2 TB Local Storage (Dedicated Host Capacity)",
"categories": [{
"categoryCode": "dedicated_host_disk"
}]
},
{
"capacity": "242",
"keyName": "2_GPU_P100_DEDICATED",
"hardwareGenericComponentModel": {
"capacity": "16",
"id": 849,
"hardwareComponentType": {
"id": 20,
"keyName": "GPU"
}
},
"categories": [{
"categoryCode": "dedicated_host_ram"
}, {
"capacity": "2",
"description": "2 x 2.0 GHz or higher Cores",
"keyName": "GUEST_CORES_2",
"attributes": [
{
"id": 8261,
"attributeTypeKeyName": "ORDER_SAVES_USAGE_FEES"
}
],
"itemCategory": {
"categoryCode": "guest_core",
"id": 80
}}]
}
],
"prices": [
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2099",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.164",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200269,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": "",
"quantity": ""
},
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2161.97",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.258",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200271,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": 503,
"quantity": ""
}
],
"keyName": "56_CORES_X_484_RAM_X_1_5_TB_X_2_GPU_P100",
"id": 10195,
"itemCategory": {
"categoryCode": "dedicated_virtual_hosts"
}
}]}
| [
[
[
20,
34
],
[
27658,
27672
]
],
[
[
6224,
6242
]
],
[
[
12216,
12235
]
],
[
[
15477,
15489
]
],
[
[
23970,
23987
]
],
[
[
26568,
26581
],
[
27473,
27486
]
],
[
[
26989,
27002
],
[
27528,
27541
]
],
[
[
27432,
27445
]
],
[
[
29391,
29399
]
],
[
[
37558,
37576
]
],
[
[
39427,
39437
]
],
[
[
41077,
41090
]
],
[
[
45796,
45808
],
[
49329,
49341
]
],
[
[
46818,
46832
],
[
49362,
49376
]
],
[
[
49180,
49200
]
],
[
[
49386,
49401
]
],
[
[
52312,
52330
]
],
[
[
55583,
55593
]
],
[
[
56580,
56593
]
],
[
[
62256,
62272
]
],
[
[
62849,
62882
]
],
[
[
62889,
62906
]
],
[
[
62924,
62950
]
],
[
[
64126,
64149
]
],
[
[
65291,
65300
]
]
] |
f = lambda x: x + 1
map(f, [1, 2, 3, 4]) | [
[
[
0,
1
],
[
25,
26
]
]
] |
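A minimal reader sketch for the rows above (an assumption: each def_use_chains entry is a list of (start, end) character spans into the row's code string, the first span being the definition and the later ones its uses; the row dict keys below are illustrative):

    def spans_for_row(row):
        # Slice every recorded span out of the code string so the chained
        # identifiers can be inspected by eye.
        code, chains = row["code"], row["def_use_chains"]
        return [[code[start:end] for start, end in chain] for chain in chains]

For the two-line lambda/map sample above, the single chain links the definition of f to its later use inside map().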
import os
import time
from slackclient import SlackClient
import bot_id
# Instructor and student imports
import wray.slacklib
import joe.slacklib
import chris.slacklib
# constants
try:
AT_BOT = "<@" + bot_id.get_id() + ">"
except TypeError:
pass
# instantiate client
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
def handle_command(command, channel):
"""
Receives commands directed at the bot and determines if they
        are valid commands. If so, it acts on the commands. If not,
        it returns what it needs for clarification.
Need to determine an algorithm for student overloaded commands.
"""
response = wray.slacklib.handle_command(command)
response += joe.slacklib.handle_command(command)
response += chris.slacklib.handle_command(command)
print("["+response+"]")
if len(response) == 0:
response = "Why thank you, I don't know what else to say."
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
        This parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
print(output_list)
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
# return text after the @ mention, whitespace removed
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel']
return None, None
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
if slack_client.rtm_connect():
print("StarterBot connected and running!")
while True:
command, channel = parse_slack_output(slack_client.rtm_read())
print(command,channel)
if command and channel:
handle_command(command, channel)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
| [
[
[
7,
9
],
[
306,
308
]
],
[
[
17,
21
],
[
2144,
2148
]
],
[
[
46,
57
],
[
294,
305
]
],
[
[
66,
72
],
[
208,
214
]
],
[
[
114,
127
],
[
678,
682
]
],
[
[
135,
147
],
[
732,
735
]
],
[
[
155,
169
],
[
785,
790
]
],
[
[
192,
198
],
[
1494,
1500
],
[
1634,
1640
]
],
[
[
279,
291
],
[
1838,
1850
],
[
1987,
1999
],
[
961,
973
]
],
[
[
347,
361
],
[
2099,
2113
]
],
[
[
1081,
1099
],
[
1968,
1986
]
],
[
[
1759,
1779
],
[
2155,
2175
]
],
[
[
1949,
1956
],
[
2030,
2037
],
[
2062,
2069
],
[
2114,
2121
]
],
[
[
1958,
1965
],
[
2038,
2045
],
[
2074,
2081
],
[
2123,
2130
]
]
] |
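A standalone illustration of the mention-splitting done in parse_slack_output above (the AT_BOT value here is made up for the example):

    AT_BOT = "<@U0123456>"
    text = "hey <@U0123456> do the assignment"
    # Everything after the @-mention, stripped and lowercased, becomes the
    # command string handed to handle_command().
    command = text.split(AT_BOT)[1].strip().lower()
    print(command)  # -> 'do the assignment'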
"""Auto-generated file, do not edit by hand. PH metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_PH = PhoneMetadata(id='PH', country_code=63, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='1800\\d{7,9}|(?:2|[89]\\d{4})\\d{5}|[2-8]\\d{8}|[28]\\d{7}', possible_length=(6, 8, 9, 10, 11, 12, 13), possible_length_local_only=(4, 5, 7)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:(?:2[3-8]|3[2-68]|4[2-9]|5[2-6]|6[2-58]|7[24578])\\d{3}|88(?:22\\d\\d|42))\\d{4}|2\\d{5}(?:\\d{2})?|8[2-8]\\d{7}', example_number='21234567', possible_length=(6, 8, 9, 10), possible_length_local_only=(4, 5, 7)),
mobile=PhoneNumberDesc(national_number_pattern='(?:8(?:1[37]|9[5-8])|9(?:0[5-9]|1[0-24-9]|[2357]\\d|4[2-9]|6[0-35-9]|8[189]|9[1-9]))\\d{7}', example_number='9051234567', possible_length=(10,)),
toll_free=PhoneNumberDesc(national_number_pattern='1800\\d{7,9}', example_number='180012345678', possible_length=(11, 12, 13)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d)(\\d{5})', format='\\1 \\2', leading_digits_pattern=['2'], national_prefix_formatting_rule='(0\\1)'),
NumberFormat(pattern='(\\d)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2'], national_prefix_formatting_rule='(0\\1)'),
NumberFormat(pattern='(\\d{4})(\\d{4,6})', format='\\1 \\2', leading_digits_pattern=['3(?:23|39|46)|4(?:2[3-6]|[35]9|4[26]|76)|544|88[245]|(?:52|64|86)2', '3(?:230|397|461)|4(?:2(?:35|[46]4|51)|396|4(?:22|63)|59[347]|76[15])|5(?:221|446)|642[23]|8(?:622|8(?:[24]2|5[13]))'], national_prefix_formatting_rule='(0\\1)'),
NumberFormat(pattern='(\\d{5})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['346|4(?:27|9[35])|883', '3469|4(?:279|9(?:30|56))|8834'], national_prefix_formatting_rule='(0\\1)'),
NumberFormat(pattern='(\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2'], national_prefix_formatting_rule='(0\\1)'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[3-7]|8[2-8]'], national_prefix_formatting_rule='(0\\1)'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[89]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{4})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1']),
NumberFormat(pattern='(\\d{4})(\\d{1,2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['1'])])
| [
[
[
88,
100
],
[
1104,
1116
],
[
1241,
1253
],
[
1390,
1402
],
[
1716,
1728
],
[
1909,
1921
],
[
2058,
2070
],
[
2221,
2233
],
[
2374,
2386
],
[
2484,
2496
]
],
[
[
102,
117
],
[
238,
253
],
[
437,
452
],
[
704,
719
],
[
905,
920
]
],
[
[
119,
132
],
[
154,
167
]
],
[
[
134,
151
]
]
] |
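The metadata above is consumed by the phonenumbers package; a hedged usage sketch using the example mobile number from the description (the exact formatted output depends on the library version, so it is not asserted here):

    import phonenumbers

    # '9051234567' is the example_number given for the mobile pattern above.
    num = phonenumbers.parse("9051234567", "PH")
    print(phonenumbers.is_valid_number(num))
    print(phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.INTERNATIONAL))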
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils.timezone import now as tz_now
@login_required
def start_page(request):
# dummy view to illustrate all custom template filters and tags
obj = {
"created": tz_now() - timedelta(days=3),
"content": f"""
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
<figure>
<img src="{settings.STATIC_URL}site/img/logo.svg" alt="" />
<figcaption>Logo</figcaption>
</figure>
<p>Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?</p>
""",
"website": "https://docs.djangoproject.com/en/dev/howto/custom-template-tags/",
"content_to_parse": u"""
{% if request.user.is_authenticated %}
Hello, {{ request.user.username }}!
{% else %}
Hello anonymous visitor!
{% endif %}
""",
}
return render(request, "index.html", {
"object": obj,
}) | [
[
[
47,
63
]
],
[
[
85,
94
],
[
424,
433
]
],
[
[
120,
128
],
[
969,
977
]
],
[
[
172,
186
],
[
274,
288
]
],
[
[
216,
222
],
[
2314,
2320
]
],
[
[
257,
270
],
[
413,
419
]
],
[
[
293,
303
]
]
] |
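The view above only assembles data for custom template filters and tags to consume. A hypothetical companion filter (not part of the file above) that a template could apply to object.created might look like this, registered in an app's templatetags package:

    from django import template
    from django.utils.timezone import now

    register = template.Library()

    @register.filter
    def days_since(value):
        # Whole number of days between now and the given datetime.
        return (now() - value).days

In a template it would be used as {{ object.created|days_since }}.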
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import inspect
from athena.onnx import constants
class athena_op:
_OPSETS = collections.OrderedDict()
_MAPPING = None
def __init__(self, name,onnx_op=None, domain=constants.ONNX_DOMAIN, **kwargs):
if not isinstance(name, list):
name = [name]
self.name = name
if not isinstance(onnx_op,list):
onnx_op=[onnx_op]*len(name)
self.onnx_op=onnx_op
self.domain = domain
self.kwargs = kwargs
def __call__(self, func):
opset = athena_op._OPSETS.get(self.domain)
if not opset:
opset = []
athena_op._OPSETS[self.domain] = opset
for k, v in inspect.getmembers(func, inspect.ismethod):
if k.startswith("version_"):
version = int(k.replace("version_", ""))
while version >= len(opset):
opset.append({})
opset_dict = opset[version]
for i,name in enumerate(self.name):
opset_dict[name] = (v,self.onnx_op[i], self.kwargs)
return func
@staticmethod
def get_opsets():
return athena_op._OPSETS
@staticmethod
def create_mapping(max_onnx_opset_version):
mapping = {constants.ONNX_DOMAIN: max_onnx_opset_version}
ops_mapping = {}
for domain, opsets in athena_op.get_opsets().items():
for target_opset, op_map in enumerate(opsets):
m = mapping.get(domain)
if m:
if target_opset <= m and op_map:
ops_mapping.update(op_map)
athena_op._MAPPING = ops_mapping
return ops_mapping
@staticmethod
def find_effective_op(name):
"""Find the effective version of an op create_mapping.
This is used if we need to compose ops from other ops where we'd need to find the
op that is doing to be used in the final graph, for example there is a custom op
that overrides a onnx op ...
:param name: The operator name.
"""
map_info = athena_op._MAPPING.get(name)
if map_info is None:
return None
return map_info
| [
[
[
122,
137
]
],
[
[
161,
169
]
],
[
[
193,
207
]
],
[
[
231,
247
]
],
[
[
256,
267
],
[
352,
363
]
],
[
[
275,
282
],
[
940,
947
],
[
965,
972
]
],
[
[
308,
317
],
[
448,
457
],
[
1512,
1521
]
],
[
[
326,
335
],
[
789,
798
],
[
881,
890
],
[
1408,
1417
],
[
1614,
1623
],
[
1880,
1889
],
[
2352,
2361
]
]
] |
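A hypothetical handler showing how the athena_op decorator above is intended to be used: version_* classmethods are collected into _OPSETS, keyed by domain and indexed by opset version (the ctx/node parameter names are illustrative only):

    @athena_op("Abs", onnx_op="Abs")
    class AbsOp:
        @classmethod
        def version_1(cls, ctx, node, **kwargs):
            # conversion logic for opset version 1 would go here
            pass

    # After decoration, athena_op.get_opsets() maps the default domain to a list
    # whose entry at index 1 contains {'Abs': (AbsOp.version_1, 'Abs', {})}.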
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import QComponent
class MyQComponent1(QComponent):
"""Demonstration1 - Straight segment with variable width/length"""
### def __init__() <- comes from QComponent
### Initializes base variables such as self.id, self.name and self.options
### Also launches the first execution of make()
### def rebuild() <- comes from QComponent
### Clear output from previous runs of make() (geom/pin/net) and re-runs it
def make(self):
"""calculates the geometries of the QComponent"""
rect = draw.rectangle(0.5, 0.1, 0, 0) #width, height, pos_x, pos_y
        # add_qgeometry() expects shapely geometry, hence the use of the draw module above
self.add_qgeometry('poly', {'my_polygon': rect},
layer=1,
subtract=False)
self.add_pin('in', rect.exterior.coords[:-3:-1],
0.1) #name, tangent, width
class MyQComponent2(QComponent):
"""Demonstration2 - Straight segment with variable width/length"""
# Your knobs to modify the cell behavior
default_options = Dict(width='0.5mm',
height='0.1mm',
pos_x='0mm',
pos_y='0mm',
layer='1')
"""Default drawing options"""
def make(self):
"""calculates the geometries of the QComponent"""
p = self.parse_options(
) # short-handle alias for the options interpreter
rect = draw.rectangle(p.width, p.height, p.pos_x, p.pos_y)
self.add_qgeometry('poly', {'my_polygon': rect},
layer=p.layer,
subtract=False)
self.add_pin('in', rect.exterior.coords[:-3:-1], p.height)
class MyQComponent3(QComponent):
"""Demonstration2 - Straight segment with variable width/length"""
default_options = Dict(width='0.5mm',
height='0.1mm',
pos_x='0mm',
pos_y='0mm',
layer='1')
"""Default drawing options"""
# Name prefix of component + import of renderer-specific default_options
component_metadata = Dict(
short_name='Trace',
_qgeometry_table_path='False', #wirebonds
_qgeometry_table_poly='True',
_qgeometry_table_junction='False') #gds imports and analysis inputs
"""Component metadata"""
def make(self):
"""calculates the geometries of the QComponent"""
p = self.parse_options() # short-handle alias. Options interpreter
rect = draw.rectangle(p.width, p.height, p.pos_x, p.pos_y)
self.add_qgeometry('poly', {'my_polygon': rect},
layer=p.layer,
subtract=False)
self.add_pin('in', rect.exterior.coords[:-3:-1], p.height)
class MyQComponent4(QComponent):
"""Demonstration3 - Straight segment with variable width/length"""
default_options = Dict(width='0.5mm',
height='0.1mm',
gap='0.02mm',
pos_x='0mm',
pos_y='0mm',
layer='1')
"""Default drawing options"""
# Name prefix of component + import of renderer-specific default_options
component_metadata = Dict(
short_name='Trace',
_qgeometry_table_path='True', #wirebonds
_qgeometry_table_poly='False',
_qgeometry_table_junction='False') #gds
"""Component metadata"""
def make(self):
"""calculates the geometries of the QComponent"""
p = self.parse_options()
line = draw.LineString([(-p.width / 2, 0), (p.width / 2, 0)])
line = draw.translate(line, p.pos_x, p.pos_y)
self.add_qgeometry('path', {'trace': line},
width=p.height,
layer=p.layer,
subtract=False)
line2 = draw.LineString([((-p.width / 2) - 2 * p.gap, 0),
((p.width / 2) + 2 * p.gap, 0)])
line2 = draw.translate(line2, p.pos_x, p.pos_y)
self.add_qgeometry('path', {'cut': line2},
width=p.height + 2 * p.gap,
layer=p.layer,
subtract=True)
self.add_pin('in', line.coords[::-1], p.height, input_as_norm=True)
| [
[
[
534,
538
],
[
1108,
1112
],
[
2064,
2068
],
[
3172,
3176
],
[
4250,
4254
],
[
4320,
4324
],
[
4555,
4559
],
[
4687,
4691
]
],
[
[
540,
544
],
[
1662,
1666
],
[
2454,
2458
],
[
2772,
2776
],
[
3562,
3566
],
[
3921,
3925
]
],
[
[
584,
594
],
[
617,
627
],
[
1510,
1520
],
[
2347,
2357
],
[
3455,
3465
]
],
[
[
603,
616
]
],
[
[
1496,
1509
]
],
[
[
2333,
2346
]
],
[
[
3441,
3454
]
]
] |
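A short usage sketch for the component classes above (assumes a planar qiskit-metal design; component and option names here are illustrative):

    from qiskit_metal import designs

    design = designs.DesignPlanar()
    trace = MyQComponent2(design, 'trace_1', options=dict(width='1mm', pos_x='2mm'))
    # parse_options() returns the options with strings such as '1mm' converted to
    # numeric values in the design's default units.
    print(trace.parse_options())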
import math
from typing import Dict, Union, List
import torch
from torch import nn, Tensor
from .. import properties as p
def fix_msk(mol: Dict[str, Tensor], idx: Tensor):
_, atm, dim = mol[p.pos].size()
msk = torch.zeros([atm, dim], dtype=torch.bool, device=idx.device)
msk[idx, :] = True
return msk
class Fix(nn.Module):
idx: Tensor
def __init__(self, idx: Union[Tensor, List[int]]):
super().__init__()
if isinstance(idx, list):
idx = torch.tensor(idx)
self.register_buffer('idx', idx)
def forward(self, mol: Dict[str, Tensor]):
out = mol.copy()
msk = fix_msk(mol, self.idx)[None, :, :]
if p.fix_msk not in out:
out[p.fix_msk] = msk
else:
out[p.fix_msk] = out[p.fix_msk] | msk
return out
class FixGen(nn.Module):
pbc: Tensor
idx: Tensor
def __init__(self, idx: Union[Tensor, List[int]], num_dim: int):
super().__init__()
if isinstance(idx, list):
idx = torch.tensor(idx, dtype=torch.long)
n = idx.numel() * num_dim
self.register_buffer('idx', idx)
self.register_buffer('pbc', torch.ones(n) * math.inf)
def forward(self, mol: Dict[str, Tensor]):
msk = fix_msk(mol, self.idx)
return mol[p.pos][:, msk]
| [
[
[
7,
11
],
[
1195,
1199
]
],
[
[
31,
35
],
[
141,
145
],
[
581,
585
],
[
1233,
1237
]
],
[
[
37,
42
],
[
388,
393
],
[
912,
917
]
],
[
[
44,
48
],
[
402,
406
],
[
926,
930
]
],
[
[
56,
61
],
[
220,
225
],
[
250,
255
],
[
494,
499
],
[
1032,
1037
],
[
1056,
1061
],
[
1179,
1184
]
],
[
[
80,
82
],
[
331,
333
],
[
839,
841
]
],
[
[
84,
90
],
[
352,
358
],
[
860,
866
],
[
876,
882
],
[
151,
157
],
[
165,
171
],
[
394,
400
],
[
591,
597
],
[
918,
924
],
[
1243,
1249
]
],
[
[
106,
121
],
[
196,
197
],
[
686,
687
],
[
724,
725
],
[
788,
789
],
[
771,
772
],
[
1309,
1310
]
],
[
[
128,
135
],
[
640,
647
],
[
1267,
1274
]
],
[
[
327,
330
]
],
[
[
832,
838
]
]
] |
import numpy as np
import sys
sys.dont_write_bytecode = True
def ExtractCameraPose(E, K):
U, S, V_T = np.linalg.svd(E)
W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
# print("E svd U", U)
# print("E svd S", S)
# print("E svd U[:, 2]", U[:, 2])
R = []
C = []
R.append(np.dot(U, np.dot(W, V_T)))
R.append(np.dot(U, np.dot(W, V_T)))
R.append(np.dot(U, np.dot(W.T, V_T)))
R.append(np.dot(U, np.dot(W.T, V_T)))
C.append(U[:, 2])
C.append(-U[:, 2])
C.append(U[:, 2])
C.append(-U[:, 2])
for i in range(4):
if (np.linalg.det(R[i]) < 0):
R[i] = -R[i]
C[i] = -C[i]
return R, C
| [
[
[
7,
18
],
[
109,
111
],
[
134,
136
],
[
305,
307
],
[
315,
317
],
[
345,
347
],
[
355,
357
],
[
385,
387
],
[
395,
397
],
[
427,
429
],
[
437,
439
],
[
582,
584
]
],
[
[
26,
29
],
[
31,
34
]
],
[
[
67,
84
]
]
] |
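The SVD of the essential matrix yields four candidate camera poses; the correct one is normally picked afterwards with a cheirality check (points must lie in front of both cameras). A small sanity-check sketch, noting that K is unused by the function so None can be passed:

    import numpy as np

    R_true = np.eye(3)
    t_true = np.array([1.0, 0.0, 0.0])
    # Skew-symmetric (cross-product) matrix of t_true, so E = [t]x R is a valid essential matrix.
    t_skew = np.array([[0.0, -t_true[2], t_true[1]],
                       [t_true[2], 0.0, -t_true[0]],
                       [-t_true[1], t_true[0], 0.0]])
    E = t_skew @ R_true
    R_candidates, C_candidates = ExtractCameraPose(E, None)
    for R_i in R_candidates:
        # every candidate is returned as a proper rotation (det = +1)
        print(np.isclose(np.linalg.det(R_i), 1.0))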
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create a new token for a user"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""Retrieve and return authenticated user"""
# authentication_classes would have populated the request with the
# authenticated user if they provided a valid token
return self.request.user
| [
[
[
27,
35
],
[
262,
270
],
[
576,
584
]
],
[
[
37,
51
],
[
717,
731
]
],
[
[
53,
64
],
[
780,
791
]
],
[
[
108,
123
],
[
391,
406
]
],
[
[
160,
172
],
[
515,
527
]
],
[
[
203,
217
],
[
352,
366
],
[
672,
686
]
],
[
[
219,
238
],
[
472,
491
]
],
[
[
247,
261
]
],
[
[
375,
390
]
],
[
[
561,
575
]
]
] |
"""myblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
[
[
657,
662
],
[
728,
733
]
],
[
[
687,
691
],
[
713,
717
]
],
[
[
693,
704
]
]
] |
a = [0, 1, 2]
b = [0, 1]
for a, b in zip(a, b):
print(a, b) | [
[
[
0,
1
],
[
41,
42
]
],
[
[
14,
15
],
[
44,
45
]
],
[
[
29,
30
],
[
58,
59
]
],
[
[
32,
33
],
[
61,
62
]
]
] |
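For reference, the loop above prints 0 0 and then 1 1: zip stops at the shorter list, and the loop targets rebind the original names, so after the loop both a and b equal 1. A quick check:

    a = [0, 1, 2]; b = [0, 1]
    for a, b in zip(a, b):
        pass
    print(a, b)  # -> 1 1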
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 7 09:36:45 2019
@author: MyPC
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import math
import pymssql
import numpy as np
import copy
import re
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
from pyecharts import Map, Geo , Timeline
def get_data_signalmachine():
df = pd.read_excel('Data.xlsx' , sheet_name='example')
# df.fillna(0 , inplace = True)
# df.set_index('year' , inplace = True)
df.drop(columns = ['NO' , '首次售出年份' , '总计'] , inplace = True)
df.rename(columns = {'行标签':'city'} , inplace = True)
df.set_index('city' , inplace = True)
df = df.T
df.rename(columns = {'合计' : 'total'} , inplace = True)
# print(df)
return df
def plot_map(df):
    # maptype='china' only displays province-level regions and municipalities
    # the data may only contain province and municipality names
# province_distribution = {'青岛': 22, '龙口': 37.56, '河北': 21, '辽宁': 12, '江西': 6, '上海': 20, '安徽': 10, '江苏': 16, '湖南': 9, '浙江': 13, '海南': 2, '广东': 22, '湖北': 8, '黑龙江': 11, '澳门': 1, '陕西': 11, '四川': 7, '内蒙古': 3, '重庆': 3, '云南': 6, '贵州': 2, '吉林': 3, '山西': 12, '山东': 11, '福建': 4, '青海': 1, '舵主科技,质量保证': 1, '天津': 1, '其他': 1}
# provice=list(province_distribution.keys())
# values=list(province_distribution.values())
years = list(df.index)
geos = []
timeline = Timeline(width=1700,height=900,is_auto_play=True, timeline_bottom=-10,timeline_symbol_size=20,timeline_play_interval=400,timeline_left=20,timeline_right=100 , \
is_timeline_show = False )
for index in range(len(years)):
cities = list(df.columns)
cities.remove('total')
values = list(df.loc[years[index] , :])
total_num = values[-1]
del(values[-1])
# print(cities)
# print(values)
geos.append(Geo( str(int(total_num)), title_top="10%" , title_text_size=50 , subtitle = years[index] +" , subtitle", \
subtitle_text_size = 23 , subtitle_color="white", \
title_color="red", title_pos="center", width=1200, height=600, \
background_color='#404a59'))
# type="effectScatter", is_random=True, effect_scale=5 使点具有发散性
geos[index].add("title level1", cities, values, type="effectScatter", maptype='china' , is_random=True, effect_scale=3, is_selected = True,is_toolbox_show = True ,is_more_utils =True,\
visual_text_color="#fff", symbol_size=10, is_label_show = True , legend_orient = 'left' ,is_legend_show = False, legend_top = 'bottom' , label_formatter = '{b}' , \
is_visualmap=True, is_roam=True , label_text_color="#00FF00" , is_piecewise=True, label_text_size = 7,visual_range=[1, 300] , \
geo_cities_coords = {'赣江': [115.934192 , 28.826235] , '红河州' : [103.381549,23.369996] , '蒙自' : [103.371546,23.40208] , '海安' : [120.469259,32.544553] , \
'济阳' : [117.023094,36.965519] , '库车' : [82.970183,41.733785] , '文山-砚山' : [104.334442,23.621612] , '文安':[116.455985,38.891083] , '罗平':[104.309188,24.890519] , \
'宣城' : [118.762662,30.957007] , '古田' : [118.747401,26.596702] , '泗阳':[118.699691,33.723524] , } , \
pieces=[
{"min":0.1, "max": 50 , "label": "0-50"},
{"min": 51, "max": 100 , "label": "51-100"},
{"min": 101, "max": 200 , "label": "101-200"},
{"min":201, "max": 500, "label": "201-500"},
{"min":500, "max": 2900, "label": ">500"}, ] )
geos[index].show_config()
geos[index].render("xxxx售出数量.html")
        # timeline definition
timeline.add(geos[index],years[index] )
timeline.render('final_graph.html')
def main():
df = get_data_signalmachine()
# print(df)
plot_map(df)
if __name__ == "__main__":
main() | [
[
[
96,
108
],
[
407,
409
]
],
[
[
117,
141
]
],
[
[
150,
160
]
],
[
[
170,
174
]
],
[
[
183,
190
]
],
[
[
199,
210
]
],
[
[
219,
223
]
],
[
[
232,
234
]
],
[
[
256,
269
]
],
[
[
304,
320
]
],
[
[
344,
347
]
],
[
[
349,
352
],
[
1854,
1857
]
],
[
[
355,
363
],
[
1370,
1378
]
],
[
[
371,
393
],
[
3748,
3770
]
],
[
[
821,
829
],
[
3795,
3803
]
],
[
[
3730,
3734
],
[
3845,
3849
]
]
] |
"""
This file contains the implementation of the main class object: anaRDPacct --- an analytical moment accountant
that keeps track the effects of a hetereogeneous sequence of randomized algorithms using the RDP technique.
In particular it supports amplification of RDP by subsampling without replacement and the amplification of RDP
by poisson sampling, but unfortunately not (yet) together.
"""
import numpy as np
from scipy.optimize import minimize_scalar
import sys
sys.path.append('..')
import autodp
from autodp import utils, rdp_bank
from autodp.privacy_calibrator import subsample_epsdelta
import scipy
import math
def general_upperbound(func, mm, prob):
"""
    :param func: the RDP of the base mechanism, as a function of alpha
    :param mm: alpha in RDP
    :param prob: sample probability
    :return: the upper bound in Theorem 1 of the 2019 ICML paper; it applies to the general case (including Poisson sampling)
    k_approx = 100: a k-term approximation is applied here
"""
def cgf(x):
return (x - 1) * func(x)
if np.isinf(func(mm)):
return np.inf
if mm == 1 or mm == 0:
return 0
cur_k = np.minimum(50, mm - 1) # choose small k-approx for general upperbound (here is 50) in case of scipy-accuracy
log_term_1 = mm * np.log(1 - prob)
#logBin = utils.get_binom_coeffs(mm)
log_term_2 = np.log(3) - func(mm) + mm * utils.stable_logsumexp_two(np.log(1 - prob), np.log(prob) + func(mm))
neg_term_3 = [np.log(scipy.special.comb(mm,l)) + np.log(3) + (mm - l) * np.log(1 - prob) + l * np.log(prob) +
utils.stable_log_diff_exp((l - 1) * func(mm), cgf(l))[1] for l in
range(3, cur_k + 1)]
neg_term_4 = np.log(mm*(mm - 1)/2) + 2 * np.log(prob) + (mm - 2) * np.log(
1 - prob) + utils.stable_log_diff_exp(np.log(3) + func(mm), func(2))[1]
neg_term_5 = np.log(2) + np.log(prob) + np.log(mm) + (mm - 1) * np.log(1 - prob)
neg_term_6 = mm * np.log(1 - prob) + np.log(3) - func(mm)
pos_term = utils.stable_logsumexp([log_term_1, log_term_2])
neg_term_3.append(neg_term_4)
neg_term_3.append(neg_term_5)
neg_term_3.append(neg_term_6)
neg_term = utils.stable_logsumexp(neg_term_3)
bound = utils.stable_log_diff_exp(pos_term, neg_term)[1]
return bound
def fast_subsampled_cgf_upperbound(func, mm, prob, deltas_local):
    # evaluate the fast CGF bound for the subsampled mechanism
# func evaluates the RDP of the base mechanism
# mm is alpha. NOT lambda.
return np.inf
if np.isinf(func(mm)):
return np.inf
if mm == 1:
return 0
secondterm = np.minimum(np.minimum((2) * np.log(np.exp(func(np.inf)) - 1)
+ np.minimum(func(2), np.log(4)),
np.log(2) + func(2)),
np.log(4) + 0.5 * deltas_local[int(2 * np.floor(2 / 2.0)) - 1]
+ 0.5 * deltas_local[int(2 * np.ceil(2 / 2.0)) - 1]
) + 2 * np.log(prob) + np.log(mm) + np.log(mm - 1) - np.log(2)
if mm == 2:
return utils.stable_logsumexp([0, secondterm])
# approximate the remaining terms using a geometric series
logratio1 = np.log(prob) + np.log(mm) + func(mm)
logratio2 = logratio1 + np.log(np.exp(func(np.inf)) - 1)
logratio = np.minimum(logratio1, logratio2)
if logratio1 > logratio2:
coeff = 1
else:
coeff = 2
if mm == 3:
return utils.stable_logsumexp([0, secondterm, np.log(coeff) + 3 * logratio])
# Calculate the sum of the geometric series starting from the third term. This is a total of mm-2 terms.
if logratio < 0:
geometric_series_bound = np.log(coeff) + 3 * logratio - np.log(1 - np.exp(logratio)) \
+ np.log(1 - np.exp((mm - 2) * logratio))
elif logratio > 0:
geometric_series_bound = np.log(coeff) + 3 * logratio + (mm-2) * logratio - np.log(np.exp(logratio) - 1)
else:
geometric_series_bound = np.log(coeff) + np.log(mm - 2)
# we will approximate using (1+h)^mm
logh1 = np.log(prob) + func(mm - 1)
logh2 = logh1 + np.log(np.exp(func(np.inf)) - 1)
binomial_series_bound1 = np.log(2) + mm * utils.stable_logsumexp_two(0, logh1)
binomial_series_bound2 = mm * utils.stable_logsumexp_two(0, logh2)
tmpsign, binomial_series_bound1 \
= utils.stable_sum_signed(True, binomial_series_bound1, False, np.log(2)
+ utils.stable_logsumexp([0, logh1 + np.log(mm), 2 * logh1 + np.log(mm)
+ np.log(mm - 1) - np.log(2)]))
tmpsign, binomial_series_bound2 \
= utils.stable_sum_signed(True, binomial_series_bound2, False,
utils.stable_logsumexp([0, logh2 + np.log(mm), 2 * logh2 + np.log(mm)
+ np.log(mm - 1) - np.log(2)]))
remainder = np.min([geometric_series_bound, binomial_series_bound1, binomial_series_bound2])
return utils.stable_logsumexp([0, secondterm, remainder])
def fast_poission_subsampled_cgf_upperbound(func, mm, prob):
    # evaluate the fast CGF bound for the subsampled mechanism
# func evaluates the RDP of the base mechanism
# mm is alpha. NOT lambda.
if np.isinf(func(mm)):
return np.inf
if mm == 1:
return 0
# Bound #1: log [ (1-\gamma + \gamma e^{func(mm)})^mm ]
bound1 = mm * utils.stable_logsumexp_two(np.log(1-prob), np.log(prob) + func(mm))
# Bound #2: log [ (1-gamma)^alpha E [ 1 + gamma/(1-gamma) E[p/q]]^mm ]
# log[ (1-gamma)^\alpha { 1 + alpha gamma / (1-gamma) + gamma^2 /(1-gamma)^2 * alpha(alpha-1) /2 e^eps(2))
# + alpha \choose 3 * gamma^3 / (1-gamma)^3 / e^(-2 eps(alpha)) * (1 + gamma /(1-gamma) e^{eps(alpha)}) ^ (alpha - 3) }
# ]
if mm >= 3:
bound2 = utils.stable_logsumexp([mm * np.log(1-prob), (mm-1) * np.log(1-prob) + np.log(mm) + np.log(prob),
(mm-2)*np.log(1-prob) + 2 * np.log(prob) + np.log(mm) + np.log(mm-1) + func(2),
np.log(mm) + np.log(mm-1) + np.log(mm-2) - np.log(3*2) + 3 * np.log(prob)
+ (mm-3)*np.log(1-prob) + 2 * func(mm) +
(mm-3) * utils.stable_logsumexp_two(0, np.log(prob) - np.log(1-prob) + func(mm))])
else:
bound2 = bound1
#print('www={} func={} mm={}'.format(np.exp(func(mm))-1),func, mm)
#print('bound1 ={} bound2 ={}'.format(bound1,bound2))
return np.minimum(bound1,bound2)
def fast_k_subsample_upperbound(func, mm, prob, k):
"""
    :param func: the RDP of the base mechanism, as a function of alpha
    :param mm: alpha in RDP
    :param prob: sample probability
    :param k: number of approximation terms
    :return: the k-term approximate upper bound in Theorem 11 of ICML-19
"""
def cgf(x):
return (x - 1) * func(x)
if np.isinf(func(mm)):
return np.inf
if mm == 1:
return 0
#logBin = utils.get_binom_coeffs(mm)
cur_k = np.minimum(k, mm - 1)
if (2 * cur_k) >= mm:
exact_term_1 = (mm - 1) * np.log(1 - prob) + np.log(mm * prob - prob + 1)
exact_term_2 = [np.log(scipy.special.comb(mm,l)) + (mm - l) * np.log(1 - prob) + l * np.log(prob) + cgf(l) for l in
range(2, mm + 1)]
exact_term_2.append(exact_term_1)
bound = utils.stable_logsumexp(exact_term_2)
return bound
s, mag1 = utils.stable_log_diff_exp(0, -func(mm - cur_k))
new_log_term_1 = np.log(1 - prob) * mm + mag1
new_log_term_2 = -func(mm - cur_k) + mm * utils.stable_logsumexp_two(np.log(1 - prob),
np.log(prob) + func(mm - cur_k))
new_log_term_3 = [np.log(scipy.special.comb(mm,l)) + (mm - l) * np.log(1 - prob) + l * np.log(prob) +
utils.stable_log_diff_exp((l - 1) * func(mm - cur_k), cgf(l))[1] for l in
range(2, cur_k + 1)]
if len(new_log_term_3) > 0:
new_log_term_3 = utils.stable_logsumexp(new_log_term_3)
else:
return utils.stable_logsumexp_two(new_log_term_1, new_log_term_2)
new_log_term_4 = [np.log(scipy.special.comb(mm,mm-l)) + (mm - l) * np.log(1 - prob) + l * np.log(prob) +
utils.stable_log_diff_exp(cgf(l), (l - 1) * func(mm - cur_k))[1] for l in
range(mm - cur_k + 1, mm + 1)]
new_log_term_4.append(new_log_term_1)
new_log_term_4.append(new_log_term_2)
new_log_term_4 = utils.stable_logsumexp(new_log_term_4)
s, new_log_term_5 = utils.stable_log_diff_exp(new_log_term_4, new_log_term_3)
new_bound = new_log_term_5
return new_bound
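# --- Usage sketch (not part of the original autodp source; values are illustrative) ---
# For a Gaussian mechanism with noise multiplier sigma, the RDP is
# func(alpha) = alpha / (2 * sigma ** 2).  The bounds above and the accountant
# defined below can then be driven like this:
#
#     gaussian_rdp = lambda alpha: alpha / (2 * 1.1 ** 2)
#     acct = anaRDPacct()
#     acct.compose_subsampled_mechanism(gaussian_rdp, prob=0.01, coeff=1000)
#     eps = acct.get_eps(delta=1e-5)
# ---------------------------------------------------------------------------------------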
class anaRDPacct:
"""A class that keeps track of the analytical expression of the RDP --- 1/(alpha-1)*CGF of the privacy loss R.V."""
def __init__(self, m=100, tol=0.1, m_max=500, m_lin_max=10000, approx = False, verbose=False):
# m_max indicates the number that we calculate binomial coefficients exactly up to.
# beyond that we use Stirling approximation.
# ------ Class Attributes -----------
self.m = m # default number of binomial coefficients to precompute
self.m_max = m_max # An upper bound of the quadratic dependence
self.m_lin_max = m_lin_max # An upper bound of the linear dependence.
self.verbose = verbose
self.approx = approx
self.lambs = np.linspace(1, self.m, self.m).astype(int) # Corresponds to \alpha = 2,3,4,5,.... for RDP
self.alphas = np.linspace(1, self.m, self.m).astype(int)
self.RDPs_int = np.zeros_like(self.alphas, float)
self.n=0
self.RDPs = [] # analytical CGFs
self.coeffs = []
self.RDP_inf = .0 # This is effectively for pure DP.
self.logBinomC = utils.get_binom_coeffs(self.m + 1) # The logBinomC is only needed for subsampling mechanisms.
self.idxhash = {} # save the index of previously used algorithms
self.cache = {} # dictionary to save results from previously seen algorithms
self.deltas_cache = {} # dictionary to save results of all discrete derivative path
self.evalRDP = lambda x: 0
self.flag = True # a flag indicating whether evalCGF is out of date
self.flag_subsample = False # a flag to indicate whether we need to expand the logBinomC.
self.tol = tol
# ---------- Methods ------------
def build_zeroth_oracle(self):
self.evalRDP = lambda x: sum([c * item(x) for (c, item) in zip(self.coeffs, self.RDPs)])
def plot_rdp(self):
if not self.flag:
self.build_zeroth_oracle()
self.flag = True
import matplotlib.pyplot as plt
plt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
x = range(0,self.m,1)
y = [self.evalRDP(item) for item in x]
plt.loglog(x, y)
plt.show()
def plot_cgf_int(self):
import matplotlib.pyplot as plt
plt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(self.alphas, self.RDPs_int)
plt.xlabel(r'$\lambda$')
plt.ylabel('CGF')
plt.show()
def plot_rdp_int(self):
import matplotlib.pyplot as plt
plt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
plt.loglog(self.alphas, self.RDPs_int)
if not self.flag:
self.build_zeroth_oracle()
self.flag = True
x = range(1,self.m_lin_max,1)
y = [self.evalRDP(item) for item in x]
plt.loglog(x, y)
plt.xlabel(r'$\alpha$')
plt.ylabel(r'RDP $\epsilon$')
plt.show()
def get_rdp(self,alphas):
# alphas is a numpy array or a list of numbers
# we will return a numpy array of the corresponding RDP
if not self.flag:
self.build_zeroth_oracle()
self.flag = True
alphas = np.array(alphas)
assert(np.all(alphas >= 1))
rdp_list = []
for alpha in alphas:
rdp_list.append(self.evalRDP(alpha))
return np.array(rdp_list)
def get_eps(self, delta): # minimize over \lambda
if not self.flag:
self.build_zeroth_oracle()
self.flag = True
if delta<0 or delta > 1:
print("Error! delta is a probability and must be between 0 and 1")
if delta == 0:
return self.RDP_inf
else:
def fun(x): # the input the RDP's \alpha
if x <= 1:
return np.inf
else:
return np.log(1 / delta)/(x-1) + self.evalRDP(x)
def fun_int(i): # the input is RDP's \alpha in integer
if i <= 1 | i >= len(self.RDPs_int):
return np.inf
else:
return np.log(1 / delta) / (i-1) + self.RDPs_int[i - 1]
# When do we have computational constraints?
# Only when we have subsampled items.
# First check if the forward difference is positive at self.m, or if it is infinite
while (self.m<self.m_max) and (not np.isposinf(fun(self.m))) and (fun_int(self.m-1)-fun_int(self.m-2) < 0):
                # If so, double m, expand logBinomC until the forward difference is positive
if self.flag_subsample:
# The following line is m^2 time.
self.logBinomC = utils.get_binom_coeffs(self.m*2+1)
# Update deltas_caches
for key, val in self.deltas_cache.items():
if type(key) is tuple:
func_tmp = key[0]
else:
func_tmp = key
cgf = lambda x: x*func_tmp(x+1)
deltas,signs_deltas = utils.get_forward_diffs(cgf,self.m*2)
self.deltas_cache[key] = [deltas, signs_deltas]
new_alphas = range(self.m + 1, self.m * 2 + 1, 1)
self.alphas = np.concatenate((self.alphas, np.array(new_alphas))) # array of integers
self.m = self.m * 2
mm = np.max(self.alphas)
rdp_int_new = np.zeros_like(self.alphas, float)
for key,val in self.cache.items():
idx = self.idxhash[key]
rdp = self.RDPs[idx]
newarray = np.zeros_like(self.alphas, float)
for j in range(2,mm+1,1):
newarray[j-1] = rdp(1.0*j)
newarray[0]=newarray[1]
coeff = self.coeffs[idx]
rdp_int_new += newarray * coeff
self.cache[key] = newarray
self.RDPs_int = rdp_int_new
# # update the integer CGF and the cache for each function
# rdp_int_new = np.zeros_like(self.RDPs_int)
# for key,val in self.cache.items():
# idx = self.idxhash[key]
# rdp = self.RDPs[idx]
# newarray = np.zeros_like(self.RDPs_int)
# for j in range(self.m):
# newarray[j] = rdp(1.0*(j+self.m+1))
#
# coeff = self.coeffs[idx]
# rdp_int_new += newarray * coeff
# self.cache[key] = np.concatenate((val, newarray))
#
# # update the corresponding quantities
# self.RDPs_int = np.concatenate((self.RDPs_int, rdp_int_new))
#self.m = self.m*2
bestint = np.argmin(np.log(1 / delta)/(self.alphas[1:]-1) + self.RDPs_int[1:]) + 1
if bestint == self.m-1:
if self.verbose:
print('Warning: Reach quadratic upper bound: m_max.')
                    # In this case, we match the maximum quadratic upper bound
# Fix it by calling O(1) upper bounds and do logarithmic search
cur = fun(bestint)
while (not np.isposinf(cur)) and fun(bestint-1)-fun(bestint-2) < -1e-8:
bestint = bestint*2
cur = fun(bestint)
if bestint > self.m_lin_max and self.approx ==True:
print('Warning: Reach linear upper bound: m_lin_max.')
return cur
results = minimize_scalar(fun, method='Bounded', bounds=[self.m-1, bestint + 2],
options={'disp': False})
if results.success:
return results.fun
else:
return None
#return fun(bestint)
if bestint == 0:
if self.verbose:
print('Warning: Smallest alpha = 1.')
# find the best integer alpha.
bestalpha = self.alphas[bestint]
results = minimize_scalar(fun, method='Bounded',bounds=[bestalpha-1, bestalpha+1],
options={'disp':False})
# the while loop above ensures that bestint+2 is at most m, and also bestint is at least 0.
if results.success:
return results.fun
else:
# There are cases when certain \delta is not feasible.
# For example, let p and q be uniform the privacy R.V. is either 0 or \infty and unless all \infty
# events are taken cared of by \delta, \epsilon cannot be < \infty
return -1
def compose_mechanism(self, func, coeff=1.0):
self.flag = False
if func in self.idxhash:
self.coeffs[self.idxhash[func]] += coeff
# also update the integer CGFs
self.RDPs_int += self.cache[func] * coeff
else:
# book keeping
self.idxhash[func] = self.n
self.n += 1
self.coeffs.append(coeff)
# update the analytical
self.RDPs.append(func)
# also update the integer results
if func in self.cache:
tmp = self.cache[func]
else:
tmp = np.zeros_like(self.RDPs_int, float)
for i in range(self.m):
tmp[i] = func(i+1)
self.cache[func] = tmp # save in cache
self.RDPs_int += tmp * coeff
self.RDP_inf += func(np.inf) * coeff
#795010
#imple 100
def compose_subsampled_mechanism(self, func, prob, coeff=1.0):
        # This function is for subsampling without replacement.
self.flag = False
self.flag_subsample = True
if (func, prob) in self.idxhash:
idx = self.idxhash[(func, prob)]
# update the coefficients of each function
self.coeffs[idx] += coeff
# also update the integer CGFs
self.RDPs_int += self.cache[(func, prob)] * coeff
else:
def cgf(x):
return x * func(x+1)
                # we need forward differences of the exp(cgf)
                # The following line is the numerically stable way of implementing it.
# The output is in polar form with logarithmic magnitude
deltas, signs_deltas = utils.get_forward_diffs(cgf,self.m)
#deltas1, signs_deltas1 = get_forward_diffs_direct(func, self.m)
#tmp = deltas-deltas1
self.deltas_cache[(func,prob)] = [deltas,signs_deltas]
def subsample_func_int(x):
                # This function evaluates the CGF at alpha = x, i.e., lamb = x - 1
deltas_local, signs_deltas_local = self.deltas_cache[(func,prob)]
if np.isinf(func(x)):
return np.inf
mm = int(x)
fastupperbound = fast_subsampled_cgf_upperbound(func, mm, prob, deltas_local)
fastupperbound2 = general_upperbound(func, mm, prob)
if self.approx ==True:
if fastupperbound2 <0:
print('general rdp is negative',x)
return fastupperbound2
if mm <= self.alphas[-1]: # compute the bound exactly. Requires book keeping of O(x^2)
moments = [ np.minimum(np.minimum((j)*np.log(np.exp(func(np.inf))-1) + np.minimum(cgf(j-1),np.log(4)),
np.log(2) + cgf(j-1)),
np.log(4) + 0.5*deltas_local[int(2*np.floor(j/2.0))-1]
+ 0.5*deltas_local[int(2*np.ceil(j/2.0))-1]) + j*np.log(prob)
+self.logBinomC[int(mm), j] for j in range(2,int(mm+1),1)]
return np.minimum(fastupperbound, utils.stable_logsumexp([0]+moments))
elif mm <= self.m_lin_max: # compute the bound with stirling approximation. Everything is O(x) now.
moment_bound = lambda j: np.minimum(j * np.log(np.exp(func(np.inf)) - 1)
+ np.minimum(cgf(j - 1), np.log(4)), np.log(2)
+ cgf(j - 1)) + j * np.log(prob) + utils.logcomb(mm, j)
moments = [moment_bound(j) for j in range(2,mm+1,1)]
return np.minimum(fastupperbound, utils.stable_logsumexp([0]+ moments))
else: # Compute the O(1) upper bound
return fastupperbound
def subsample_func(x):
# This function returns the RDP at alpha = x
# RDP with the linear interpolation upper bound of the CGF
epsinf, tmp = subsample_epsdelta(func(np.inf),0,prob)
if np.isinf(x):
return epsinf
if prob == 1.0:
return func(x)
if (x >= 1.0) and (x <= 2.0):
return np.minimum(epsinf, subsample_func_int(2.0) / (2.0-1))
if np.equal(np.mod(x, 1), 0):
return np.minimum(epsinf, subsample_func_int(x) / (x-1) )
xc = math.ceil(x)
xf = math.floor(x)
return np.minimum(
epsinf,
((x-xf)*subsample_func_int(xc) + (1-(x-xf))*subsample_func_int(xf)) / (x-1)
)
# book keeping
self.idxhash[(func, prob)] = self.n # save the index
self.n += 1 # increment the number of unique mechanisms
self.coeffs.append(coeff) # Update the coefficient
self.RDPs.append(subsample_func) # update the analytical functions
# also update the integer results up to m_max.
if (func,prob) in self.cache:
results = self.cache[(func,prob)]
else:
results = np.zeros_like(self.RDPs_int, float)
# m = np.max(self.lambs)
mm = np.max(self.alphas)
for alpha in range(2, mm+1):
results[alpha-1] = subsample_func(alpha)
results[0] = results[1] # Provide the trivial upper bound of RDP at alpha = 1 --- the KL privacy.
self.cache[(func,prob)] = results # save in cache
self.RDPs_int += results * coeff
# update the pure DP
eps, delta = subsample_epsdelta(func(np.inf), 0, prob)
self.RDP_inf += eps * coeff
# mm = np.max(self.alphas)
#
# jvec = np.arange(2, mm+1) #
# logterm3plus = np.zeros_like(results)
# for j in jvec:
# logterm3plus[j-2] = (np.minimum(np.minimum(j * np.log(np.exp(func(np.inf)) - 1)
# + np.minimum(np.log(4),cgf(j-1)), np.log(2) + cgf(j-1)),
# np.log(4) + 0.5 * deltas[int(2 * np.floor(j / 2.0))-1]
# + 0.5 * deltas[int(2 * np.ceil(j / 2.0))-1])
# + j * np.log(prob))
#
# for alpha in range(2, mm+1):
# if np.isinf(logterm3plus[alpha-1]):
# results[alpha-1] = np.inf
# else:
# tmp = utils.stable_logsumexp(logterm3plus[0:alpha-1] + self.logBinomC[alpha, 2:(alpha+1)])
# results[alpha-1] = utils.stable_logsumexp_two(0, tmp) / (1.0*alpha-1)
#
# results[0] = results[1] # Provide the trivial upper bound of RDP at alpha = 1 --- the KL privacy.
#
# self.cache[(func,prob)] = results # save in cache
# self.RDPs_int += results
#
# # For debugging: The following 'results1' should be the same as 'results' above.
# # results1 = np.zeros_like(self.RDPs_int, float)
# # for j in range(self.m):
# # results1[j] = subsample_func(j+1)
#
# eps, delta = subsample_epsdelta(func(np.inf), 0, prob)
# self.RDP_inf += eps
def compose_poisson_subsampled_mechanisms(self, func, prob, coeff=1.0):
# This function implements the lower bound for subsampled RDP.
# It is also the exact formula of poission_subsampled RDP for many mechanisms including Gaussian mech.
#
# At the moment, we do not support mixing poisson subsampling and standard subsampling.
# TODO: modify the caching identifies so that we can distinguish different types of subsampling
#
self.flag = False
self.flag_subsample = True
if (func, prob) in self.idxhash:
idx = self.idxhash[(func, prob)] # TODO: this is really where it needs to be changed.
# update the coefficients of each function
self.coeffs[idx] += coeff
# also update the integer CGFs
self.RDPs_int += self.cache[(func, prob)] * coeff
else: # compute an easy to compute upper bound of it.
def cgf(x):
return x * func(x+1)
def subsample_func_int(x):
                # This function evaluates the CGF at alpha = x, i.e., lamb = x - 1
if np.isinf(func(x)):
return np.inf
mm = int(x)
#
fastbound = fast_poission_subsampled_cgf_upperbound(func, mm, prob)
k = self.alphas[-1]
fastbound_k = fast_k_subsample_upperbound(func, mm, prob,k)
if self.approx == True:
return fastbound_k
#fastbound = min(fastbound, fastbound_k)
if x <= self.alphas[-1]: # compute the bound exactly.
moments = [cgf(j-1) +j*np.log(prob) + (mm-j) * np.log(1-prob)
+ self.logBinomC[mm, j] for j in range(2,mm+1,1)]
return utils.stable_logsumexp([(mm-1)*np.log(1-prob)+np.log(1+(mm-1)*prob)]+moments)
elif mm <= self.m_lin_max:
moments = [cgf(j-1) +j*np.log(prob) + (mm-j) * np.log(1-prob)
+ utils.logcomb(mm,j) for j in range(2,mm+1,1)]
return utils.stable_logsumexp([(mm-1)*np.log(1-prob)+np.log(1+(mm-1)*prob)] + moments)
else:
return fastbound
def subsample_func(x): # linear interpolation upper bound
# This function implements the RDP at alpha = x
if np.isinf(func(x)):
return np.inf
if prob == 1.0:
return func(x)
epsinf, tmp = subsample_epsdelta(func(np.inf),0,prob)
if np.isinf(x):
return epsinf
if (x >= 1.0) and (x <= 2.0):
return np.minimum(epsinf, subsample_func_int(2.0) / (2.0-1))
if np.equal(np.mod(x, 1), 0):
return np.minimum(epsinf, subsample_func_int(x) / (x-1) )
xc = math.ceil(x)
xf = math.floor(x)
return np.minimum(
epsinf,
((x-xf)*subsample_func_int(xc) + (1-(x-xf))*subsample_func_int(xf)) / (x-1)
)
# book keeping
self.idxhash[(func, prob)] = self.n # save the index
self.n += 1 # increment the number of unique mechanisms
self.coeffs.append(coeff) # Update the coefficient
self.RDPs.append(subsample_func) # update the analytical functions
# also update the integer results, with a vectorized computation.
# TODO: pre-computing subsampled RDP for integers is error-prone (implement the same thing twice)
# TODO: and its benefits are not clear. We should consider removing it and simply call the lambda function.
#
if (func,prob) in self.cache:
results = self.cache[(func,prob)]
else:
results = np.zeros_like(self.RDPs_int, float)
mm = np.max(self.alphas) # evaluate the RDP up to order mm
jvec = np.arange(2, mm + 1)
logterm3plus = np.zeros_like(results) # This saves everything from j=2 to j = m+1
for j in jvec:
logterm3plus[j-2] = cgf(j-1) + j * np.log(prob) #- np.log(1-prob))
for alpha in range(2, mm+1):
if np.isinf(logterm3plus[alpha-1]):
results[alpha-1] = np.inf
else:
tmp = utils.stable_logsumexp(logterm3plus[0:alpha-1] + self.logBinomC[alpha , 2:(alpha + 1)]
+ (alpha+1-jvec[0:alpha-1])*np.log(1-prob))
results[alpha-1] = utils.stable_logsumexp_two((alpha-1)*np.log(1-prob)
+ np.log(1+(alpha-1)*prob), tmp) / (1.0*alpha-1)
results[0] = results[1] # Provide the trivial upper bound of RDP at alpha = 1 --- the KL privacy.
self.cache[(func,prob)] = results # save in cache
self.RDPs_int += results * coeff
# update the pure DP tracker
eps, delta = subsample_epsdelta(func(np.inf), 0, prob)
self.RDP_inf += eps * coeff
def compose_poisson_subsampled_mechanisms1(self, func, prob, coeff=1.0):
# This function implements the general amplification bounds for Poisson sampling.
# No additional assumptions are needed.
# At the moment, we do not support mixing poisson subsampling and standard subsampling.
#
self.flag = False
self.flag_subsample = True
if (func, prob) in self.idxhash:
idx = self.idxhash[(func, prob)]
# update the coefficients of each function
self.coeffs[idx] += coeff
# also update the integer CGFs
self.RDPs_int += self.cache[(func, prob)] * coeff
else: # compute an easy to compute upper bound of it.
cgf = lambda x: x*func(x+1)
def subsample_func_int(x):
                # This function evaluates the CGF at alpha = x, i.e., lamb = x - 1
if np.isinf(func(x)):
return np.inf
if prob == 1.0:
return func(x)
mm = int(x)
fastbound = fast_poission_subsampled_cgf_upperbound(func, mm, prob)
if x <= self.alphas[-1]: # compute the bound exactly.
moments = [cgf(1) + 2*np.log(prob) + (mm-2) * np.log(1 - prob) + self.logBinomC[mm, 2]]
moments = moments + [cgf(j-1+1) +j*np.log(prob) + (mm-j) * np.log(1 - prob)
+ self.logBinomC[mm, j] for j in range(3,mm+1,1)]
return utils.stable_logsumexp([(mm-1)*np.log(1-prob)+np.log(1+(mm-1)*prob)]+moments)
elif mm <= self.m_lin_max:
moments = [cgf(1) + 2*np.log(prob) + (mm-2) * np.log(1 - prob) + utils.logcomb(mm, 2)]
moments = moments + [cgf(j-1+1) +j*np.log(prob) + (mm-j) * np.log(1 - prob)
+ utils.logcomb(mm, j) for j in range(3,mm+1,1)]
return utils.stable_logsumexp([(mm-1)*np.log(1-prob)+np.log(1+(mm-1)*prob)]+moments)
else:
return fastbound
def subsample_func(x): # linear interpolation upper bound
epsinf, tmp = subsample_epsdelta(func(np.inf),0,prob)
if np.isinf(x):
return epsinf
if (x >= 1.0) and (x <= 2.0):
return np.minimum(epsinf, subsample_func_int(2.0) / (2.0-1))
if np.equal(np.mod(x, 1), 0):
return np.minimum(epsinf, subsample_func_int(x) / (x-1) )
xc = math.ceil(x)
xf = math.floor(x)
return np.minimum(
epsinf,
((x-xf)*subsample_func_int(xc) + (1-(x-xf))*subsample_func_int(xf)) / (x-1)
)
# book keeping
self.idxhash[(func, prob)] = self.n # save the index
self.n += 1 # increment the number of unique mechanisms
self.coeffs.append(coeff) # Update the coefficient
self.RDPs.append(subsample_func) # update the analytical functions
# also update the integer results
if (func,prob) in self.cache:
results = self.cache[(func,prob)]
else:
results = np.zeros_like(self.RDPs_int, float)
mm = np.max(self.alphas) # evaluate the RDP up to order mm
for alpha in range(2, mm+1):
results[alpha-1] = subsample_func_int(alpha)
results[0] = results[1] # Provide the trivial upper bound of RDP at alpha = 1 --- the KL privacy.
self.cache[(func,prob)] = results # save in cache
self.RDPs_int += results * coeff
# update the pure DP tracker
eps, delta = subsample_epsdelta(func(np.inf), 0, prob)
self.RDP_inf += eps * coeff
    # TODO: 1. Modularize the several Poisson sampling versions. 2. Support both sampling schemes together.
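if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Hedged usage sketch (illustration only, not part of the accountant
    # class above). It assumes the class is exposed as `rdp_acct.anaRDPacct`
    # and offers a `get_eps(delta)` RDP-to-(eps, delta) conversion; both
    # names are assumptions about the surrounding library, not facts
    # established in this file.
    # ------------------------------------------------------------------
    from autodp import rdp_acct

    sigma = 5.0                                               # Gaussian noise multiplier
    gaussian_rdp = lambda alpha: alpha / (2.0 * sigma ** 2)   # RDP curve of the Gaussian mechanism
    acct = rdp_acct.anaRDPacct()
    # Compose 1000 rounds, each applied to a Poisson-subsampled batch (sampling rate 0.01).
    acct.compose_poisson_subsampled_mechanisms(gaussian_rdp, prob=0.01, coeff=1000)
    print("epsilon at delta=1e-5:", acct.get_eps(1e-5))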
| [
[
[
412,
423
],
[
999,
1001
],
[
1034,
1036
],
[
1098,
1100
],
[
1229,
1231
],
[
1304,
1306
],
[
1359,
1361
],
[
1377,
1379
],
[
1420,
1422
],
[
1455,
1457
],
[
1478,
1480
],
[
1501,
1503
],
[
1656,
1658
],
[
1684,
1686
],
[
1710,
1712
],
[
1764,
1766
],
[
1815,
1817
],
[
1827,
1829
],
[
1842,
1844
],
[
1866,
1868
],
[
1905,
1907
],
[
1924,
1926
],
[
2463,
2465
],
[
2478,
2480
],
[
2513,
2515
],
[
2570,
2572
],
[
2581,
2583
],
[
2598,
2600
],
[
2605,
2607
],
[
2617,
2619
],
[
2672,
2674
],
[
2692,
2694
],
[
2743,
2745
],
[
2793,
2795
],
[
2832,
2834
],
[
2913,
2915
],
[
2972,
2974
],
[
2987,
2989
],
[
3000,
3002
],
[
3017,
3019
],
[
3179,
3181
],
[
3194,
3196
],
[
3244,
3246
],
[
3251,
3253
],
[
3263,
3265
],
[
3292,
3294
],
[
3473,
3475
],
[
3668,
3670
],
[
3699,
3701
],
[
3710,
3712
],
[
3765,
3767
],
[
3776,
3778
],
[
3861,
3863
],
[
3912,
3914
],
[
3919,
3921
],
[
3984,
3986
],
[
4000,
4002
],
[
4069,
4071
],
[
4118,
4120
],
[
4125,
4127
],
[
4137,
4139
],
[
4181,
4183
],
[
4416,
4418
],
[
4497,
4499
],
[
4521,
4523
],
[
4594,
4596
],
[
4611,
4613
],
[
4802,
4804
],
[
4826,
4828
],
[
4897,
4899
],
[
4914,
4916
],
[
4944,
4946
],
[
5306,
5308
],
[
5341,
5343
],
[
5489,
5491
],
[
5505,
5507
],
[
5927,
5929
],
[
5952,
5954
],
[
5969,
5971
],
[
5982,
5984
],
[
6040,
6042
],
[
6061,
6063
],
[
6076,
6078
],
[
6089,
6091
],
[
6150,
6152
],
[
6163,
6165
],
[
6178,
6180
],
[
6193,
6195
],
[
6211,
6213
],
[
6270,
6272
],
[
6378,
6380
],
[
6393,
6395
],
[
6597,
6599
],
[
6925,
6927
],
[
6960,
6962
],
[
7053,
7055
],
[
7135,
7137
],
[
7154,
7156
],
[
7207,
7209
],
[
7254,
7256
],
[
7277,
7279
],
[
7550,
7552
],
[
7652,
7654
],
[
7743,
7745
],
[
7798,
7800
],
[
7844,
7846
],
[
7867,
7869
],
[
8223,
8225
],
[
8272,
8274
],
[
8295,
8297
],
[
9474,
9476
],
[
9587,
9589
],
[
9654,
9656
],
[
12005,
12007
],
[
12037,
12039
],
[
12174,
12176
],
[
13235,
13237
],
[
14154,
14156
],
[
14183,
14185
],
[
14281,
14283
],
[
14328,
14330
],
[
14514,
14516
],
[
15638,
15640
],
[
15648,
15650
],
[
16074,
16076
],
[
18220,
18222
],
[
18462,
18464
],
[
22927,
22929
],
[
23025,
23027
],
[
23451,
23453
],
[
29200,
29202
],
[
29257,
29259
],
[
29335,
29337
],
[
29387,
29389
],
[
29541,
29543
],
[
29643,
29645
],
[
29719,
29721
],
[
29944,
29946
],
[
30040,
30042
],
[
30125,
30127
],
[
30481,
30483
],
[
33828,
33830
],
[
33885,
33887
],
[
34359,
34361
],
[
12631,
12633
],
[
12687,
12689
],
[
12877,
12879
],
[
12933,
12935
],
[
19744,
19746
],
[
19790,
19792
],
[
20311,
20313
],
[
20322,
20324
],
[
20337,
20339
],
[
20344,
20346
],
[
20356,
20358
],
[
20370,
20372
],
[
20390,
20392
],
[
20456,
20458
],
[
20522,
20524
],
[
20557,
20559
],
[
20645,
20647
],
[
20669,
20671
],
[
20801,
20803
],
[
21390,
21392
],
[
21779,
21781
],
[
21815,
21817
],
[
22003,
22005
],
[
22076,
22078
],
[
22085,
22087
],
[
22130,
22132
],
[
22273,
22275
],
[
26386,
26388
],
[
26432,
26434
],
[
26932,
26934
],
[
26956,
26958
],
[
27111,
27113
],
[
27126,
27128
],
[
27244,
27246
],
[
27268,
27270
],
[
27420,
27422
],
[
27435,
27437
],
[
27683,
27685
],
[
27729,
27731
],
[
27858,
27860
],
[
27894,
27896
],
[
28014,
28016
],
[
28087,
28089
],
[
28096,
28098
],
[
28141,
28143
],
[
28284,
28286
],
[
31450,
31452
],
[
31496,
31498
],
[
31797,
31799
],
[
31821,
31823
],
[
31918,
31920
],
[
31942,
31944
],
[
32099,
32101
],
[
32114,
32116
],
[
32231,
32233
],
[
32255,
32257
],
[
32351,
32353
],
[
32375,
32377
],
[
32530,
32532
],
[
32545,
32547
],
[
32762,
32764
],
[
32798,
32800
],
[
32918,
32920
],
[
32991,
32993
],
[
33000,
33002
],
[
33045,
33047
],
[
33188,
33190
],
[
21027,
21029
],
[
21042,
21044
],
[
21049,
21051
],
[
21061,
21063
],
[
21133,
21135
],
[
21156,
21158
],
[
21168,
21170
],
[
21254,
21256
]
],
[
[
451,
466
],
[
16427,
16442
],
[
16964,
16979
]
],
[
[
474,
477
],
[
478,
481
]
],
[
[
507,
513
]
],
[
[
534,
539
],
[
1332,
1337
],
[
1534,
1539
],
[
1738,
1743
],
[
1960,
1965
],
[
2126,
2131
],
[
2173,
2178
],
[
3059,
3064
],
[
3434,
3439
],
[
4198,
4203
],
[
4269,
4274
],
[
4355,
4360
],
[
4462,
4467
],
[
4672,
4677
],
[
4767,
4772
],
[
5037,
5042
],
[
5462,
5467
],
[
5898,
5903
],
[
6348,
6353
],
[
7408,
7413
],
[
7481,
7486
],
[
7625,
7630
],
[
7904,
7909
],
[
8078,
8083
],
[
8142,
8147
],
[
8332,
8337
],
[
8564,
8569
],
[
8627,
8632
],
[
9858,
9863
],
[
13535,
13540
],
[
13946,
13951
],
[
19303,
19308
],
[
29782,
29787
],
[
30003,
30008
],
[
20828,
20833
],
[
21417,
21422
],
[
27080,
27085
],
[
27316,
27321
],
[
27389,
27394
],
[
32068,
32073
],
[
32274,
32279
],
[
32425,
32430
],
[
32499,
32504
],
[
21269,
21274
]
],
[
[
541,
549
]
],
[
[
588,
606
],
[
23427,
23445
],
[
30457,
30475
],
[
34335,
34353
],
[
21755,
21773
],
[
27834,
27852
],
[
32738,
32756
]
],
[
[
614,
619
],
[
1427,
1432
],
[
7214,
7219
],
[
7805,
7810
],
[
8230,
8235
]
],
[
[
627,
631
],
[
22202,
22206
],
[
22236,
22240
],
[
28213,
28217
],
[
28247,
28251
],
[
33117,
33121
],
[
33151,
33155
]
],
[
[
637,
655
],
[
19955,
19973
]
],
[
[
2244,
2274
],
[
19860,
19890
]
],
[
[
5095,
5134
],
[
26514,
26553
],
[
31628,
31667
]
],
[
[
6628,
6655
],
[
26637,
26664
]
],
[
[
8745,
8755
]
]
] |
import logging
import unittest
import numpy as np
from cave.utils.statistical_tests import paired_permutation, paired_t_student
class TestStatisticalTests(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger("TestStatisticalTests")
def test_paired_permutation(self):
""" Testing paired permutation test. """
rng = np.random.RandomState(42)
a, b = rng.normal(loc=0, size=100), rng.normal(loc=0, size=100)
result = paired_permutation(a, a, rng, 100, self.logger)
self.assertGreater(result, 0.9999)
result = paired_permutation(a, b, rng, 100, self.logger)
self.assertGreater(result, 0.3)
a, b = rng.normal(loc=-1, size=100), rng.normal(loc=1, size=100)
result = paired_permutation(a, b, rng, 1000, self.logger)
self.assertLess(result, 0.001)
def test_t_student(self):
""" Testing paired t-test. """
rng = np.random.RandomState(42)
a, b = rng.normal(loc=0, size=100), rng.normal(loc=0, size=100)
result = paired_t_student(a, b, self.logger)
self.assertGreater(result, 0.3)
a, b = rng.normal(loc=-1, size=100), rng.normal(loc=1, size=100)
result = paired_t_student(a, b, self.logger)
self.assertLess(result, 0.001)
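if __name__ == "__main__":
    # Hedged addition (not in the original test module): a standard unittest
    # entry point so the cases above can be run directly, e.g.
    # `python test_statistical_tests.py`; the file name is an assumption.
    unittest.main()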
| [
[
[
7,
14
],
[
223,
230
]
],
[
[
22,
30
],
[
159,
167
]
],
[
[
39,
50
],
[
368,
370
],
[
941,
943
]
],
[
[
93,
111
],
[
483,
501
],
[
591,
609
],
[
769,
787
]
],
[
[
113,
129
],
[
1056,
1072
],
[
1222,
1238
]
],
[
[
138,
158
]
]
] |
import ibmsecurity.utilities.tools
import logging
logger = logging.getLogger(__name__)
module_uri = "/isam/felb/configuration/services/"
requires_modulers = None
requires_version = None
def add(isamAppliance, service_name, address, active, port, weight, secure, ssllabel, check_mode=False, force=False):
"""
Creating a server
"""
change_required = _check_exist(isamAppliance, service_name, address, port=port)
if force is True or change_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post("Creating a server",
"{0}{1}/servers".format(module_uri, service_name, address),
{
"active": active,
"address": address,
"port": port,
"weight": weight,
"secure": secure,
"ssllabel": ssllabel
},
requires_version=requires_version, requires_modules=requires_modulers)
else:
return isamAppliance.create_return_object()
def delete(isamAppliance, service_name, address, check_mode=False, force=False):
"""
    Deletes a server from the specified service name
"""
if force is True or _check_exist(isamAppliance, service_name, address) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete("Deleting a server",
"{0}{1}/servers/{2}".format(module_uri, service_name, address),
requires_version=requires_version, requires_modules=requires_modulers)
else:
return isamAppliance.create_return_object()
def get(isamAppliance, service_name, address, check_mode=False, force=False):
"""
    Retrieves a server from the specified service name
"""
return (
isamAppliance.invoke_get("Retrieving a server", "{0}{1}/servers/{2}".format(module_uri, service_name, address),
requires_version=requires_version, requires_modules=requires_modulers))
def get_all(isamAppliance, service_name, check_mode=False, force=False):
"""
Retrieves a list of servers under a specified service
"""
return isamAppliance.invoke_get("Retrieving servers for a service",
"{0}{1}/servers".format(module_uri, service_name),
requires_version=requires_version, requires_modules=requires_modulers)
def update(isamAppliance, service_name, address, active, new_address, new_port, weight, secure=False, ssllabel=None,
check_mode=False,
force=False):
"""
Updating server
"""
change_required = _check_update(isamAppliance, service_name, address, active, new_address, new_port, weight, secure,
ssllabel)
if force is True or change_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put("Updating a server",
"{0}{1}/servers/{2}".format(module_uri, service_name, address),
{
"address": new_address,
"active": active,
"port": new_port,
"weight": weight,
"secure": secure,
"ssllabel": ssllabel
},
requires_modules=requires_modulers,
requires_version=requires_version)
else:
return isamAppliance.create_return_object()
def _check_update(isamAppliance, service_name, address, active, new_address, new_port, weight, secure=False,
ssllabel=None):
"""
    idempotency test
"""
org_obj = get(isamAppliance, service_name, address)
if org_obj['data']['address'] != new_address:
return True
elif org_obj['data']['active'] != active:
return True
elif org_obj['data']['port'] != new_port:
return True
elif org_obj['data']['weight'] != weight:
return True
elif org_obj['data']['secure'] != secure:
return True
elif org_obj['data']['ssllabel'] != ssllabel:
return True
else:
return False
def _check_exist(isamAppliance, service_name, address):
"""
idempotency test for delete function
"""
check_obj = {}
    # Check whether a server with the corresponding address exists
try:
check_obj = get(isamAppliance, service_name, address)
except:
return False
return True
def compare(isamAppliance1, isamAppliance2):
"""
Compare cluster configuration between two appliances
"""
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
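# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of this module). It assumes
# an appliance object has been constructed elsewhere, e.g. via
# ibmsecurity.appliance.isamappliance.ISAMAppliance(...); that constructor,
# the variable name `isam_appliance`, and the service name "web_pool" are
# assumptions used purely for illustration.
# ---------------------------------------------------------------------------
# ret = add(isam_appliance, service_name="web_pool", address="10.0.0.5",
#           active=True, port=443, weight=1, secure=True, ssllabel=None)
# ret = update(isam_appliance, service_name="web_pool", address="10.0.0.5",
#              active=True, new_address="10.0.0.6", new_port=443, weight=2)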
| [
[
[
7,
34
],
[
5575,
5586
]
],
[
[
42,
49
],
[
60,
67
]
],
[
[
51,
57
]
],
[
[
89,
99
],
[
729,
739
],
[
1930,
1940
],
[
2389,
2399
],
[
2811,
2821
],
[
3622,
3632
]
],
[
[
139,
156
],
[
1361,
1378
],
[
2065,
2082
],
[
2510,
2527
],
[
2926,
2943
],
[
4218,
4235
]
],
[
[
164,
180
],
[
1326,
1342
],
[
2030,
2046
],
[
2475,
2491
],
[
2891,
2907
],
[
4298,
4314
]
],
[
[
194,
197
]
],
[
[
1448,
1454
]
],
[
[
2153,
2156
],
[
4575,
4578
],
[
5281,
5284
],
[
5508,
5511
],
[
5543,
5546
]
],
[
[
2536,
2543
]
],
[
[
2951,
2957
]
],
[
[
4384,
4397
],
[
3176,
3189
]
],
[
[
5059,
5071
],
[
368,
380
],
[
1614,
1626
]
],
[
[
5379,
5386
]
]
] |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define TensorFlow Probability version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '9'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_VERSION_SUFFIX = 'dev'
# Example, '0.4.0-dev'
__version__ = '.'.join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
if _VERSION_SUFFIX:
__version__ = '{}-{}'.format(__version__, _VERSION_SUFFIX)
| [
[
[
790,
804
],
[
1277,
1291
]
],
[
[
811,
825
],
[
1297,
1311
]
],
[
[
832,
846
],
[
1317,
1331
]
],
[
[
1200,
1215
],
[
1339,
1354
],
[
1400,
1415
]
],
[
[
1248,
1259
],
[
1387,
1398
]
],
[
[
1358,
1369
]
]
] |
# Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import PIL.Image
import tensorflow as tf
from elasticdl.python.common.constants import Mode
def custom_model():
inputs = tf.keras.Input(shape=(28, 28), name="image")
x = tf.keras.layers.Reshape((28, 28, 1))(inputs)
x = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu")(x)
x = tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
x = tf.keras.layers.Dropout(0.25)(x)
x = tf.keras.layers.Flatten()(x)
outputs = tf.keras.layers.Dense(10)(x)
return tf.keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
def prepare_data_for_a_single_file(file_object, filename):
"""
:param filename: training data file name
:param file_object: a file object associated with filename
"""
label = int(filename.split("/")[-2])
image = PIL.Image.open(file_object)
numpy_image = np.array(image)
example_dict = {
"image": tf.train.Feature(
float_list=tf.train.FloatList(value=numpy_image.flatten())
),
"label": tf.train.Feature(
int64_list=tf.train.Int64List(value=[label])
),
}
example = tf.train.Example(
features=tf.train.Features(feature=example_dict)
)
return example.SerializeToString()
def loss(labels, predictions):
labels = tf.reshape(labels, [-1])
return tf.reduce_mean(
input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=predictions, labels=labels
)
)
def optimizer(lr=0.01):
return tf.optimizers.SGD(lr)
def feed(dataset, mode, _):
def _parse_data(record):
if mode == Mode.PREDICTION:
feature_description = {
"image": tf.io.FixedLenFeature([28, 28], tf.float32)
}
else:
feature_description = {
"image": tf.io.FixedLenFeature([28, 28], tf.float32),
"label": tf.io.FixedLenFeature([1], tf.int64),
}
r = tf.io.parse_single_example(record, feature_description)
features = {
"image": tf.math.divide(tf.cast(r["image"], tf.float32), 255.0)
}
if mode == Mode.PREDICTION:
return features
else:
return features, tf.cast(r["label"], tf.int32)
dataset = dataset.map(_parse_data)
if mode == Mode.TRAINING:
dataset = dataset.shuffle(buffer_size=1024)
return dataset
def eval_metrics_fn():
return {
"accuracy": lambda labels, predictions: tf.equal(
tf.argmax(predictions, 1, output_type=tf.int32),
tf.cast(tf.reshape(labels, [-1]), tf.int32),
)
}
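if __name__ == "__main__":
    # Hedged smoke test (not part of the original model-zoo file): it only
    # exercises the functions defined above with random data, so it can run
    # locally without an ElasticDL cluster.
    model = custom_model()
    images = tf.random.uniform([4, 28, 28])                   # fake batch of 4 MNIST-sized images
    fake_labels = tf.constant([3, 1, 4, 1], dtype=tf.int64)   # arbitrary class ids in [0, 10)
    predictions = model(images)
    print("loss on a random batch:", float(loss(fake_labels, predictions)))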
| [
[
[
609,
620
],
[
1591,
1593
]
],
[
[
628,
637
],
[
1545,
1548
]
],
[
[
645,
661
],
[
749,
751
],
[
802,
804
],
[
855,
857
],
[
932,
934
],
[
1009,
1011
],
[
1057,
1059
],
[
1115,
1117
],
[
1156,
1158
],
[
1199,
1201
],
[
1240,
1242
],
[
1645,
1647
],
[
1686,
1688
],
[
1762,
1764
],
[
1803,
1805
],
[
1868,
1870
],
[
1903,
1905
],
[
2034,
2036
],
[
2070,
2072
],
[
2107,
2109
],
[
2254,
2256
],
[
2432,
2434
],
[
2464,
2466
],
[
2565,
2567
],
[
2597,
2599
],
[
2635,
2637
],
[
2662,
2664
],
[
2699,
2701
],
[
2797,
2799
],
[
2812,
2814
],
[
2832,
2834
],
[
2969,
2971
],
[
2989,
2991
],
[
3227,
3229
],
[
3249,
3251
],
[
3287,
3289
],
[
3310,
3312
],
[
3318,
3320
],
[
3344,
3346
]
],
[
[
709,
713
],
[
3055,
3059
],
[
2354,
2358
],
[
2881,
2885
]
],
[
[
720,
732
]
],
[
[
1313,
1343
]
],
[
[
1994,
1998
]
],
[
[
2223,
2232
]
],
[
[
2282,
2286
]
],
[
[
3147,
3162
]
]
] |
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Geddy before v13.0.8 LFI''',
"description": '''Directory traversal vulnerability in lib/app/index.js in Geddy before 13.0.8 for Node.js allows remote attackers to read arbitrary files via a ..%2f (dot dot encoded slash) in the PATH_INFO to the default URI.''',
"severity": "high",
"references": [
"https://nodesecurity.io/advisories/geddy-directory-traversal",
"https://github.com/geddy/geddy/issues/697"
],
"classification": {
"cvss-metrics": "",
"cvss-score": "",
"cve-id": "",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2015", "geddy", "lfi"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = '/..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2fetc/passwd'
resp = requests.get(url+path, timeout=10, verify=False, allow_redirects=False)
if resp.status_code == 200 and "root:" in resp.text:
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url | [
[
[
7,
15
],
[
1137,
1145
]
],
[
[
39,
43
],
[
1336,
1340
]
],
[
[
889,
900
]
],
[
[
947,
950
],
[
1515,
1518
]
],
[
[
1494,
1497
]
],
[
[
1538,
1548
],
[
996,
1006
]
]
] |
import math
n = 100
print(sum(map(int, str(math.factorial(n))))) | [
[
[
7,
11
],
[
43,
47
]
],
[
[
12,
13
],
[
58,
59
]
]
] |
import os
import sys
import numpy as np
import cv2
import statistics
import datetime
def getMedian(arr, x, y):
values = []
for a in arr:
values.append(a[x][y])
return statistics.median_grouped(values)
def getMean(arr, x, y):
values = []
for a in arr:
values.append(a[x][y])
return statistics.mean(values)
def getMode(arr, x, y):
values = []
for a in arr:
values.append(a[x][y])
try:
mode = statistics.mode(values)
return mode
except statistics.StatisticsError: # all values are the same
return getMedian(arr,x,y)
method = sys.argv[1]
imgs = ["1.png","2.png", "3.png", "4.png", "5.png"] # image
#direct = os.getcwd() + "/images/" # where to get test images
#saved = os.getcwd() + "/saved/" # where to get test images
direct = "/var/www/html/" # where to get test images
saved = "/var/www/html/" # where to get test images
i=0
images = []
for img in imgs:
image = cv2.imread(direct + img) # open template image
images.append(image)
(height, width) = image.shape[:2] # get dimensions
red = []
green = []
blue = []
for image in images:
redMatrix = [[0 for x in range(width)] for y in range(height)]
greenMatrix = [[0 for x in range(width)] for y in range(height)]
blueMatrix = [[0 for x in range(width)] for y in range(height)]
for x in range(height):
for y in range(width):
redMatrix[x][y] = image[x,y,0]
greenMatrix[x][y] = image[x,y,1]
blueMatrix[x][y] = image[x,y,2]
red.append(redMatrix)
green.append(greenMatrix)
blue.append(blueMatrix)
newImage = np.zeros((height,width,3), np.uint8)
for x in range(height):
for y in range(width):
rgb = []
if(method == "median"):
redMedian = getMedian(red,x,y)
greenMedian = getMedian(green,x,y)
blueMedian = getMedian(blue,x,y)
if(method == "mean"):
redMedian = getMean(red,x,y)
greenMedian = getMean(green,x,y)
blueMedian = getMean(blue,x,y)
if(method == "mode"):
redMedian = getMode(red,x,y)
greenMedian = getMode(green,x,y)
blueMedian = getMode(blue,x,y)
rgb.append(redMedian)
rgb.append(greenMedian)
rgb.append(blueMedian)
newImage[x][y] = rgb
cv2.imwrite(saved + "results.jpg", newImage) # save image
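# Hedged aside (not part of the original script): for the "mean" method, the
# per-pixel Python loops above are essentially equivalent (up to rounding) to
# a single vectorized call, which is typically much faster on full-size frames:
# stacked = np.stack(images).astype(np.float64)   # shape: (n_frames, height, width, 3)
# mean_image = stacked.mean(axis=0).astype(np.uint8)
# cv2.imwrite(saved + "results_mean.jpg", mean_image)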
| [
[
[
7,
9
]
],
[
[
17,
20
],
[
615,
618
]
],
[
[
28,
39
],
[
1636,
1638
],
[
1663,
1665
]
],
[
[
47,
50
],
[
961,
964
],
[
2357,
2360
]
],
[
[
58,
68
],
[
188,
198
],
[
323,
333
],
[
462,
472
],
[
517,
527
]
],
[
[
76,
84
]
],
[
[
90,
99
],
[
1800,
1809
],
[
1845,
1854
],
[
1891,
1900
],
[
586,
595
]
],
[
[
227,
234
],
[
1966,
1973
],
[
2009,
2016
],
[
2053,
2060
]
],
[
[
352,
359
],
[
2126,
2133
],
[
2169,
2176
],
[
2213,
2220
]
],
[
[
606,
612
],
[
1755,
1761
],
[
1923,
1929
],
[
2083,
2089
]
],
[
[
628,
632
],
[
943,
947
]
],
[
[
810,
816
],
[
972,
978
]
],
[
[
863,
868
],
[
2369,
2374
]
],
[
[
915,
916
]
],
[
[
919,
925
],
[
1012,
1018
],
[
1135,
1141
]
],
[
[
936,
939
],
[
981,
984
]
],
[
[
953,
958
],
[
1026,
1031
],
[
1055,
1060
]
],
[
[
1038,
1044
],
[
1202,
1208
],
[
1271,
1277
],
[
1339,
1345
],
[
1368,
1374
],
[
1646,
1652
],
[
1689,
1695
]
],
[
[
1046,
1051
],
[
1179,
1184
],
[
1248,
1253
],
[
1316,
1321
],
[
1400,
1405
],
[
1653,
1658
],
[
1717,
1722
]
],
[
[
1091,
1094
],
[
1544,
1547
],
[
1810,
1813
],
[
1974,
1977
],
[
2134,
2137
]
],
[
[
1100,
1105
],
[
1570,
1575
],
[
1855,
1860
],
[
2017,
2022
],
[
2177,
2182
]
],
[
[
1111,
1115
],
[
1600,
1604
],
[
1901,
1905
],
[
2061,
2065
],
[
2221,
2225
]
],
[
[
1126,
1131
],
[
1438,
1443
],
[
1483,
1488
],
[
1527,
1532
]
],
[
[
1148,
1157
],
[
1420,
1429
],
[
1555,
1564
]
],
[
[
1215,
1226
],
[
1463,
1474
],
[
1583,
1594
]
],
[
[
1284,
1294
],
[
1508,
1518
],
[
1612,
1622
]
],
[
[
1357,
1358
],
[
1444,
1445
],
[
1430,
1431
],
[
1489,
1490
],
[
1475,
1476
],
[
1533,
1534
],
[
1519,
1520
]
],
[
[
1389,
1390
],
[
1446,
1447
],
[
1433,
1434
],
[
1491,
1492
],
[
1478,
1479
],
[
1535,
1536
],
[
1522,
1523
]
],
[
[
1625,
1633
],
[
2335,
2343
],
[
2392,
2400
]
],
[
[
1678,
1679
],
[
1814,
1815
],
[
1861,
1862
],
[
1906,
1907
],
[
1978,
1979
],
[
2023,
2024
],
[
2066,
2067
],
[
2138,
2139
],
[
2183,
2184
],
[
2226,
2227
],
[
2344,
2345
]
],
[
[
1706,
1707
],
[
1816,
1817
],
[
1863,
1864
],
[
1908,
1909
],
[
1980,
1981
],
[
2025,
2026
],
[
2068,
2069
],
[
2140,
2141
],
[
2185,
2186
],
[
2228,
2229
],
[
2347,
2348
]
],
[
[
1734,
1737
],
[
2241,
2244
],
[
2271,
2274
],
[
2303,
2306
],
[
2352,
2355
]
],
[
[
1788,
1797
],
[
2252,
2261
]
],
[
[
1831,
1842
],
[
2282,
2293
]
],
[
[
1878,
1888
],
[
2314,
2324
]
],
[
[
1954,
1963
],
[
2252,
2261
]
],
[
[
1995,
2006
],
[
2282,
2293
]
],
[
[
2040,
2050
],
[
2314,
2324
]
],
[
[
2114,
2123
],
[
2252,
2261
]
],
[
[
2155,
2166
],
[
2282,
2293
]
],
[
[
2200,
2210
],
[
2314,
2324
]
]
] |
#9TH PROGRAM
# THIS PROGRAM DEMONSTRATES ACCESSING DICTIONARY ITEMS AND PERFORMING CERTAIN OPERATIONS WITH A DICTIONARY
ages = {} #EMPTY DICTIONARY
ages["Micky"] = 24
ages["Lucky"] = 25
print(ages)
keys = ages.keys()  # .keys() returns all the keys available in the Dictionary
print(keys)
values = ages.values()  # .values() returns all the values available in the Dictionary
print(values)
print(sorted(ages))
# NOTE sorted(ages.values) fails because .values is a method; call it first: sorted(ages.values())
print(ages.values())  # Prints the values
# NOTE has_key() has been replaced by "in" in Python 3; you can check membership like below.
# Syntax : "key" in "dict"
if("Micky" in ages):
print("Micky is there")
else:
print("Micky is not there")
print(len(ages)) # Print the length of the dictionary
#Adding new item
# New initialization
ages = {"Snehasis" : "24" , "Sradhasis" : 25}
print(ages)
# New members
ages["LKP"] = 45 # Here value is saved as int
if("LKP" in ages):
updatedValue = ages.get("LKP") + 10
print("Updated Value = " , updatedValue)
print(ages)
ages["JYOTI"] = "38" # Here value is saved as string
if("JYOTI" in ages):
updatedValue = ages.get("JYOTI") + " New Age"
print("Updated Value = " , updatedValue)
print(ages)
| [
[
[
117,
121
],
[
147,
151
],
[
166,
170
],
[
192,
196
],
[
206,
210
],
[
290,
294
],
[
387,
391
],
[
451,
455
],
[
615,
619
],
[
699,
703
]
],
[
[
199,
203
],
[
274,
278
]
],
[
[
281,
287
],
[
364,
370
]
],
[
[
783,
787
],
[
835,
839
],
[
857,
861
],
[
915,
919
],
[
941,
945
],
[
1014,
1018
],
[
1022,
1026
],
[
1089,
1093
],
[
1115,
1119
],
[
1197,
1201
]
],
[
[
926,
938
],
[
994,
1006
]
],
[
[
1100,
1112
],
[
1177,
1189
]
]
] |
from __future__ import print_function
import itertools
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.HeppyCore.framework.event import Event
from PhysicsTools.HeppyCore.statistics.counter import Counter, Counters
from DataFormats.FWLite import Events, Handle,Lumis
class SkimAnalyzerCount( Analyzer ):
#---------------------------------------------
    # TO FIND THE INITIAL EVENTS BEFORE THE SKIM
#---------------------------------------------
def __init__(self, cfg_ana, cfg_comp, looperName):
super(SkimAnalyzerCount, self).__init__(cfg_ana, cfg_comp, looperName)
self.useLumiBlocks = self.cfg_ana.useLumiBlocks if (hasattr(self.cfg_ana,'useLumiBlocks')) else False
self.verbose = getattr(self.cfg_ana, 'verbose', False)
def declareHandles(self):
super(SkimAnalyzerCount, self).declareHandles()
self.counterHandle = Handle("edm::MergeableCounter")
self.mchandles['GenInfo'] = AutoHandle( ('generator','',''), 'GenEventInfoProduct' )
def beginLoop(self, setup):
super(SkimAnalyzerCount,self).beginLoop(setup)
self.counters.addCounter('SkimReport')
self.count = self.counters.counter('SkimReport')
self.count.register('All Events')
if self.cfg_comp.isMC:
self.count.register('Sum Weights')
if not self.useLumiBlocks:
#print 'Will actually count events instead of accessing lumi blocks'
return True
print('Counting the total events before the skim by accessing luminosity blocks')
lumis = Lumis(self.cfg_comp.files)
totalEvents=0
for lumi in lumis:
if lumi.getByLabel('prePathCounter',self.counterHandle):
totalEvents+=self.counterHandle.product().value
else:
self.useLumiBlocks = False
break
if self.useLumiBlocks:
self.count.inc('All Events',totalEvents)
if self.cfg_comp.isMC:
self.count.inc('Sum Weights',totalEvents)
print('Done -> proceeding with the analysis')
else:
print('Failed -> will have to actually count events (this can happen if the input dataset is not a CMG one)')
def process(self, event):
if self.verbose:
print("\nProcessing run:lumi:event %d:%d:%d" % (
event.input.eventAuxiliary().id().run(),
event.input.eventAuxiliary().id().luminosityBlock(),
event.input.eventAuxiliary().id().event()))
if not self.useLumiBlocks:
self.readCollections( event.input )
self.count.inc('All Events')
if self.cfg_comp.isMC:
self.count.inc('Sum Weights', self.mchandles['GenInfo'].product().weight())
return True
| [
[
[
23,
37
]
],
[
[
45,
54
]
],
[
[
111,
119
],
[
396,
404
]
],
[
[
177,
187
],
[
1057,
1067
]
],
[
[
239,
244
]
],
[
[
299,
306
]
],
[
[
308,
316
]
],
[
[
349,
355
]
],
[
[
357,
363
],
[
989,
995
]
],
[
[
364,
369
],
[
1684,
1689
]
],
[
[
377,
394
],
[
634,
651
],
[
918,
935
],
[
1169,
1186
]
]
] |
from django.contrib import admin
from .models import blog
# Register your models here.
admin.site.register(blog) | [
[
[
27,
32
],
[
89,
94
]
],
[
[
54,
58
],
[
109,
113
]
]
] |
#!/usr/local/sal/Python.framework/Versions/Current/bin/python3
import datetime
import pathlib
import plistlib
import sys
import sal
sys.path.insert(0, "/usr/local/munki")
from munkilib import munkicommon
__version__ = "1.2.0"
def main():
# If we haven't successfully submitted to Sal, pull the existing
# munki section rather than start from scratch, as we want to
# keep any install/removal history that may be there.
munki_submission = sal.get_checkin_results().get("munki", {})
munki_report = get_managed_install_report()
extras = {}
extras["munki_version"] = munki_report["MachineInfo"].get("munki_version")
extras["manifest"] = munki_report.get("ManifestName")
extras["runtype"] = munki_report.get("RunType", "custom")
munki_submission["extra_data"] = extras
munki_submission["facts"] = {
"checkin_module_version": __version__,
"RunType": munki_report["RunType"],
"StartTime": munki_report["StartTime"],
"EndTime": munki_report["EndTime"],
}
if munki_report.get("Conditions"):
for condition, value in munki_report["Conditions"].items():
# Join lists of strings into a comma-delimited string, as
# the server wants just text.
try:
if hasattr(value, "append"):
value = ", ".join(value)
except Exception as e:
                # We got something weird from a condition that probably wouldn't work anyway
continue
munki_submission["facts"][condition] = value
munki_submission["messages"] = []
for key in ("Errors", "Warnings"):
for msg in munki_report[key]:
# We need to drop the final 'S' to match Sal's message types.
munki_submission["messages"].append(
{"message_type": key.upper()[:-1], "text": msg}
)
now = datetime.datetime.now().astimezone(datetime.timezone.utc).isoformat()
# Process managed items and update histories.
munki_submission["managed_items"] = {}
optional_manifest = get_optional_manifest()
for item in munki_report.get("ManagedInstalls", []):
submission_item = {"date_managed": now}
submission_item["status"] = "PRESENT" if item["installed"] else "PENDING"
version_key = (
"version_to_install" if not item["installed"] else "installed_version"
)
version = item[version_key]
name = f'{item["name"]} {version}'
submission_item["name"] = name
# Pop off these two since we already used them.
item.pop("name")
item.pop("installed")
item["type"] = "ManagedInstalls"
self_serve = (
"True" if name in optional_manifest.get("managed_installs", []) else "False"
)
item["self_serve"] = self_serve
submission_item["data"] = item
munki_submission["managed_items"][name] = submission_item
for item in munki_report.get("managed_uninstalls_list", []):
submission_item = {"date_managed": now, "status": "ABSENT"}
self_serve = (
"True"
            if item in optional_manifest.get("managed_uninstalls", [])
else "False"
)
submission_item["data"] = {
"self_serve": self_serve,
"type": "ManagedUninstalls",
}
munki_submission["managed_items"][item] = submission_item
# Process InstallResults and RemovalResults into update history
for report_key in ("InstallResults", "RemovalResults"):
for item in munki_report.get(report_key, []):
# Skip Apple software update items.
if item.get("applesus"):
continue
# Construct key; we pop the name off because we don't need
# to submit it again when we stuff `item` into `data`.
name = f'{item.pop("name")} {item["version"]}'
submission_item = munki_submission["managed_items"].get(
name, {"name": name}
)
if item.get("status") != 0:
# Something went wrong, so change the status.
submission_item["status"] = "ERROR"
if "data" in submission_item:
submission_item["data"].update(item)
else:
submission_item["data"] = item
if "type" not in submission_item["data"]:
submission_item["data"]["type"] = (
"ManagedInstalls"
if report_key == "InstallResults"
else "ManagedUninstalls"
)
# This UTC datetime gets converted to a naive datetime by
# plistlib. Fortunately, we can just tell it that it's UTC.
submission_item["date_managed"] = (
item["time"].replace(tzinfo=datetime.timezone.utc).isoformat()
)
munki_submission["managed_items"][name] = submission_item
sal.set_checkin_results("Munki", munki_submission)
def get_managed_install_report():
"""Return Munki ManagedInstallsReport.plist as a plist dict.
Returns:
ManagedInstalls report for last Munki run as a plist
dict, or an empty dict.
"""
# Checks munki preferences to see where the install directory is set to.
managed_install_dir = munkicommon.pref("ManagedInstallDir")
# set the paths based on munki's configuration.
managed_install_report = (
pathlib.Path(managed_install_dir) / "ManagedInstallReport.plist"
)
try:
munki_report = plistlib.loads(managed_install_report.read_bytes())
except (IOError, plistlib.InvalidFileException):
munki_report = {}
if "MachineInfo" not in munki_report:
munki_report["MachineInfo"] = {}
return sal.unobjctify(munki_report)
def get_optional_manifest():
"""Return Munki SelfServeManifest as a plist dict.
Returns:
SelfServeManifest for last Munki run as a plist
dict, or an empty dict.
"""
# Checks munki preferences to see where the install directory is set to.
managed_install_dir = munkicommon.pref("ManagedInstallDir")
# set the paths based on munki's configuration.
optional_manifest_path = (
pathlib.Path(managed_install_dir) / "manifests/SelfServeManifest"
)
try:
optional_manifest = plistlib.loads(optional_manifest_path.read_bytes())
except (IOError, plistlib.InvalidFileException):
optional_manifest = {}
return optional_manifest
if __name__ == "__main__":
main()
| [
[
[
72,
80
],
[
1901,
1909
],
[
1936,
1944
],
[
4850,
4858
]
],
[
[
88,
95
],
[
5474,
5481
],
[
6263,
6270
]
],
[
[
103,
111
],
[
5578,
5586
],
[
5651,
5659
],
[
6373,
6381
],
[
6446,
6454
]
],
[
[
119,
122
],
[
136,
139
]
],
[
[
131,
134
],
[
462,
465
],
[
4974,
4977
],
[
5805,
5808
]
],
[
[
196,
207
],
[
5344,
5355
],
[
6133,
6144
]
],
[
[
210,
221
],
[
883,
894
]
],
[
[
238,
242
],
[
6572,
6576
]
],
[
[
5031,
5057
],
[
524,
550
]
],
[
[
5840,
5861
],
[
2089,
2110
]
]
] |
__all__ = ["PY2", "PY3"]
import sys
if sys.version_info[0] == 2:
PY2 = True
PY3 = False
elif sys.version_info[0] == 3:
PY2 = False
PY3 = True
else:
PY2 = False
PY3 = False
| [
[
[
2,
9
]
],
[
[
35,
38
],
[
43,
46
],
[
105,
108
]
],
[
[
73,
76
]
],
[
[
88,
91
]
],
[
[
135,
138
]
],
[
[
151,
154
]
],
[
[
172,
175
]
],
[
[
188,
191
]
]
] |
## @ StitchLoader.py
# This is a python stitching script for Slim Bootloader APL build
#
# Copyright (c) 2018 - 2022, Intel Corporation. All rights reserved. <BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import struct
import argparse
import zipfile
import shutil
from ctypes import *
from subprocess import check_output
from functools import reduce
sys.dont_write_bytecode = True
sblopen_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../'))
if not os.path.exists (sblopen_dir):
sblopen_dir = os.getenv('SBL_SOURCE', '')
sys.path.append (os.path.join(sblopen_dir, 'BootloaderCorePkg' , 'Tools'))
try:
from IfwiUtility import *
except ImportError:
err_msg = "Cannot find IfwiUtility module!\n"
err_msg += "Please make sure 'SBL_SOURCE' environment variable is set to open source SBL root folder."
raise ImportError(err_msg)
extra_usage_txt = \
"""This script creates a new Apollo Lake Slim Bootloader IFWI image basing
on an existing IFWI base image. Please note, this stitching method will work
only if Boot Guard in the base image is not enabled, and the silicon is not
fused with Boot Guard enabled.
Please follow steps below:
1. Download an existing Apollo Lake UEFI IFWI image associated with the target platform,
such as MinnowBoard 3, LeafHill, etc. The URL is below:
https://firmware.intel.com/projects/minnowboard3
Alternatively, the original IFWI image from the onboard SPI flash can be
read out as the base image too.
2. Build Slim Bootloader source tree and generate a stitching ZIP package.
The generated ZIP package is located at:
$(WORKSPACE)/Outputs/apl/Stitch_Components.zip
3. Stitch to create a new IFWI image.
EX:
python StitchLoader.py -i LEAFHILD.X64.0070.R01.1805070352.bin -s
Stitch_Components.zip -o SlimBoot.bin
4. Optionally, to view the flash layout for an given IFWI image,
specify '-i' option only.
EX:
python StitchLoader.py -i LEAFHILD.X64.0070.R01.1805070352.bin
"""
FILE_ALIGN = 0x1000
class IFWI_MANIPULATE:
def add_component (self, root, path, before = '$', file_path = ''):
nodes = path.split('/')
parent_path = '/'.join(nodes[:-1])
dir_comp = IFWI_PARSER.locate_component (root, parent_path)
if not dir_comp:
print ('Cannot find DIR %s !' % '/'.join(nodes[:-1]))
return -1
if dir_comp.type != COMPONENT.COMP_TYPE['PART']:
print ('Can only add FILE type !')
return -2
index = None
if before == '$':
# Add to end
index = len(dir_comp.child)
elif before == '^':
# Add to top
index = 0
else:
for idx, file in enumerate(dir_comp.child):
if before == file.name:
index = idx
if index is None:
print ('Cannot find FILE %s !' % before)
return -3
else:
length = os.path.getsize(file_path) if file_path else 0x1000
comp = COMPONENT (nodes[-1], COMPONENT.COMP_TYPE['FILE'], 0, length)
comp.set_data (file_path)
dir_comp.add_child (comp, index)
return 0
def remove_component (self, root, path):
nodes = path.split('/')
parent_path = '/'.join(nodes[:-1])
dir_comp = IFWI_PARSER.locate_component (root, parent_path)
if not dir_comp:
print ('Cannot find DIR %s !' % '/'.join(nodes[:-1]))
return -1
if dir_comp.type != COMPONENT.COMP_TYPE['PART']:
print ('Can only replace FILE type !')
return -2
index = None
for idx, file in enumerate(dir_comp.child):
if file.name == nodes[-1]:
index = idx
break
if index is None:
print ('Cannot find FILE %s !' % path)
return -3
else:
del dir_comp.child[index]
return 0
def replace_component (self, root, path, file_path):
comp = IFWI_PARSER.locate_component (root, path)
if not comp:
print ('Cannot find FILE %s !' % path)
return -1
if comp.type != COMPONENT.COMP_TYPE['FILE']:
            print ('Can only replace FILE type !')
return -2
comp.length = os.path.getsize(file_path) if file_path else 0x1000
if file_path:
comp.set_data (file_path)
return 0
def copy_component (self, root, path, ifwi_data):
print ("COPY BP0 BPDT to BP1 BPDT ...")
# Backup BP0 BPDT and BP1 SBPDT
bp1 = IFWI_PARSER.locate_component (root, 'IFWI/BIOS/BP1')
bp0bpdt = IFWI_PARSER.locate_component (root, 'IFWI/BIOS/BP0/BPDT')
bp1bpdt = IFWI_PARSER.locate_component (root, 'IFWI/BIOS/BP1/BPDT')
bp1sbpdt = IFWI_PARSER.locate_component (root, 'IFWI/BIOS/BP1/SBPDT')
bp0bpdt_data = bytearray(ifwi_data[bp0bpdt.offset :bp0bpdt.offset + bp0bpdt.length])
bp1sbpdt_data = bytearray(ifwi_data[bp1sbpdt.offset:bp1sbpdt.offset + bp1sbpdt.length])
# Copy to BP0 BPDT to BP1 BPDT
bp1sbpdt_offset = bp1bpdt.offset + bp0bpdt.length
ifwi_data[bp1bpdt.offset:bp1sbpdt_offset] = bp0bpdt_data
# Append original BP1 SBPDT
bp1sbpdt_end_offset = bp1sbpdt_offset + bp1sbpdt.length
ifwi_data[bp1sbpdt_offset:bp1sbpdt_end_offset] = bp1sbpdt_data
padding = bp1.offset + bp1.length - bp1sbpdt_end_offset
if padding < 0:
            print ('Insufficient space in BP1 partition !')
return -1
ifwi_data[bp1sbpdt_end_offset:bp1sbpdt_end_offset + padding] = b'\xff' * padding
# Fix Sbpdt length in BP1 BPDT
offset = bp1bpdt.offset
bpdt_hdr = BPDT_HEADER.from_buffer(ifwi_data, offset)
offset += sizeof(BPDT_HEADER)
for idx in range(bpdt_hdr.desc_cnt):
bpdt_entry = BPDT_ENTRY.from_buffer(ifwi_data, offset)
if "BpdtSbpdt" == str(bpdt_entry.type):
bpdt_entry.sub_part_size = bp1sbpdt.length
offset += sizeof(BPDT_ENTRY)
# Fix Sbpdt headers
offset = bp1sbpdt_offset
bpdt_hdr = BPDT_HEADER.from_buffer(ifwi_data, offset)
offset += sizeof(BPDT_HEADER)
for idx in range(bpdt_hdr.desc_cnt):
bpdt_entry = BPDT_ENTRY.from_buffer(ifwi_data, offset)
bpdt_entry.sub_part_offset += (bp0bpdt.length - bp1bpdt.length)
offset += sizeof(BPDT_ENTRY)
print ("Done!")
return 0
def create_dir_data (self, dir, ifwi_data):
        # Calculate new DIR length and create new DIR data
support_list = ["BpdtIbb", "BpdtObb"]
if dir.name not in support_list:
raise Exception ('Only %s are supported !' % ' '.join(support_list))
adjust = True
offset = len(dir.child) * sizeof(SUBPART_DIR_ENTRY) + sizeof(SUBPART_DIR_HEADER)
sub_dir_hdr = SUBPART_DIR_HEADER.from_buffer(ifwi_data, dir.offset)
dir_data = bytearray(sub_dir_hdr) + b'\xff' * (offset - sizeof(SUBPART_DIR_HEADER))
for idx, comp in enumerate(dir.child):
delta = 0
parts = os.path.splitext(comp.name)
if len(parts) > 1 and parts[1] in ['.man', '.met']:
align = 1
elif comp.name in ['IPAD', 'OPAD']:
align = 0x40
else:
align = FILE_ALIGN
delta = dir.offset & (FILE_ALIGN - 1)
next_offset = ((offset + delta + align - 1) & ~(align - 1))
count = next_offset - offset
if adjust:
adjust = False
count -= delta
dir_data.extend(b'\xff' * count)
comp_data = comp.get_data()
if comp_data:
dir_data.extend(comp_data)
else:
dir_data.extend(ifwi_data[comp.offset : comp.offset + comp.length])
sub_dir = SUBPART_DIR_ENTRY()
sub_dir.entry_name = comp.name.encode()
sub_dir.entry_offset = next_offset - delta
sub_dir.entry_size = comp.length
sub_dir.reserved1 = 0
sub_dir.reserved2 = 0
entry_offset = idx * sizeof(SUBPART_DIR_ENTRY) + sizeof(SUBPART_DIR_HEADER)
dir_data[entry_offset:entry_offset+sizeof(SUBPART_DIR_ENTRY)] = bytearray(sub_dir)
next_offset += comp.length
offset = next_offset
align = FILE_ALIGN
next_offset = ((offset + align - 1) & ~(align - 1))
dir_data.extend(b'\xff' * (next_offset - offset))
# Update checksum
sub_dir_hdr = SUBPART_DIR_HEADER.from_buffer_copy(dir_data)
sub_dir_hdr.num_of_entries = len(dir.child)
sub_dir_hdr.checksum = 0
dir_data[:sizeof(SUBPART_DIR_HEADER)] = bytearray(sub_dir_hdr)
length = sub_dir_hdr.num_of_entries * sizeof(SUBPART_DIR_ENTRY) + sizeof(SUBPART_DIR_HEADER)
sum_buf = (c_uint8 * length).from_buffer_copy(dir_data)
sub_dir_hdr.checksum = (~sum(sum_buf) + 1) & 0xFF
dir_data[:sizeof(SUBPART_DIR_HEADER)] = bytearray(sub_dir_hdr)
remaining = (dir.offset + len(dir_data)) & (FILE_ALIGN - 1)
if remaining:
# Not page aligned, add padding
dir_data.extend(b'\xff' * (FILE_ALIGN - remaining))
return dir_data
def refresh_ifwi_for_dir (self, dir, ifwi_data):
        # Calculate new DIR length and create new DIR data
dir_data = self.create_dir_data (dir, ifwi_data)
length = len (dir_data)
adjust_length = length - dir.length
if (dir.offset + length) & (FILE_ALIGN - 1):
print ('DIR total size needs to be 4KB aligned !')
# Remember original SBPDT offset
org_bpdt_offset = dir.parent.parent.child[0].offset
org_sbpdt_offset = dir.parent.parent.child[1].offset
# Adjust offset and size for peer and up level in tree
old_dir = dir
while dir.type != COMPONENT.COMP_TYPE['BP']:
for each in dir.parent.child:
if each.offset > dir.offset:
each.offset += adjust_length
dir.length += adjust_length
dir = dir.parent
dir = old_dir
# Update parent BPDT header info in IFWI data
parent = dir.parent
bpdt_hdr = BPDT_HEADER.from_buffer(ifwi_data, parent.offset)
base = parent.offset + sizeof(BPDT_HEADER)
found = False
for idx in range(bpdt_hdr.desc_cnt):
bpdt_entry = BPDT_ENTRY.from_buffer(ifwi_data, base + idx * sizeof(BPDT_ENTRY))
comps = [x for x in parent.child if x.name == str(bpdt_entry.type)]
if len(comps) == 0:
continue
if len(comps) > 1:
raise Exception ('Found duplicated DIR %s !', bpdt_entry.type)
bpdt_entry.sub_part_offset = comps[0].offset - parent.parent.offset
if dir.name == str(bpdt_entry.type):
bpdt_entry.sub_part_size = length
found = True
if not found:
raise Exception ('Could not find DIR %s !', dir.name)
# Update SBPDT DIR header in IFWI data
bp_comp = parent.parent
if parent.name == 'BPDT':
bpdt_hdr = BPDT_HEADER.from_buffer (ifwi_data, org_sbpdt_offset)
bpdt_hdr.xor_sum = 0
base_offset = org_sbpdt_offset + sizeof(BPDT_HEADER)
for idx in range(bpdt_hdr.desc_cnt):
bpdt_entry = BPDT_ENTRY.from_buffer(ifwi_data, base_offset + idx * sizeof(BPDT_ENTRY))
bpdt_entry.sub_part_offset += adjust_length
if (bpdt_entry.sub_part_offset + bpdt_entry.sub_part_size) > bp_comp.length:
                    raise Exception ('Insufficient space in layout !')
else:
# 'SBPDT', update length in BPDT
bpdt_hdr = BPDT_HEADER.from_buffer (ifwi_data, org_bpdt_offset)
bpdt_hdr.xor_sum = 0
base_offset = org_bpdt_offset + sizeof(BPDT_HEADER)
for idx in range(bpdt_hdr.desc_cnt):
bpdt_entry = BPDT_ENTRY.from_buffer(ifwi_data, base_offset + idx * sizeof(BPDT_ENTRY))
if str(bpdt_entry.type) == "BpdtSbpdt":
bpdt_entry.sub_part_size += adjust_length
if (bpdt_entry.sub_part_offset + bpdt_entry.sub_part_size) > bp_comp.length:
                    raise Exception ('Insufficient space in layout !')
# Generate actual final IFWI Data
if adjust_length > 0:
ifwi_data[:] = ifwi_data[:old_dir.offset] + dir_data + \
ifwi_data[old_dir.offset + old_dir.length - adjust_length : bp_comp.offset + bp_comp.length - adjust_length] + \
ifwi_data[bp_comp.offset + bp_comp.length:]
else:
adjust_length = -adjust_length
ifwi_data[:] = ifwi_data[:old_dir.offset] + dir_data + \
ifwi_data[old_dir.offset + old_dir.length + adjust_length: bp_comp.offset + bp_comp.length] + \
b'\xff' * adjust_length + ifwi_data[bp_comp.offset + bp_comp.length:]
return 0
def manipulate_ifwi (action, path, ifwi_data, file_name = '', before = '$'):
print ('%s %s ...' % (action, path))
root = IFWI_PARSER.parse_ifwi_binary (ifwi_data)
ifwi_op = IFWI_MANIPULATE()
if action == "REMOVE":
ret = ifwi_op.remove_component (root, path)
elif action == "ADD":
ret = ifwi_op.add_component (root, path, before, file_name)
elif action == "REPLACE":
ret = ifwi_op.replace_component (root, path, file_name)
elif action == "COPY":
ret = ifwi_op.copy_component (root, 'IFWI/BIOS/BP0/BPDT', ifwi_data)
else:
ret = -100
if ret == 0 and path:
dir_path = '/'.join(path.split('/')[:-1])
dir = IFWI_PARSER.locate_component (root, dir_path)
ifwi_op.refresh_ifwi_for_dir (dir, ifwi_data)
print ('done!')
return ret
def patch_flash_map (image_data, platform_data = 0xffffffff):
comp_bpdt_dict = {
b'RSVD' : "IFWI/BIOS/BP1/SBPDT/BpdtObb/RSVD",
b'IAS1' : "IFWI/BIOS/BP1/SBPDT/BpdtObb/FB",
b'EPLD' : "IFWI/BIOS/BP1/SBPDT/BpdtObb/EPLD",
b'UVAR' : "IFWI/BIOS/BP1/SBPDT/BpdtObb/UVAR",
b'PYLD' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/PLD",
b'VARS' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/VAR",
b'MRCD' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/MRCD",
b'CNFG' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/CFGD",
b'KEYH' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/KEYH",
b'FWUP' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/FWUP",
b'SG02' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/OBB",
b'SG1B' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/IBB",
b'SG1A' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/IBBL",
b'_BPM' : "IFWI/BIOS/BP0/BPDT/BpdtIbb/BPM.met",
}
print ("Patching Slim Bootloader Flash Map table ...")
output_image_data = image_data
ifwi = IFWI_PARSER.parse_ifwi_binary (output_image_data)
if not ifwi:
return -1
pld = IFWI_PARSER.locate_component (ifwi, comp_bpdt_dict[b'PYLD'])
if not pld:
comp_bpdt_dict[b'PYLD'] = "IFWI/BIOS/BP1/SBPDT/BpdtObb/PLD"
bp0 = IFWI_PARSER.locate_component (ifwi, 'IFWI/BIOS/BP0')
bp1 = IFWI_PARSER.locate_component (ifwi, 'IFWI/BIOS/BP1')
if not bp0 or not bp1:
return -2
# Locate FlashMap offset
for part in range(2):
path = comp_bpdt_dict[b'SG1A'].replace("BP0", "BP%d" % part)
comp = IFWI_PARSER.locate_component (ifwi, path)
if not comp:
if part == 0:
raise Exception("Cannot locate %s !" % path)
else:
continue
stage1AOffset = comp.offset
stage1ALength = comp.length
temp = stage1AOffset + stage1ALength - 8
c_uint32.from_buffer (output_image_data, temp - 4).value = platform_data
fla_map_off = (bytes_to_value(output_image_data[temp:temp+4]) + stage1ALength) & 0xFFFFFFFF
fla_map_str = FLASH_MAP.from_buffer (output_image_data, stage1AOffset + fla_map_off)
entry_num = (fla_map_str.length - sizeof(FLASH_MAP)) // sizeof(FLASH_MAP_DESC)
fla_map_str.romsize = bp0.length + bp1.length
if part == 1:
fla_map_str.attributes |= FLASH_MAP.FLASH_MAP_ATTRIBUTES['BACKUP_REGION']
for idx in range (entry_num):
desc = FLASH_MAP_DESC.from_buffer (output_image_data, stage1AOffset + fla_map_off + sizeof(FLASH_MAP) + idx * sizeof(FLASH_MAP_DESC))
path = comp_bpdt_dict[desc.sig]
if part == 1 or (desc.flags & FLASH_MAP.FLASH_MAP_DESC_FLAGS['NON_REDUNDANT']):
path = path.replace("BP0", "BP1")
if part == 1 and (desc.flags & FLASH_MAP.FLASH_MAP_DESC_FLAGS['REDUNDANT']):
desc.flags |= FLASH_MAP.FLASH_MAP_DESC_FLAGS['BACKUP']
if desc.sig == b'RSVD':
desc.offset = bp1.offset + bp1.length - desc.size - bp0.offset
continue
comp = IFWI_PARSER.locate_component (ifwi, path)
if not comp:
if desc.sig == b'KEYH':
continue
raise Exception("Cannot locate component '%s' in BPDT !" % path)
if (desc.size == 0) and (desc.offset == 0):
desc.size = comp.length
desc.offset = comp.offset - bp0.offset
continue
if desc.size != comp.length and comp.name != 'FB':
raise Exception("Mismatch component '%s' length in FlashMap and BPDT !" % comp_bpdt_dict[desc.sig])
if desc.sig not in [b'_BPM'] and (comp.offset & 0xFFF > 0):
raise Exception("Component '%s' %x is not aligned at 4KB boundary, " \
"please adjust padding size for IPAD/OPAD in BoardConfig.py and rebuild !" % (comp_bpdt_dict[desc.sig], comp.offset))
desc.offset = comp.offset - bp0.offset
# Last 4k in bios region is reserved for bootloader, throw Exception if any component falls in that range
if (bp1.offset + bp1.length - 0x1000) <= (desc.offset + desc.size) <= (bp1.offset + bp1.length):
raise Exception("Component '%s' offset is in bootloader reserved region, please try to reduce compoent size !" % comp_bpdt_dict[desc.sig])
limit = bp1.offset + bp1.length - bp0.offset - 0x40000
for idx in range (entry_num):
desc = FLASH_MAP_DESC.from_buffer (output_image_data, stage1AOffset + fla_map_off + sizeof(FLASH_MAP) + idx * sizeof(FLASH_MAP_DESC))
if desc.sig == b'RSVD':
continue
# Last 256K flash space (4GB - 256KB to 4GB) is remapped to CSME read-only SRAM on APL
        # Direct access is not available.
if desc.offset >= limit or desc.offset + desc.size > limit:
print("WARNING: Component '%s' in BP%d is located inside CSME memory mapped region, direct access might fail." % (desc.sig, part))
print ("Flash map was patched successfully!")
return 0
def create_ifwi_image (ifwi_in, ifwi_out, bios_out, platform_data, non_redundant, stitch_dir):
redundant_payload = True
ifwi_data = bytearray (get_file_data (ifwi_in))
root = IFWI_PARSER.parse_ifwi_binary (ifwi_data)
if not root:
raise Exception ('Invalid IFWI input image format !')
# Verify if Boot Guard is enabled or not
comp = IFWI_PARSER.locate_component (root, "IFWI/BIOS/BP0/BPDT/BpdtUepType")
if not comp:
raise Exception ('Unsupported base image format !')
data = ifwi_data[comp.offset + 0x30:comp.offset + 0x32]
if (data[0] & 0x0F) != 0x00:
raise Exception ('Unsupported base image type. boot guard might have been enabled in this image !')
print ('Creating %sredundant image ...' % ('non-' if non_redundant else ''))
# Remove all in IBB/OBB
remove_list = [
"IFWI/BIOS/BP0/BPDT/BpdtIbb",
"IFWI/BIOS/BP1/BPDT/BpdtIbb",
"IFWI/BIOS/BP1/SBPDT/BpdtObb"
]
for dir_path in remove_list:
comp = IFWI_PARSER.locate_component (root, dir_path)
if not comp:
continue
for each in comp.child:
if each.name.endswith('.man') or each.name.endswith('.met'):
continue
ret = manipulate_ifwi ('REMOVE', dir_path + '/' + each.name, ifwi_data)
if ret != 0:
raise Exception ('REMOVE failed (error code %d) !' % (ret))
# Copy BP0 BPDT into BP1 BPDT
if not non_redundant:
ret = manipulate_ifwi ('COPY', '', ifwi_data)
if ret != 0:
raise Exception ('COPY failed (error code %d) !' % (ret))
if stitch_dir:
ibb_list = [
('IBBL' , 'IBBL'),
('IBB' , 'IBBM'),
('OBB' , 'OBB'),
('FWUP' , 'FWU'),
('CFGD' , 'CFGDATA'),
('KEYH' , 'KEYHASH'),
('VAR' , 'VAR'),
('MRCD' , 'MRCDATA'),
('PLD' , 'PLD'),
]
obb_list = [
('FB' , 'FB'),
('EPLD' , 'EPLD'),
('UVAR' , 'UVAR'),
('PLD' , 'PLD'),
]
# optional components
opt_list = [
'EPLD', 'UVAR'
]
if redundant_payload:
del obb_list[-1]
else:
del ibb_list[-1]
bp1sbpdt = "IFWI/BIOS/BP1/SBPDT/BpdtObb/"
loop = 1 if non_redundant else 2
for bp in range(loop):
dir = "IFWI/BIOS/BP%d/BPDT/BpdtIbb/" % bp
for comp_name, file_name in ibb_list:
file_path = os.path.join(stitch_dir, 'Stitch_%s.bin' % file_name)
ret = manipulate_ifwi ('ADD', dir + comp_name, ifwi_data, file_path)
if ret != 0:
raise Exception ('ADD failed (error code %d) !' % (ret))
for comp_name, file_name in obb_list:
if file_name == '':
file_path = ''
else:
file_path = os.path.join(stitch_dir, 'Stitch_%s.bin' % file_name)
if (comp_name in opt_list) and not os.path.exists(file_path):
ret = 0
else:
ret = manipulate_ifwi ('ADD', bp1sbpdt + comp_name, ifwi_data, file_path)
if ret != 0:
raise Exception ('ADD failed (error code %d) !' % (ret))
patch_flash_map (ifwi_data, platform_data)
if bios_out:
print ('Creating BIOS image ...')
bios = IFWI_PARSER.locate_component (root, 'IFWI/BIOS')
fd = open (bios_out, 'wb')
fd.write(ifwi_data[bios.offset:bios.offset+bios.length])
fd.close()
print ('Creating IFWI image ...')
fd = open (ifwi_out, 'wb')
fd.write(ifwi_data)
fd.close()
print ('Done!')
def print_ifwi_layout (ifwi_file):
ifwi_parser = IFWI_PARSER ()
ifwi_bin = bytearray (get_file_data (ifwi_file))
ifwi = ifwi_parser.parse_ifwi_binary (ifwi_bin)
if ifwi:
ifwi_parser.print_tree (ifwi)
else:
print ('Invalid IFWI image')
return 0
if __name__ == '__main__':
hexstr = lambda x: int(x, 16)
ap = argparse.ArgumentParser()
ap.add_argument('-i',
'--input-ifwi-file',
dest='ifwi_in',
type=str,
required=True,
help='specify input template IFWI image file path')
ap.add_argument('-o',
'--output-ifwi-file',
dest='ifwi_out',
type=str,
default='',
help='specify generated output IFWI image file path')
ap.add_argument('-b',
'--output-bios-region',
dest='bios_out',
type=str,
default='',
help='specify generated output BIOS region image file path')
ap.add_argument('-s',
                    '--stitch-zip-file',
dest='stitch_in',
type=str,
default='',
                    help='specify input stitching zip package file path')
ap.add_argument('-p',
'--platform-data',
dest='plat_data',
type=hexstr,
default=0xFFFFFFFF,
                    help='specify platform-specific data (HEX, DWORD) for customization')
ap.add_argument('-n',
'--non-redundant',
dest='non_redundant',
action="store_true",
                    help='generate a non-redundant flash layout (the default layout is redundant)')
if len(sys.argv) == 1:
print('%s' % extra_usage_txt)
args = ap.parse_args()
if args.ifwi_out == '' and args.stitch_in == '':
print_ifwi_layout (args.ifwi_in)
sys.exit (0)
else:
if args.ifwi_out and args.stitch_in == '':
ret = create_ifwi_image (args.ifwi_in, args.ifwi_out, args.bios_out, args.plat_data, args.non_redundant, None)
sys.exit (ret)
# Unpack files from zip
print ("Unpacking sitching ZIP package ...")
output_dir = os.path.dirname(args.ifwi_out)
stitch_dir = os.path.join(output_dir, 'stitch_comp')
if os.path.exists(stitch_dir):
shutil.rmtree(stitch_dir)
zf = zipfile.ZipFile(args.stitch_in, 'r', zipfile.ZIP_DEFLATED)
zf.extractall(stitch_dir)
zf.close()
# Create new IFWI
ret = create_ifwi_image (args.ifwi_in, args.ifwi_out, args.bios_out, args.plat_data, args.non_redundant, stitch_dir)
# Remove extracted files
if os.path.exists(stitch_dir):
shutil.rmtree(stitch_dir)
sys.exit (ret)
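# Illustrative invocations (a sketch; the script name "StitchIfwi.py" and all file
# paths are placeholders, not part of the original source):
#   python StitchIfwi.py -i ifwi_template.bin -o ifwi_out.bin -s stitch_package.zip
#       unpack the stitching ZIP and rebuild a full IFWI image
#   python StitchIfwi.py -i ifwi_template.bin -o ifwi_out.bin -p 0x12345678
#       rebuild the IFWI from the template alone, patching in the platform data
#   python StitchIfwi.py -i ifwi_template.bin
#       print the layout of the input IFWI image and exit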
| [
[
[
224,
226
],
[
440,
442
],
[
456,
458
],
[
469,
471
],
[
517,
519
],
[
565,
567
],
[
610,
612
],
[
25411,
25413
],
[
25459,
25461
],
[
25506,
25508
],
[
25862,
25864
],
[
3072,
3074
],
[
4443,
4445
],
[
7338,
7340
],
[
21855,
21857
],
[
22258,
22260
],
[
22359,
22361
]
],
[
[
234,
236
]
],
[
[
244,
247
],
[
395,
398
],
[
593,
596
],
[
24907,
24910
],
[
25092,
25095
],
[
25301,
25304
],
[
25929,
25932
]
],
[
[
255,
261
]
],
[
[
269,
277
],
[
23392,
23400
]
],
[
[
285,
292
],
[
25577,
25584
],
[
25614,
25621
]
],
[
[
300,
306
],
[
25542,
25548
],
[
25898,
25904
]
],
[
[
326,
327
],
[
2317,
2328
],
[
2507,
2516
],
[
3143,
3152
],
[
3165,
3174
],
[
3455,
3466
],
[
3646,
3655
],
[
4152,
4163
],
[
4312,
4321
],
[
4736,
4747
],
[
4808,
4819
],
[
4885,
4896
],
[
4962,
4973
],
[
5899,
5910
],
[
5960,
5966
],
[
5967,
5978
],
[
6050,
6060
],
[
6227,
6233
],
[
6234,
6244
],
[
6328,
6339
],
[
6389,
6395
],
[
6396,
6407
],
[
6479,
6489
],
[
6621,
6627
],
[
6628,
6638
],
[
7023,
7029
],
[
7030,
7047
],
[
7051,
7057
],
[
7058,
7076
],
[
7100,
7118
],
[
7220,
7226
],
[
7227,
7245
],
[
8118,
8135
],
[
8399,
8405
],
[
8406,
8423
],
[
8427,
8433
],
[
8434,
8452
],
[
8501,
8507
],
[
8508,
8525
],
[
8824,
8842
],
[
8973,
8979
],
[
8980,
8998
],
[
9076,
9082
],
[
9083,
9100
],
[
9104,
9110
],
[
9111,
9129
],
[
9153,
9160
],
[
9274,
9280
],
[
9281,
9299
],
[
10195,
10204
],
[
10552,
10563
],
[
10636,
10642
],
[
10643,
10654
],
[
10751,
10761
],
[
10798,
10804
],
[
10805,
10815
],
[
11501,
11512
],
[
11633,
11639
],
[
11640,
11651
],
[
11731,
11741
],
[
11785,
11791
],
[
11792,
11802
],
[
12115,
12126
],
[
12245,
12251
],
[
12252,
12263
],
[
12343,
12353
],
[
12397,
12403
],
[
12404,
12414
],
[
13550,
13561
],
[
14121,
14132
],
[
15179,
15190
],
[
15276,
15287
],
[
15433,
15444
],
[
15497,
15508
],
[
15735,
15746
],
[
16058,
16066
],
[
16155,
16169
],
[
16254,
16263
],
[
16368,
16374
],
[
16375,
16384
],
[
16390,
16396
],
[
16397,
16411
],
[
16528,
16537
],
[
16635,
16649
],
[
16712,
16718
],
[
16719,
16728
],
[
16738,
16744
],
[
16745,
16759
],
[
16848,
16857
],
[
16991,
17000
],
[
17067,
17076
],
[
17269,
17280
],
[
18706,
18720
],
[
18783,
18789
],
[
18790,
18799
],
[
18809,
18815
],
[
18816,
18830
],
[
19480,
19493
],
[
19516,
19527
],
[
19694,
19705
],
[
20337,
20348
],
[
22741,
22752
],
[
23088,
23099
],
[
23129,
23142
]
],
[
[
352,
364
]
],
[
[
387,
393
]
],
[
[
426,
437
],
[
533,
544
],
[
623,
634
]
],
[
[
551,
562
],
[
623,
634
]
],
[
[
706,
707
],
[
2317,
2328
],
[
2507,
2516
],
[
3143,
3152
],
[
3165,
3174
],
[
3455,
3466
],
[
3646,
3655
],
[
4152,
4163
],
[
4312,
4321
],
[
4736,
4747
],
[
4808,
4819
],
[
4885,
4896
],
[
4962,
4973
],
[
5899,
5910
],
[
5960,
5966
],
[
5967,
5978
],
[
6050,
6060
],
[
6227,
6233
],
[
6234,
6244
],
[
6328,
6339
],
[
6389,
6395
],
[
6396,
6407
],
[
6479,
6489
],
[
6621,
6627
],
[
6628,
6638
],
[
7023,
7029
],
[
7030,
7047
],
[
7051,
7057
],
[
7058,
7076
],
[
7100,
7118
],
[
7220,
7226
],
[
7227,
7245
],
[
8118,
8135
],
[
8399,
8405
],
[
8406,
8423
],
[
8427,
8433
],
[
8434,
8452
],
[
8501,
8507
],
[
8508,
8525
],
[
8824,
8842
],
[
8973,
8979
],
[
8980,
8998
],
[
9076,
9082
],
[
9083,
9100
],
[
9104,
9110
],
[
9111,
9129
],
[
9153,
9160
],
[
9274,
9280
],
[
9281,
9299
],
[
10195,
10204
],
[
10552,
10563
],
[
10636,
10642
],
[
10643,
10654
],
[
10751,
10761
],
[
10798,
10804
],
[
10805,
10815
],
[
11501,
11512
],
[
11633,
11639
],
[
11640,
11651
],
[
11731,
11741
],
[
11785,
11791
],
[
11792,
11802
],
[
12115,
12126
],
[
12245,
12251
],
[
12252,
12263
],
[
12343,
12353
],
[
12397,
12403
],
[
12404,
12414
],
[
13550,
13561
],
[
14121,
14132
],
[
15179,
15190
],
[
15276,
15287
],
[
15433,
15444
],
[
15497,
15508
],
[
15735,
15746
],
[
16058,
16066
],
[
16155,
16169
],
[
16254,
16263
],
[
16368,
16374
],
[
16375,
16384
],
[
16390,
16396
],
[
16397,
16411
],
[
16528,
16537
],
[
16635,
16649
],
[
16712,
16718
],
[
16719,
16728
],
[
16738,
16744
],
[
16745,
16759
],
[
16848,
16857
],
[
16991,
17000
],
[
17067,
17076
],
[
17269,
17280
],
[
18706,
18720
],
[
18783,
18789
],
[
18790,
18799
],
[
18809,
18815
],
[
18816,
18830
],
[
19480,
19493
],
[
19516,
19527
],
[
19694,
19705
],
[
20337,
20348
],
[
22741,
22752
],
[
23088,
23099
],
[
23129,
23142
]
],
[
[
732,
739
],
[
783,
790
]
],
[
[
920,
935
],
[
24944,
24959
]
],
[
[
2096,
2106
],
[
7575,
7585
],
[
7624,
7634
],
[
8645,
8655
],
[
9381,
9391
],
[
9502,
9512
],
[
9838,
9848
]
],
[
[
2125,
2140
],
[
13606,
13621
]
],
[
[
13421,
13436
],
[
20573,
20588
],
[
20817,
20832
],
[
21931,
21946
],
[
22450,
22465
]
],
[
[
14268,
14283
],
[
22627,
22642
]
],
[
[
19331,
19348
],
[
25184,
25201
],
[
25714,
25731
]
],
[
[
23039,
23056
],
[
25051,
25068
]
],
[
[
23353,
23359
],
[
24520,
24526
]
],
[
[
23387,
23389
],
[
23422,
23424
],
[
23663,
23665
],
[
23905,
23907
],
[
24156,
24158
],
[
24396,
24398
],
[
24665,
24667
],
[
24973,
24975
]
],
[
[
24966,
24970
],
[
24997,
25001
],
[
25021,
25025
],
[
25070,
25074
],
[
25126,
25130
],
[
25144,
25148
],
[
25203,
25207
],
[
25217,
25221
],
[
25232,
25236
],
[
25247,
25251
],
[
25263,
25267
],
[
25427,
25431
],
[
25593,
25597
],
[
25733,
25737
],
[
25747,
25751
],
[
25762,
25766
],
[
25777,
25781
],
[
25793,
25797
]
],
[
[
25178,
25181
],
[
25311,
25314
]
],
[
[
25398,
25408
],
[
25472,
25482
]
],
[
[
25446,
25456
],
[
25521,
25531
],
[
25556,
25566
],
[
25654,
25664
],
[
25813,
25823
],
[
25877,
25887
],
[
25912,
25922
]
],
[
[
25572,
25574
],
[
25640,
25642
],
[
25670,
25672
]
],
[
[
25708,
25711
],
[
25939,
25942
]
]
] |
'''
This is a sample class that you can use to control the mouse pointer.
It uses the pyautogui library. You can set the precision for mouse movement
(how much the mouse moves) and the speed (how fast it moves) by changing
precision_dict and speed_dict.
Calling the move function with the x and y output of the gaze estimation model
will move the pointer.
This class is provided to help get you started; you can choose whether you want to use it or create your own from scratch.
'''
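# Illustrative usage (a sketch; the x/y values stand in for the gaze estimation
# model's output and are not part of the original source):
#   mouse = MouseController(precision='medium', speed='fast')
#   mouse.move(0.2, -0.1)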
import pyautogui
pyautogui.FAILSAFE = False
class MouseController:
def __init__(self, precision, speed):
precision_dict={'high':100, 'low':1000, 'medium':500}
speed_dict={'fast':1, 'slow':10, 'medium':5}
self.precision=precision_dict[precision]
self.speed=speed_dict[speed]
def move(self, x, y):
pyautogui.moveRel(x*self.precision, -1*y*self.precision, duration=self.speed) | [
[
[
491,
500
],
[
501,
510
],
[
830,
839
]
],
[
[
534,
549
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntOcrVehicleplateIdentifyModel(object):
def __init__(self):
self._image = None
self._type = None
@property
def image(self):
return self._image
@image.setter
def image(self, value):
self._image = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.image:
if hasattr(self.image, 'to_alipay_dict'):
params['image'] = self.image.to_alipay_dict()
else:
params['image'] = self.image
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AntOcrVehicleplateIdentifyModel()
if 'image' in d:
o.image = d['image']
if 'type' in d:
o.type = d['type']
return o
| [
[
[
53,
57
]
],
[
[
110,
111
]
],
[
[
120,
151
],
[
1087,
1118
]
]
] |
import json
import web
import calendar
import datetime
import cloudserver
urls = (
"/BuildingFootprint/", "BuildingFootprint",
"/BuildingFootprintDisaggregated/", "BuildingFootprintDisaggregated",
"/PersonalConsumption/", "PersonalConsumption",
"/HistoricalConsumption/", "HistoricalConsumption")
class BuildingFootprint:
def GET(self):
raw_time = web.input()
if "end" not in raw_time:
end = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
else:
end = float(raw_time['end'])
if "start" not in raw_time:
start = calendar.timegm(datetime.datetime.utcnow().utctimetuple())-24*60*60 #1 day
else:
start = float(raw_time['start'])
return cloudserver.db.buildingFootprint(start, end)
class BuildingFootprintDisaggregated:
def GET(self):
raw_time = web.input()
if "end" not in raw_time:
end = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
else:
end = float(raw_time['end'])
if "start" not in raw_time:
start = calendar.timegm(datetime.datetime.utcnow().utctimetuple())-24*60*60 #1 day
else:
start = float(raw_time['start'])
return cloudserver.db.buildingFootprintDisaggregated(start, end)
class PersonalConsumption:
def GET(self):
print("Got to Personal Consumption")
raw_data = web.input()
end = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
if "end" in raw_data:
end = float(raw_data['end'])
start = calendar.timegm(datetime.datetime.utcnow().utctimetuple())-24*60*60 #1 day
if "start" in raw_data:
start = float(raw_data['start'])
user = "Peter Wei"
if "user" in raw_data:
user = raw_data['user']
return cloudserver.db.personalFootprint(user, start, end)
class HistoricalConsumption:
def GET(self):
return cloudserver.db.historicalConsumption()
dataExtraction = web.application(urls, locals())
| [
[
[
7,
11
]
],
[
[
19,
22
],
[
1788,
1791
],
[
357,
360
],
[
787,
790
],
[
1258,
1261
]
],
[
[
30,
38
],
[
406,
414
],
[
546,
554
],
[
836,
844
],
[
976,
984
],
[
1278,
1286
],
[
1404,
1412
]
],
[
[
46,
54
],
[
422,
430
],
[
562,
570
],
[
852,
860
],
[
992,
1000
],
[
1294,
1302
],
[
1420,
1428
]
],
[
[
63,
74
],
[
674,
685
],
[
1104,
1115
],
[
1625,
1636
],
[
1731,
1742
]
],
[
[
75,
79
],
[
1804,
1808
]
],
[
[
309,
326
]
],
[
[
726,
756
]
],
[
[
1169,
1188
]
],
[
[
1683,
1704
]
],
[
[
1771,
1785
]
]
] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""
import torch
from torch import nn
from openpose.structures.image_list import to_image_list
from ..backbone import build_backbone
from ..rpn.rpn import build_rpn
from ..roi_heads.roi_heads import build_roi_heads
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN. Currently supports boxes and masks.
It consists of three main parts:
- backbone
- rpn
- heads: takes the features + the proposals from the RPN and computes
detections / masks from it.
"""
def __init__(self, cfg):
super(GeneralizedRCNN, self).__init__()
self.backbone = build_backbone(cfg)
self.rpn = build_rpn(cfg, self.backbone.out_channels)
self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)
def forward(self, images, targets=None):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] that contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training and targets is None:
raise ValueError("In training mode, targets should be passed")
images = to_image_list(images)
features = self.backbone(images.tensors)
proposals, proposal_losses = self.rpn(images, features, targets)
if self.roi_heads:
x, result, detector_losses = self.roi_heads(features, proposals, targets)
else:
# RPN-only models don't have roi_heads
x = features
result = proposals
detector_losses = {}
if self.training:
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
            return losses, result
return result
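# Illustrative usage (a sketch; `cfg`, `images` and `targets` come from the surrounding
# maskrcnn-benchmark-style code base and are assumed here):
#   model = GeneralizedRCNN(cfg)
#   model.train()
#   losses, detections = model(images, targets)   # training: loss dict plus detections
#   model.eval()
#   detections = model(images)                     # inference: list[BoxList]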
| [
[
[
131,
136
]
],
[
[
155,
157
],
[
361,
363
]
],
[
[
202,
215
],
[
1632,
1645
]
],
[
[
240,
254
],
[
746,
760
]
],
[
[
277,
286
],
[
785,
794
]
],
[
[
321,
336
],
[
853,
868
]
],
[
[
345,
360
],
[
679,
694
]
]
] |
"""Test DeltaPySimulator functionality pre-execution."""
import unittest
import deltalanguage as dl
from deltalanguage.test._graph_lib import (getg_const_chain,
getg_optional_queues)
class DeltaQueueCreationTest(unittest.TestCase):
"""Test that the simulator creates queues properly."""
def test_queue_types(self):
"""Test that queues of correct types are created (or not) depending on
the type of the source and destination nodes.
"""
graph = getg_const_chain()
dl.DeltaPySimulator(graph)
self.assertEqual(len(graph.nodes[0].out_queues), 0)
self.assertEqual(len(graph.nodes[1].out_queues), 1)
self.assertEqual(len(graph.nodes[2].out_queues), 1)
self.assertEqual(type(graph.nodes[1].out_queues['output']),
dl.runtime.ConstQueue)
self.assertEqual(type(graph.nodes[2].out_queues['output']),
dl.runtime.DeltaQueue)
def test_queue_optionality(self):
"""Test that queues inhere correct optionality depending on the type of
the destination node.
"""
graph = getg_optional_queues()
dl.DeltaPySimulator(graph)
self.assertEqual(graph.nodes[0].out_queues['output'].optional, True)
self.assertEqual(graph.nodes[1].out_queues['output'].optional, False)
if __name__ == "__main__":
unittest.main()
| [
[
[
65,
73
],
[
260,
268
],
[
1425,
1433
]
],
[
[
82,
101
],
[
560,
562
],
[
862,
864
],
[
978,
980
],
[
1209,
1211
]
],
[
[
146,
162
],
[
533,
549
]
],
[
[
207,
227
],
[
1178,
1198
]
],
[
[
237,
259
]
]
] |
from website import create_app
app = create_app()
if __name__ == '__main__':
app.run(debug=True)
| [
[
[
20,
30
],
[
38,
48
]
],
[
[
32,
35
],
[
81,
84
]
]
] |
from typing import List
from blogs.api.interface import IBlogApiExecutor
from domain.blog.blog_entry import BlogEntry, BlogEntries
from domain.doc.doc_entry import DocEntries, DocEntry
from dump.blog_to_doc_mapping import BlogDocEntryMapping
from dump.interface import IDumpEntriesAccessor
from files.conf.category_group_def import CategoryGroupDef
from service.external.blog_entry_index_updater import update_blog_entry_summary_file
from service.external.blog_entry_pusher import push_blog_and_photo_entry
from service.local.doc_entry_pusher import push_documents_to_docs
def push_entry_to_docs_and_blog(api_executor: IBlogApiExecutor,
dump_blog_data_accessor: IDumpEntriesAccessor[BlogEntries, BlogEntry],
dump_doc_data_accessor: IDumpEntriesAccessor[DocEntries, DocEntry],
category_group_def: CategoryGroupDef, is_draft: bool,
target_dir_names: List[str] = None):
doc_entries = push_documents_to_docs(dump_doc_data_accessor, category_group_def, target_dir_names)
if doc_entries is None:
return
__push_entry_from_docs_to_blog(api_executor, dump_blog_data_accessor, category_group_def, doc_entries, is_draft)
def push_entry_from_docs_to_blog(api_executor: IBlogApiExecutor,
dump_blog_data_accessor: IDumpEntriesAccessor[BlogEntries, BlogEntry],
dump_doc_data_accessor: IDumpEntriesAccessor[DocEntries, DocEntry],
category_group_def: CategoryGroupDef,
target_doc_entry_ids: List[str], is_draft: bool):
doc_entries: DocEntries = dump_doc_data_accessor.load_entries(target_doc_entry_ids)
__push_entry_from_docs_to_blog(api_executor, dump_blog_data_accessor, category_group_def, doc_entries, is_draft)
def __push_entry_from_docs_to_blog(api_executor: IBlogApiExecutor,
dump_blog_data_accessor: IDumpEntriesAccessor[BlogEntries, BlogEntry],
category_group_def: CategoryGroupDef, doc_entries: DocEntries, is_draft: bool):
blog_doc_mapping = BlogDocEntryMapping()
updated_blog_entry_list: List[BlogEntry] = []
for doc_entry in doc_entries.entry_list:
blog_entry_id_opt = blog_doc_mapping.get_blog_entry_id(doc_entry.id)
old_blog_entry_opt = None if blog_entry_id_opt is None else dump_blog_data_accessor.load_entry(
blog_entry_id_opt)
new_blog_entry_opt = push_blog_and_photo_entry(api_executor, doc_entry, is_draft, old_blog_entry_opt)
if new_blog_entry_opt is None:
print(f'[Info] blog push skip. (dir: {doc_entry.dir_path})')
continue
updated_blog_entry_list.append(new_blog_entry_opt)
blog_doc_mapping.push_entry_pair(new_blog_entry_opt.id, doc_entry.id)
# dump to file
updated_blog_entries = BlogEntries(updated_blog_entry_list)
dump_blog_data_accessor.save_entries(updated_blog_entries)
blog_doc_mapping.dump_file()
update_blog_entry_summary_file(dump_blog_data_accessor, category_group_def, updated_blog_entries)
| [
[
[
19,
23
],
[
979,
983
],
[
1659,
1663
],
[
2256,
2260
]
],
[
[
57,
73
],
[
622,
638
],
[
1310,
1326
],
[
1943,
1959
]
],
[
[
109,
118
],
[
731,
740
],
[
1420,
1429
],
[
2055,
2064
],
[
2261,
2270
]
],
[
[
120,
131
],
[
718,
729
],
[
1407,
1418
],
[
2042,
2053
],
[
2960,
2971
]
],
[
[
165,
175
],
[
820,
830
],
[
1510,
1520
],
[
1704,
1714
],
[
2153,
2163
]
],
[
[
177,
185
],
[
832,
840
],
[
1522,
1530
]
],
[
[
223,
242
],
[
2205,
2224
]
],
[
[
270,
290
],
[
697,
717
],
[
799,
819
],
[
1386,
1406
],
[
1489,
1509
],
[
2021,
2041
]
],
[
[
333,
349
],
[
895,
911
],
[
1586,
1602
],
[
2122,
2138
]
],
[
[
404,
434
],
[
3097,
3127
]
],
[
[
482,
507
],
[
2563,
2588
]
],
[
[
551,
573
],
[
1016,
1038
]
],
[
[
580,
607
]
],
[
[
1267,
1295
]
],
[
[
1898,
1928
],
[
1148,
1178
],
[
1779,
1809
]
]
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-19 08:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0014_auto_20161216_1359'),
('cms', '0014_auto_20161216_1424'),
]
operations = [
]
| [
[
[
96,
112
]
],
[
[
136,
146
],
[
165,
175
]
],
[
[
155,
164
]
]
] |
"""A high-level interface to local Galaxy instances using bioblend."""
from six import StringIO
from planemo.bioblend import ensure_module
from planemo.bioblend import galaxy
DEFAULT_MASTER_API_KEY = "test_key"
def gi(port=None, url=None, key=None):
"""Return a bioblend ``GalaxyInstance`` for Galaxy on this port."""
ensure_module()
if key is None:
key = DEFAULT_MASTER_API_KEY
if port is None:
url = url
else:
url = "http://localhost:%d" % int(port)
return galaxy.GalaxyInstance(
url=url,
key=key
)
def user_api_key(admin_gi):
"""Use an admin authenticated account to generate a user API key."""
ensure_module()
# TODO: thread-safe
users = admin_gi.users
all_users = users.get_users()
user_id = None
for user in all_users:
if user["email"] == "planemo@galaxyproject.org":
user_id = user["id"]
if user_id is None:
# TODO: Allow override with --user_api_key.
galaxy_config = admin_gi.config.get_config()
use_remote_user = bool(galaxy_config["use_remote_user"])
if not use_remote_user:
user_response = users.create_local_user(
"planemo",
"planemo@galaxyproject.org",
"planemo",
)
user_id = user_response["id"]
else:
user_response = users.create_remote_user(
"planemo@galaxyproject.org",
)
user_id = user_response["id"]
return users.create_user_apikey(user_id)
def summarize_history(ctx, gi, history_id):
"""Summarize a history with print() based on similar code in Galaxy for populators.
"""
if not ctx.verbose:
return
if history_id is None:
raise ValueError("summarize_history passed empty history_id")
try:
history_contents = gi.histories.show_history(history_id, contents=True)
except Exception:
print("Failed to fetch history contents in summarize_history.")
return
for history_content in history_contents:
history_content_id = history_content.get('id', None)
print("| %d - %s (HID - NAME) " % (int(history_content['hid']), history_content['name']))
if history_content['history_content_type'] == 'dataset_collection':
history_contents_json = gi.histories.show_dataset_collection(history_id, history_content["id"])
print("| Dataset Collection: %s" % history_contents_json)
continue
try:
dataset_info = gi.histories.show_dataset(history_id, history_content_id)
print("| Dataset State:")
print(_format_for_summary(dataset_info.get("state"), "Dataset state is unknown."))
print("| Dataset Blurb:")
print(_format_for_summary(dataset_info.get("misc_blurb", ""), "Dataset blurb was empty."))
print("| Dataset Info:")
print(_format_for_summary(dataset_info.get("misc_info", ""), "Dataset info is empty."))
print("| Peek:")
print(_format_for_summary(dataset_info.get("peek", ""), "Peek unavilable."))
except Exception:
print("| *PLANEMO ERROR FETCHING DATASET DETAILS*")
try:
provenance_info = _dataset_provenance(gi, history_id, history_content_id)
print("| Dataset Job Standard Output:")
print(_format_for_summary(provenance_info.get("stdout", ""), "Standard output was empty."))
print("| Dataset Job Standard Error:")
print(_format_for_summary(provenance_info.get("stderr", ""), "Standard error was empty."))
except Exception:
print("| *PLANEMO ERROR FETCHING JOB DETAILS*")
print("|")
def _format_for_summary(blob, empty_message, prefix="| "):
contents = "\n".join(["%s%s" % (prefix, line.strip()) for line in StringIO(blob).readlines() if line.rstrip("\n\r")])
return contents or "%s*%s*" % (prefix, empty_message)
def _dataset_provenance(gi, history_id, id):
provenance = gi.histories.show_dataset_provenance(history_id, id)
return provenance
__all__ = (
"DEFAULT_MASTER_API_KEY",
"gi",
"user_api_key",
)
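# Illustrative usage (a sketch; the port value and the `ctx`/`history_id` arguments are
# assumptions supplied by the caller):
#   instance = gi(port=8080)
#   api_key = user_api_key(instance)
#   summarize_history(ctx, instance, history_id)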
| [
[
[
87,
95
],
[
3888,
3896
]
],
[
[
126,
139
],
[
330,
343
],
[
681,
694
]
],
[
[
169,
175
],
[
512,
518
]
],
[
[
177,
199
],
[
380,
402
]
],
[
[
219,
221
]
],
[
[
580,
592
]
],
[
[
1574,
1591
]
],
[
[
3762,
3781
],
[
2679,
2698
],
[
2812,
2831
],
[
2952,
2971
],
[
3081,
3100
],
[
3411,
3430
],
[
3566,
3585
]
],
[
[
4004,
4023
],
[
3285,
3304
]
],
[
[
4139,
4146
]
]
] |
#!/usr/bin/env python
#
# Python installation script
# Author - @chowmean
from __future__ import print_function
import os.path
import sys
import setuptools
# Project variables
VER_PROP_FILE = os.path.join(os.path.dirname(__file__), 'version.properties')
REQUIREMENTS_FILE = os.path.join(os.path.dirname(__file__), 'requirements.txt')
CLASSIFIERS = [
"Programming Language :: Python",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Development Status :: 4 - Beta",
"Environment :: Plugins",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: Other/Proprietary License",
"Natural Language :: English",
]
# Read version properties file and extract version number.
def get_version():
version = "0.1.4"
try:
with open(VER_PROP_FILE) as f:
for line in f.readlines():
if line.startswith("version="):
                    # take everything after the "version=" prefix (lstrip() removes a
                    # character set, not a prefix, so it could eat leading value characters)
                    version = line[len("version="):].strip()
break
except IOError as ioe:
print(ioe, file=sys.stderr)
return version
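# The version.properties file parsed above is expected to contain a line of the form
# (illustrative value only):
#   version=0.1.4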
# Read requirements.txt file and extract the list of dependencies.
def get_install_requirements():
# read requirements
requires = []
try:
with open(REQUIREMENTS_FILE) as f:
requires = list(map(lambda l: l.strip(), f.readlines()))
except IOError as ioe:
print(ioe, file=sys.stderr)
sys.exit(1)
return requires
if __name__ == '__main__':
with open('README.md', 'r') as f:
readme = f.read()
setuptools.setup(
name="db_sheet",
version=get_version(),
description="db_sheet: Using Google Spreadsheets as Database.",
author="chowmean",
author_email="gaurav.dev.iiitm@gmail.com",
url="https://github.com/chowmean/DBSheet",
keywords=["DBSheet, db_sheet, google spreadsheets. excel"],
install_requires=get_install_requirements(),
packages=["db_sheet", ],
classifiers=CLASSIFIERS,
long_description=readme,
long_description_content_type="text/markdown",
license="Apache-2.0"
) | [
[
[
98,
112
]
],
[
[
120,
127
],
[
194,
196
],
[
207,
209
],
[
276,
278
],
[
289,
291
]
],
[
[
135,
138
],
[
1064,
1067
],
[
1408,
1411
],
[
1428,
1431
]
],
[
[
146,
156
],
[
1559,
1569
]
],
[
[
178,
191
],
[
817,
830
]
],
[
[
256,
273
],
[
1263,
1280
]
],
[
[
336,
347
],
[
2008,
2019
]
],
[
[
753,
764
],
[
1618,
1629
]
],
[
[
1166,
1190
],
[
1927,
1951
]
],
[
[
1525,
1526
],
[
1545,
1546
]
],
[
[
1536,
1542
],
[
2046,
2052
]
]
] |
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.contrib.contenttypes.models import ContentType
import itertools
class VoteQuerySet(QuerySet):
def delete(self, *args, **kwargs):
"""Handles updating the related `votes` and `score` fields attached to the model."""
# XXX: circular import
from fields import RatingField
qs = self.distinct().values_list('content_type', 'object_id').order_by('content_type')
to_update = []
for content_type, objects in itertools.groupby(qs, key=lambda x: x[0]):
model_class = ContentType.objects.get(pk=content_type).model_class()
if model_class:
to_update.extend(list(model_class.objects.filter(pk__in=list(objects)[0])))
retval = super(VoteQuerySet, self).delete(*args, **kwargs)
# TODO: this could be improved
for obj in to_update:
for field in getattr(obj, '_djangoratings', []):
getattr(obj, field.name)._update(commit=False)
obj.save()
return retval
class VoteManager(Manager):
def get_query_set(self):
return VoteQuerySet(self.model)
def get_for_user_in_bulk(self, objects, user):
objects = list(objects)
if len(objects) > 0:
ctype = ContentType.objects.get_for_model(objects[0])
votes = list(self.filter(content_type__pk=ctype.id,
object_id__in=[obj._get_pk_val() \
for obj in objects],
user__pk=user.id))
vote_dict = dict([(vote.object_id, vote) for vote in votes])
else:
vote_dict = {}
return vote_dict
class SimilarUserManager(Manager):
def get_recommendations(self, user, model_class, min_score=1):
from djangoratings.models import Vote, IgnoredObject
content_type = ContentType.objects.get_for_model(model_class)
params = dict(
v=Vote._meta.db_table,
sm=self.model._meta.db_table,
m=model_class._meta.db_table,
io=IgnoredObject._meta.db_table,
)
objects = model_class._default_manager.extra(
tables=[params['v']],
where=[
'%(v)s.object_id = %(m)s.id and %(v)s.content_type_id = %%s' % params,
'%(v)s.user_id IN (select to_user_id from %(sm)s where from_user_id = %%s and exclude = 0)' % params,
'%(v)s.score >= %%s' % params,
# Exclude already rated maps
'%(v)s.object_id NOT IN (select object_id from %(v)s where content_type_id = %(v)s.content_type_id and user_id = %%s)' % params,
# IgnoredObject exclusions
'%(v)s.object_id NOT IN (select object_id from %(io)s where content_type_id = %(v)s.content_type_id and user_id = %%s)' % params,
],
params=[content_type.id, user.id, min_score, user.id, user.id]
).distinct()
# objects = model_class._default_manager.filter(pk__in=content_type.votes.extra(
# where=['user_id IN (select to_user_id from %s where from_user_id = %d and exclude = 0)' % (self.model._meta.db_table, user.pk)],
# ).filter(score__gte=min_score).exclude(
# object_id__in=IgnoredObject.objects.filter(content_type=content_type, user=user).values_list('object_id', flat=True),
# ).exclude(
# object_id__in=Vote.objects.filter(content_type=content_type, user=user).values_list('object_id', flat=True)
# ).distinct().values_list('object_id', flat=True))
return objects
def update_recommendations(self):
# TODO: this is mysql only atm
        # TODO: this doesn't handle scores that have multiple values (e.g. 10 points, 5 stars)
        # because it counts an agreement as score = score. We would need to loop over each rating
        # instance and express the condition based on its range.
from djangoratings.models import Vote
from django.db import connection, DatabaseError
cursor = connection.cursor()
cursor.execute('BEGIN')
try:
cursor.execute('TRUNCATE TABLE %s' % (self.model._meta.db_table,))
except DatabaseError:
cursor.execute('DELETE FROM %s' % (self.model._meta.db_table,))
cursor.execute("""INSERT INTO %(t1)s
(to_user_id, from_user_id, agrees, disagrees, exclude)
SELECT v1.user_id, v2.user_id,
SUM(if(v2.score = v1.score, 1, 0)) AS agrees,
SUM(if(v2.score != v1.score, 1, 0)) AS disagrees, 0
FROM %(t2)s AS v1
INNER JOIN %(t2)s AS v2
ON v1.user_id != v2.user_id
AND v1.object_id = v2.object_id
AND v1.content_type_id = v2.content_type_id
WHERE v1.user_id is not null
AND v2.user_id is not null
GROUP BY v1.user_id, v2.user_id
HAVING agrees / (disagrees + 0.0001) > 3
ON DUPLICATE KEY UPDATE agrees=values(agrees), disagrees=values(disagrees);""" % dict(
t1=self.model._meta.db_table,
t2=Vote._meta.db_table,
))
cursor.execute('commit')
cursor.close()
| [
[
[
29,
36
],
[
1155,
1162
],
[
1843,
1850
]
],
[
[
72,
80
],
[
178,
186
]
],
[
[
129,
140
],
[
621,
632
],
[
1367,
1378
],
[
2013,
2024
]
],
[
[
148,
157
],
[
552,
561
]
],
[
[
165,
177
],
[
828,
840
],
[
1209,
1221
]
],
[
[
1143,
1154
]
],
[
[
1824,
1842
]
]
] |
"""
Django settings for django_tdd project.
Generated by 'django-admin startproject' using Django 1.9.12.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'axh7uu^+yfch=#hjgozv%trd3ai55m%xb83=39o4n-y#gk$y6o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'agenda',
'test_without_migrations',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_tdd.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_tdd.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
[
[
319,
321
],
[
406,
408
],
[
422,
424
],
[
438,
440
],
[
2351,
2353
]
],
[
[
395,
403
],
[
2364,
2372
]
],
[
[
671,
681
]
],
[
[
804,
809
]
],
[
[
818,
831
]
],
[
[
865,
879
]
],
[
[
1118,
1136
]
],
[
[
1610,
1622
]
],
[
[
1644,
1653
]
],
[
[
2129,
2145
]
],
[
[
2256,
2265
]
],
[
[
2501,
2525
]
],
[
[
3004,
3017
]
],
[
[
3029,
3038
]
],
[
[
3048,
3056
]
],
[
[
3065,
3073
]
],
[
[
3082,
3088
]
],
[
[
3200,
3210
]
]
] |
from System import IntPtr
from System.Runtime.InteropServices import Marshal
import Ironclad
from Ironclad import CPyMarshal
from Ironclad.Structs import METH, Py_TPFLAGS, PyGetSetDef, PyMemberDef, PyMethodDef, PyTypeObject
from tests.utils.memory import OffsetPtr
def _new_struct(type_, fields, *values):
struct = type_()
for field, value in zip(fields, values):
getattr(type_, field).SetValue(struct, value)
return struct
_meth_fields = 'ml_name ml_meth ml_flags ml_doc'.split()
new_PyMethodDef = lambda *args: _new_struct(PyMethodDef, _meth_fields, *args)
_getset_fields = 'name get set doc closure'.split()
new_PyGetSetDef = lambda *args: _new_struct(PyGetSetDef, _getset_fields, *args)
_member_fields = 'name type offset flags doc'.split()
new_PyMemberDef = lambda *args: _new_struct(PyMemberDef, _member_fields, *args)
gc_fooler = []
def GC_NotYet(dgt):
gc_fooler.append(dgt)
def GC_Soon():
gc_fooler.remove(dgt)
return GC_Soon
DELEGATE_TYPES = {
METH.OLDARGS: Ironclad.dgt_ptr_ptrptr,
METH.O: Ironclad.dgt_ptr_ptrptr,
METH.NOARGS: Ironclad.dgt_ptr_ptrptr,
METH.VARARGS: Ironclad.dgt_ptr_ptrptr,
METH.KEYWORDS: Ironclad.dgt_ptr_ptrptrptr,
METH.VARARGS | METH.KEYWORDS: Ironclad.dgt_ptr_ptrptrptr,
}
for (k, v) in DELEGATE_TYPES.items():
DELEGATE_TYPES[k | METH.COEXIST] = v
def MakeMethodDef(name, implementation, flags, doc="doc"):
dgt = DELEGATE_TYPES[flags](implementation)
return new_PyMethodDef(name, Marshal.GetFunctionPointerForDelegate(dgt), int(flags), doc), GC_NotYet(dgt)
def MakeGetSetDef(name, get, set, doc, closure=IntPtr.Zero):
deallocs = []
_get = IntPtr.Zero
if get:
getdgt = Ironclad.dgt_ptr_ptrptr(get)
_get = Marshal.GetFunctionPointerForDelegate(getdgt)
deallocs.append(GC_NotYet(getdgt))
_set = IntPtr.Zero
if set:
setdgt = Ironclad.dgt_int_ptrptrptr(set)
_set = Marshal.GetFunctionPointerForDelegate(setdgt)
deallocs.append(GC_NotYet(setdgt))
return new_PyGetSetDef(name, _get, _set, doc, closure), lambda: map(apply, deallocs)
def MakeMemberDef(name, type_, offset, flags, doc="doc"):
return new_PyMemberDef(name, int(type_), offset, flags, doc), lambda: None
MAKETYPEPTR_DEFAULTS = {
"tp_name": "Nemo",
"tp_doc": "Odysseus' reply to the blinded Cyclops",
"ob_refcnt": 1,
"tp_basicsize": 8,
"tp_itemsize": 4,
"tp_flags": Py_TPFLAGS.HAVE_CLASS,
"tp_methods": None,
"tp_members": None,
"tp_getset": None,
"tp_init": None,
"tp_iter": None,
"tp_iternext": None,
"tp_base": IntPtr.Zero,
"tp_bases": IntPtr.Zero,
"tp_as_number": IntPtr.Zero,
}
def GetMapperTypePtrDefaults(mapper):
return {
"ob_type": mapper.PyType_Type,
"tp_alloc": mapper.PyType_GenericAlloc,
"tp_new": mapper.PyType_GenericNew,
"tp_dealloc": mapper.IC_PyBaseObject_Dealloc,
"tp_free": mapper.PyObject_Free,
}
PTR_ARGS = ("ob_type", "tp_base", "tp_bases", "tp_as_number", "tp_as_sequence", "tp_as_mapping")
INT_ARGS = ("ob_refcnt", "tp_basicsize", "tp_itemsize", "tp_flags")
STRING_ARGS = ("tp_name", "tp_doc")
TABLE_ARGS = ("tp_methods", "tp_members", "tp_getset")
FUNC_ARGS = {
"tp_alloc": Ironclad.dgt_ptr_ptrint,
"tp_new": Ironclad.dgt_ptr_ptrptrptr,
"tp_init": Ironclad.dgt_int_ptrptrptr,
"tp_dealloc": Ironclad.dgt_void_ptr,
"tp_free": Ironclad.dgt_void_ptr,
"tp_getattr": Ironclad.dgt_ptr_ptrstr,
"tp_iter": Ironclad.dgt_ptr_ptr,
"tp_iternext": Ironclad.dgt_ptr_ptr,
"tp_call": Ironclad.dgt_ptr_ptrptrptr,
"tp_str": Ironclad.dgt_ptr_ptr,
"tp_repr": Ironclad.dgt_ptr_ptr,
"tp_richcompare": Ironclad.dgt_ptr_ptrptrint,
"tp_compare": Ironclad.dgt_int_ptrptr,
"tp_hash": Ironclad.dgt_int_ptr,
}
def WriteTypeField(typePtr, name, value):
if name in PTR_ARGS:
CPyMarshal.WritePtrField(typePtr, PyTypeObject, name, value)
return lambda: None
if name in INT_ARGS:
CPyMarshal.WriteIntField(typePtr, PyTypeObject, name, int(value))
return lambda: None
if name in STRING_ARGS:
ptr = Marshal.StringToHGlobalAnsi(value)
CPyMarshal.WritePtrField(typePtr, PyTypeObject, name, ptr)
return lambda: Marshal.FreeHGlobal(ptr)
if name in TABLE_ARGS:
ptr, dealloc = MakeItemsTablePtr(value)
CPyMarshal.WritePtrField(typePtr, PyTypeObject, name, ptr)
return dealloc
if name in FUNC_ARGS:
if value is not None:
dgt = FUNC_ARGS[name](value)
CPyMarshal.WriteFunctionPtrField(typePtr, PyTypeObject, name, dgt)
return GC_NotYet(dgt)
return lambda: None
raise KeyError("WriteTypeField can't handle %s, %s" % (name, value))
def MakeTypePtr(mapper, params, allocator=None):
fields = dict(MAKETYPEPTR_DEFAULTS)
fields.update(GetMapperTypePtrDefaults(mapper))
fields.update(params)
deallocs = []
typeSize = Marshal.SizeOf(PyTypeObject)
if allocator:
# pretend this was constructed by a C extension, using the mapper's allocator
# hence mapper should do the deallocation itself
typePtr = allocator.Alloc(typeSize)
else:
typePtr = Marshal.AllocHGlobal(typeSize)
deallocs.append(lambda: Marshal.FreeHGlobal(typePtr))
CPyMarshal.Zero(typePtr, typeSize)
for field, value in fields.items():
deallocs.append(WriteTypeField(typePtr, field, value))
def dealloc():
for f in deallocs:
f()
return typePtr, dealloc
def MakeItemsTablePtr(items):
if not items:
return IntPtr.Zero, lambda: None
itemtype = items[0].__class__
typesize = Marshal.SizeOf(itemtype)
size = typesize * (len(items) + 1)
tablePtr = Marshal.AllocHGlobal(size)
CPyMarshal.Zero(tablePtr, size)
for i, item in enumerate(items):
Marshal.StructureToPtr(item, OffsetPtr(tablePtr, typesize * i), False)
def dealloc():
Marshal.DestroyStructure(tablePtr, itemtype)
Marshal.FreeHGlobal(tablePtr)
return tablePtr, dealloc
NUMSEQMAP_METHODS = {
"nb_negative": Ironclad.dgt_ptr_ptr,
"nb_positive": Ironclad.dgt_ptr_ptr,
"nb_absolute": Ironclad.dgt_ptr_ptr,
"nb_invert": Ironclad.dgt_ptr_ptr,
"nb_int": Ironclad.dgt_ptr_ptr,
"nb_long": Ironclad.dgt_ptr_ptr,
"nb_float": Ironclad.dgt_ptr_ptr,
"nb_oct": Ironclad.dgt_ptr_ptr,
"nb_hex": Ironclad.dgt_ptr_ptr,
"nb_index": Ironclad.dgt_ptr_ptr,
"nb_add": Ironclad.dgt_ptr_ptrptr,
"nb_subtract": Ironclad.dgt_ptr_ptrptr,
"nb_multiply": Ironclad.dgt_ptr_ptrptr,
"nb_divide": Ironclad.dgt_ptr_ptrptr,
"nb_floor_divide": Ironclad.dgt_ptr_ptrptr,
"nb_true_divide": Ironclad.dgt_ptr_ptrptr,
"nb_remainder": Ironclad.dgt_ptr_ptrptr,
"nb_divmod": Ironclad.dgt_ptr_ptrptr,
"nb_lshift": Ironclad.dgt_ptr_ptrptr,
"nb_rshift": Ironclad.dgt_ptr_ptrptr,
"nb_and": Ironclad.dgt_ptr_ptrptr,
"nb_xor": Ironclad.dgt_ptr_ptrptr,
"nb_or": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_add": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_subtract": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_multiply": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_divide": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_floor_divide": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_true_divide": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_remainder": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_lshift": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_rshift": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_and": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_xor": Ironclad.dgt_ptr_ptrptr,
"nb_inplace_or": Ironclad.dgt_ptr_ptrptr,
"nb_nonzero": Ironclad.dgt_int_ptr,
"nb_power": Ironclad.dgt_ptr_ptrptrptr,
"nb_inplace_power": Ironclad.dgt_ptr_ptrptrptr,
"sq_item": Ironclad.dgt_ptr_ptrint,
"sq_concat": Ironclad.dgt_ptr_ptrptr,
"sq_repeat": Ironclad.dgt_ptr_ptrint,
"sq_slice": Ironclad.dgt_ptr_ptrintint,
"sq_ass_item": Ironclad.dgt_int_ptrintptr,
"sq_ass_slice": Ironclad.dgt_int_ptrintintptr,
"sq_length": Ironclad.dgt_int_ptr,
"sq_contains": Ironclad.dgt_int_ptrptr,
"mp_length": Ironclad.dgt_int_ptr,
"mp_subscript": Ironclad.dgt_ptr_ptrptr,
"mp_ass_subscript": Ironclad.dgt_int_ptrptrptr,
}
def MakeNumSeqMapMethods(_type, slots):
size = Marshal.SizeOf(_type)
ptr = Marshal.AllocHGlobal(size)
CPyMarshal.Zero(ptr, size)
deallocs = []
for (slot, func) in slots.items():
dgt = NUMSEQMAP_METHODS[slot](func)
CPyMarshal.WriteFunctionPtrField(ptr, _type, slot, dgt)
deallocs.append(GC_NotYet(dgt))
def dealloc():
for f in deallocs:
f()
Marshal.FreeHGlobal(ptr)
return ptr, dealloc
| [
[
[
21,
27
],
[
1678,
1684
],
[
2736,
2742
],
[
2766,
2772
],
[
2800,
2806
],
[
1723,
1729
],
[
1913,
1919
],
[
5853,
5859
]
],
[
[
72,
79
],
[
1549,
1556
],
[
1811,
1818
],
[
2004,
2011
],
[
4319,
4326
],
[
5174,
5181
],
[
5442,
5449
],
[
5930,
5937
],
[
6017,
6024
],
[
6128,
6135
],
[
8703,
8710
],
[
8736,
8743
],
[
4446,
4453
],
[
5506,
5513
],
[
6230,
6237
],
[
6284,
6291
],
[
9089,
9096
]
],
[
[
90,
98
],
[
1061,
1069
],
[
1099,
1107
],
[
1142,
1150
],
[
1186,
1194
],
[
1231,
1239
],
[
1294,
1302
],
[
3403,
3411
],
[
3443,
3451
],
[
3487,
3495
],
[
3534,
3542
],
[
3573,
3581
],
[
3615,
3623
],
[
3656,
3664
],
[
3698,
3706
],
[
3736,
3744
],
[
3779,
3787
],
[
3817,
3825
],
[
3862,
3870
],
[
3909,
3917
],
[
3950,
3958
],
[
6389,
6397
],
[
6432,
6440
],
[
6475,
6483
],
[
6516,
6524
],
[
6554,
6562
],
[
6593,
6601
],
[
6633,
6641
],
[
6671,
6679
],
[
6709,
6717
],
[
6749,
6757
],
[
6793,
6801
],
[
6839,
6847
],
[
6885,
6893
],
[
6929,
6937
],
[
6979,
6987
],
[
7028,
7036
],
[
7075,
7083
],
[
7119,
7127
],
[
7163,
7171
],
[
7207,
7215
],
[
7248,
7256
],
[
7289,
7297
],
[
7329,
7337
],
[
7384,
7392
],
[
7438,
7446
],
[
7492,
7500
],
[
7544,
7552
],
[
7602,
7610
],
[
7659,
7667
],
[
7714,
7722
],
[
7766,
7774
],
[
7818,
7826
],
[
7867,
7875
],
[
7916,
7924
],
[
7964,
7972
],
[
8015,
8023
],
[
8054,
8062
],
[
8108,
8116
],
[
8159,
8167
],
[
8202,
8210
],
[
8245,
8253
],
[
8287,
8295
],
[
8335,
8343
],
[
8384,
8392
],
[
8433,
8441
],
[
8475,
8483
],
[
8524,
8532
],
[
8567,
8575
],
[
8617,
8625
],
[
1766,
1774
],
[
1956,
1964
]
],
[
[
121,
131
],
[
4055,
4065
],
[
4180,
4190
],
[
4363,
4373
],
[
4557,
4567
],
[
4753,
4763
],
[
5541,
5551
],
[
6049,
6059
],
[
8768,
8778
],
[
8908,
8918
]
],
[
[
162,
166
],
[
1047,
1051
],
[
1091,
1095
],
[
1129,
1133
],
[
1172,
1176
],
[
1216,
1220
],
[
1264,
1268
],
[
1279,
1283
],
[
1388,
1392
]
],
[
[
168,
178
],
[
2535,
2545
]
],
[
[
180,
191
],
[
709,
720
]
],
[
[
193,
204
],
[
847,
858
]
],
[
[
206,
217
],
[
575,
586
]
],
[
[
219,
231
],
[
4089,
4101
],
[
4214,
4226
],
[
4397,
4409
],
[
4591,
4603
],
[
4795,
4807
],
[
5189,
5201
]
],
[
[
266,
275
],
[
6157,
6166
]
],
[
[
291,
302
],
[
563,
574
],
[
697,
708
],
[
835,
846
]
],
[
[
473,
485
],
[
588,
600
]
],
[
[
531,
546
],
[
1527,
1542
]
],
[
[
612,
626
],
[
722,
736
]
],
[
[
665,
680
],
[
2106,
2121
]
],
[
[
748,
762
],
[
860,
874
]
],
[
[
803,
818
],
[
2259,
2274
]
],
[
[
886,
895
],
[
927,
936
],
[
978,
987
]
],
[
[
906,
915
],
[
1611,
1620
],
[
1882,
1891
],
[
2075,
2084
],
[
4840,
4849
],
[
8989,
8998
]
],
[
[
1023,
1037
],
[
1340,
1354
],
[
1369,
1383
],
[
1477,
1491
]
],
[
[
1331,
1332
],
[
1384,
1385
]
],
[
[
1334,
1335
],
[
1404,
1405
]
],
[
[
1411,
1424
]
],
[
[
1635,
1648
]
],
[
[
2193,
2206
]
],
[
[
2338,
2358
],
[
5031,
5051
]
],
[
[
2823,
2847
],
[
5072,
5096
]
],
[
[
3112,
3120
],
[
4036,
4044
]
],
[
[
3210,
3218
],
[
4161,
4169
]
],
[
[
3279,
3290
],
[
4291,
4302
]
],
[
[
3316,
3326
],
[
4487,
4497
]
],
[
[
3372,
3381
],
[
4656,
4665
],
[
4717,
4726
]
],
[
[
3982,
3996
],
[
5648,
5662
]
],
[
[
4967,
4978
]
],
[
[
5792,
5809
],
[
4523,
4540
]
],
[
[
6347,
6364
],
[
8869,
8886
]
],
[
[
8655,
8675
]
]
] |
from __future__ import division
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from ops import *
from utils import *
class pix2pix(object):
def __init__(self, sess, image_size=256,
batch_size=1, sample_size=1, output_size=256,
gf_dim=64, df_dim=64, L1_lambda=100,
input_c_dim=3, output_c_dim=3, dataset_name='facades',
checkpoint_dir=None, sample_dir=None):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
output_size: (optional) The resolution in pixels of the images. [256]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
input_c_dim: (optional) Dimension of input image color. For grayscale input, set to 1. [3]
output_c_dim: (optional) Dimension of output image color. For grayscale input, set to 1. [3]
"""
self.sess = sess
self.is_grayscale = (input_c_dim == 1)
self.batch_size = batch_size
self.image_size = image_size
self.sample_size = sample_size
self.output_size = output_size
self.gf_dim = gf_dim
self.df_dim = df_dim
self.input_c_dim = input_c_dim
self.output_c_dim = output_c_dim
self.L1_lambda = L1_lambda
        # batch normalization: deals with poor initialization and helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.g_bn_e2 = batch_norm(name='g_bn_e2')
self.g_bn_e3 = batch_norm(name='g_bn_e3')
self.g_bn_e4 = batch_norm(name='g_bn_e4')
self.g_bn_e5 = batch_norm(name='g_bn_e5')
self.g_bn_e6 = batch_norm(name='g_bn_e6')
self.g_bn_e7 = batch_norm(name='g_bn_e7')
self.g_bn_e8 = batch_norm(name='g_bn_e8')
self.g_bn_d1 = batch_norm(name='g_bn_d1')
self.g_bn_d2 = batch_norm(name='g_bn_d2')
self.g_bn_d3 = batch_norm(name='g_bn_d3')
self.g_bn_d4 = batch_norm(name='g_bn_d4')
self.g_bn_d5 = batch_norm(name='g_bn_d5')
self.g_bn_d6 = batch_norm(name='g_bn_d6')
self.g_bn_d7 = batch_norm(name='g_bn_d7')
self.dataset_name = dataset_name
self.checkpoint_dir = checkpoint_dir
self.build_model()
def build_model(self):
self.real_data = tf.placeholder(tf.float32,
[self.batch_size, self.image_size, self.image_size,
self.input_c_dim + self.output_c_dim],
name='real_A_and_B_images')
self.real_B = self.real_data[:, :, :, :self.input_c_dim]
self.real_A = self.real_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim]
self.fake_B = self.generator(self.real_A)
self.real_AB = tf.concat([self.real_A, self.real_B], 3)
self.fake_AB = tf.concat([self.real_A, self.fake_B], 3)
self.D, self.D_logits = self.discriminator(self.real_AB, reuse=False)
self.D_, self.D_logits_ = self.discriminator(self.fake_AB, reuse=True)
self.fake_B_sample = self.sampler(self.real_A)
self.d_sum = tf.summary.histogram("d", self.D)
self.d__sum = tf.summary.histogram("d_", self.D_)
self.fake_B_sum = tf.summary.image("fake_B", self.fake_B)
self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits, labels=tf.ones_like(self.D)))
self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.zeros_like(self.D_)))
self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.ones_like(self.D_))) \
+ self.L1_lambda * tf.reduce_mean(tf.abs(self.real_B - self.fake_B))
self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver()
def load_random_samples(self):
data = np.random.choice(glob('./datasets/{}/val/*.jpg'.format(self.dataset_name)), self.batch_size)
sample = [load_data(sample_file) for sample_file in data]
if (self.is_grayscale):
sample_images = np.array(sample).astype(np.float32)[:, :, :, None]
else:
sample_images = np.array(sample).astype(np.float32)
return sample_images
def sample_model(self, sample_dir, epoch, idx):
sample_images = self.load_random_samples()
samples, d_loss, g_loss = self.sess.run(
[self.fake_B_sample, self.d_loss, self.g_loss],
feed_dict={self.real_data: sample_images}
)
save_images(samples, [self.batch_size, 1],
'./{}/train_{:02d}_{:04d}.png'.format(sample_dir, epoch, idx))
print("[Sample] d_loss: {:.8f}, g_loss: {:.8f}".format(d_loss, g_loss))
def train(self, args):
"""Train pix2pix"""
d_optim = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \
.minimize(self.d_loss, var_list=self.d_vars)
g_optim = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \
.minimize(self.g_loss, var_list=self.g_vars)
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
self.g_sum = tf.summary.merge([self.d__sum,
self.fake_B_sum, self.d_loss_fake_sum, self.g_loss_sum])
self.d_sum = tf.summary.merge([self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
self.writer = tf.summary.FileWriter("./logs", self.sess.graph)
counter = 1
start_time = time.time()
if self.load(self.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
for epoch in xrange(args.epoch):
data = glob('./datasets/{}/train/*.jpg'.format(self.dataset_name))
#np.random.shuffle(data)
batch_idxs = min(len(data), args.train_size) // self.batch_size
for idx in xrange(0, batch_idxs):
batch_files = data[idx*self.batch_size:(idx+1)*self.batch_size]
batch = [load_data(batch_file) for batch_file in batch_files]
if (self.is_grayscale):
batch_images = np.array(batch).astype(np.float32)[:, :, :, None]
else:
batch_images = np.array(batch).astype(np.float32)
# Update D network
_, summary_str = self.sess.run([d_optim, self.d_sum],
feed_dict={ self.real_data: batch_images })
self.writer.add_summary(summary_str, counter)
# Update G network
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={ self.real_data: batch_images })
self.writer.add_summary(summary_str, counter)
# Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={ self.real_data: batch_images })
self.writer.add_summary(summary_str, counter)
errD_fake = self.d_loss_fake.eval({self.real_data: batch_images})
errD_real = self.d_loss_real.eval({self.real_data: batch_images})
errG = self.g_loss.eval({self.real_data: batch_images})
counter += 1
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
% (epoch, idx, batch_idxs,
time.time() - start_time, errD_fake+errD_real, errG))
if np.mod(counter, 100) == 1:
self.sample_model(args.sample_dir, epoch, idx)
if np.mod(counter, 500) == 2:
self.save(args.checkpoint_dir, counter)
def discriminator(self, image, y=None, reuse=False):
with tf.variable_scope("discriminator") as scope:
# image is 256 x 256 x (input_c_dim + output_c_dim)
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse == False
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
# h0 is (128 x 128 x self.df_dim)
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
# h1 is (64 x 64 x self.df_dim*2)
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            # h2 is (32 x 32 x self.df_dim*4)
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, d_h=1, d_w=1, name='d_h3_conv')))
# h3 is (16 x 16 x self.df_dim*8)
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
return tf.nn.sigmoid(h4), h4
def generator(self, image, y=None):
with tf.variable_scope("generator") as scope:
s = self.output_size
s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128)
# image is (256 x 256 x input_c_dim)
e1 = conv2d(image, self.gf_dim, name='g_e1_conv')
# e1 is (128 x 128 x self.gf_dim)
e2 = self.g_bn_e2(conv2d(lrelu(e1), self.gf_dim*2, name='g_e2_conv'))
# e2 is (64 x 64 x self.gf_dim*2)
e3 = self.g_bn_e3(conv2d(lrelu(e2), self.gf_dim*4, name='g_e3_conv'))
# e3 is (32 x 32 x self.gf_dim*4)
e4 = self.g_bn_e4(conv2d(lrelu(e3), self.gf_dim*8, name='g_e4_conv'))
# e4 is (16 x 16 x self.gf_dim*8)
e5 = self.g_bn_e5(conv2d(lrelu(e4), self.gf_dim*8, name='g_e5_conv'))
# e5 is (8 x 8 x self.gf_dim*8)
e6 = self.g_bn_e6(conv2d(lrelu(e5), self.gf_dim*8, name='g_e6_conv'))
# e6 is (4 x 4 x self.gf_dim*8)
e7 = self.g_bn_e7(conv2d(lrelu(e6), self.gf_dim*8, name='g_e7_conv'))
# e7 is (2 x 2 x self.gf_dim*8)
e8 = self.g_bn_e8(conv2d(lrelu(e7), self.gf_dim*8, name='g_e8_conv'))
# e8 is (1 x 1 x self.gf_dim*8)
self.d1, self.d1_w, self.d1_b = deconv2d(tf.nn.relu(e8),
[self.batch_size, s128, s128, self.gf_dim*8], name='g_d1', with_w=True)
d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)
d1 = tf.concat([d1, e7], 3)
# d1 is (2 x 2 x self.gf_dim*8*2)
self.d2, self.d2_w, self.d2_b = deconv2d(tf.nn.relu(d1),
[self.batch_size, s64, s64, self.gf_dim*8], name='g_d2', with_w=True)
d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)
d2 = tf.concat([d2, e6], 3)
# d2 is (4 x 4 x self.gf_dim*8*2)
self.d3, self.d3_w, self.d3_b = deconv2d(tf.nn.relu(d2),
[self.batch_size, s32, s32, self.gf_dim*8], name='g_d3', with_w=True)
d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)
d3 = tf.concat([d3, e5], 3)
# d3 is (8 x 8 x self.gf_dim*8*2)
self.d4, self.d4_w, self.d4_b = deconv2d(tf.nn.relu(d3),
[self.batch_size, s16, s16, self.gf_dim*8], name='g_d4', with_w=True)
d4 = self.g_bn_d4(self.d4)
d4 = tf.concat([d4, e4], 3)
# d4 is (16 x 16 x self.gf_dim*8*2)
self.d5, self.d5_w, self.d5_b = deconv2d(tf.nn.relu(d4),
[self.batch_size, s8, s8, self.gf_dim*4], name='g_d5', with_w=True)
d5 = self.g_bn_d5(self.d5)
d5 = tf.concat([d5, e3], 3)
# d5 is (32 x 32 x self.gf_dim*4*2)
self.d6, self.d6_w, self.d6_b = deconv2d(tf.nn.relu(d5),
[self.batch_size, s4, s4, self.gf_dim*2], name='g_d6', with_w=True)
d6 = self.g_bn_d6(self.d6)
d6 = tf.concat([d6, e2], 3)
# d6 is (64 x 64 x self.gf_dim*2*2)
self.d7, self.d7_w, self.d7_b = deconv2d(tf.nn.relu(d6),
[self.batch_size, s2, s2, self.gf_dim], name='g_d7', with_w=True)
d7 = self.g_bn_d7(self.d7)
d7 = tf.concat([d7, e1], 3)
# d7 is (128 x 128 x self.gf_dim*1*2)
self.d8, self.d8_w, self.d8_b = deconv2d(tf.nn.relu(d7),
[self.batch_size, s, s, self.output_c_dim], name='g_d8', with_w=True)
# d8 is (256 x 256 x output_c_dim)
return tf.nn.tanh(self.d8)
def sampler(self, image, y=None):
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
s = self.output_size
s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128)
# image is (256 x 256 x input_c_dim)
e1 = conv2d(image, self.gf_dim, name='g_e1_conv')
# e1 is (128 x 128 x self.gf_dim)
e2 = self.g_bn_e2(conv2d(lrelu(e1), self.gf_dim*2, name='g_e2_conv'))
# e2 is (64 x 64 x self.gf_dim*2)
e3 = self.g_bn_e3(conv2d(lrelu(e2), self.gf_dim*4, name='g_e3_conv'))
# e3 is (32 x 32 x self.gf_dim*4)
e4 = self.g_bn_e4(conv2d(lrelu(e3), self.gf_dim*8, name='g_e4_conv'))
# e4 is (16 x 16 x self.gf_dim*8)
e5 = self.g_bn_e5(conv2d(lrelu(e4), self.gf_dim*8, name='g_e5_conv'))
# e5 is (8 x 8 x self.gf_dim*8)
e6 = self.g_bn_e6(conv2d(lrelu(e5), self.gf_dim*8, name='g_e6_conv'))
# e6 is (4 x 4 x self.gf_dim*8)
e7 = self.g_bn_e7(conv2d(lrelu(e6), self.gf_dim*8, name='g_e7_conv'))
# e7 is (2 x 2 x self.gf_dim*8)
e8 = self.g_bn_e8(conv2d(lrelu(e7), self.gf_dim*8, name='g_e8_conv'))
# e8 is (1 x 1 x self.gf_dim*8)
self.d1, self.d1_w, self.d1_b = deconv2d(tf.nn.relu(e8),
[self.batch_size, s128, s128, self.gf_dim*8], name='g_d1', with_w=True)
d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)
d1 = tf.concat([d1, e7], 3)
# d1 is (2 x 2 x self.gf_dim*8*2)
self.d2, self.d2_w, self.d2_b = deconv2d(tf.nn.relu(d1),
[self.batch_size, s64, s64, self.gf_dim*8], name='g_d2', with_w=True)
d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)
d2 = tf.concat([d2, e6], 3)
# d2 is (4 x 4 x self.gf_dim*8*2)
self.d3, self.d3_w, self.d3_b = deconv2d(tf.nn.relu(d2),
[self.batch_size, s32, s32, self.gf_dim*8], name='g_d3', with_w=True)
d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)
d3 = tf.concat([d3, e5], 3)
# d3 is (8 x 8 x self.gf_dim*8*2)
self.d4, self.d4_w, self.d4_b = deconv2d(tf.nn.relu(d3),
[self.batch_size, s16, s16, self.gf_dim*8], name='g_d4', with_w=True)
d4 = self.g_bn_d4(self.d4)
d4 = tf.concat([d4, e4], 3)
# d4 is (16 x 16 x self.gf_dim*8*2)
self.d5, self.d5_w, self.d5_b = deconv2d(tf.nn.relu(d4),
[self.batch_size, s8, s8, self.gf_dim*4], name='g_d5', with_w=True)
d5 = self.g_bn_d5(self.d5)
d5 = tf.concat([d5, e3], 3)
# d5 is (32 x 32 x self.gf_dim*4*2)
self.d6, self.d6_w, self.d6_b = deconv2d(tf.nn.relu(d5),
[self.batch_size, s4, s4, self.gf_dim*2], name='g_d6', with_w=True)
d6 = self.g_bn_d6(self.d6)
d6 = tf.concat([d6, e2], 3)
# d6 is (64 x 64 x self.gf_dim*2*2)
self.d7, self.d7_w, self.d7_b = deconv2d(tf.nn.relu(d6),
[self.batch_size, s2, s2, self.gf_dim], name='g_d7', with_w=True)
d7 = self.g_bn_d7(self.d7)
d7 = tf.concat([d7, e1], 3)
# d7 is (128 x 128 x self.gf_dim*1*2)
self.d8, self.d8_w, self.d8_b = deconv2d(tf.nn.relu(d7),
[self.batch_size, s, s, self.output_c_dim], name='g_d8', with_w=True)
# d8 is (256 x 256 x output_c_dim)
return tf.nn.tanh(self.d8)
def save(self, checkpoint_dir, step):
model_name = "pix2pix.model"
model_dir = "%s_%s_%s" % (self.dataset_name, self.batch_size, self.output_size)
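        # checkpoints are namespaced by dataset name, batch size and output size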
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoint...")
model_dir = "%s_%s_%s" % (self.dataset_name, self.batch_size, self.output_size)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
def test(self, args):
"""Test pix2pix"""
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
sample_files = glob('./datasets/{}/val/*.jpg'.format(self.dataset_name))
# sort testing input
n = [int(i) for i in map(lambda x: x.split('/')[-1].split('.jpg')[0], sample_files)]
sample_files = [x for (y, x) in sorted(zip(n, sample_files))]
# load testing input
print("Loading testing images ...")
sample = [load_data(sample_file, is_test=True) for sample_file in sample_files]
if (self.is_grayscale):
sample_images = np.array(sample).astype(np.float32)[:, :, :, None]
else:
sample_images = np.array(sample).astype(np.float32)
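        # split the loaded samples into batches of batch_size (note: xrange below implies Python 2)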
sample_images = [sample_images[i:i+self.batch_size]
for i in xrange(0, len(sample_images), self.batch_size)]
sample_images = np.array(sample_images)
print(sample_images.shape)
start_time = time.time()
if self.load(self.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
for i, sample_image in enumerate(sample_images):
idx = i+1
print("sampling image ", idx)
samples = self.sess.run(
self.fake_B_sample,
feed_dict={self.real_data: sample_image}
)
save_images(samples, [self.batch_size, 1],
'./{}/test_{:04d}.png'.format(args.test_dir, idx))
| [
[
[
23,
31
]
],
[
[
39,
41
],
[
17043,
17045
],
[
17099,
17101
],
[
17143,
17145
],
[
17231,
17233
],
[
17510,
17512
],
[
17684,
17686
],
[
17771,
17773
]
],
[
[
49,
53
],
[
6354,
6358
],
[
8433,
8437
],
[
18887,
18891
]
],
[
[
71,
75
],
[
4754,
4758
],
[
6565,
6569
],
[
18036,
18040
]
],
[
[
83,
99
],
[
2606,
2608
],
[
2621,
2623
],
[
3115,
3117
],
[
3179,
3181
],
[
3455,
3457
],
[
3511,
3513
],
[
3573,
3575
],
[
3641,
3643
],
[
3656,
3658
],
[
3725,
3727
],
[
3775,
3777
],
[
3790,
3792
],
[
3860,
3862
],
[
3907,
3909
],
[
3922,
3924
],
[
3992,
3994
],
[
4061,
4063
],
[
4076,
4078
],
[
4143,
4145
],
[
4225,
4227
],
[
4362,
4364
],
[
4429,
4431
],
[
4488,
4490
],
[
4668,
4670
],
[
5680,
5682
],
[
5821,
5823
],
[
5963,
5965
],
[
6050,
6052
],
[
6171,
6173
],
[
6263,
6265
],
[
8780,
8782
],
[
8928,
8930
],
[
9011,
9013
],
[
9581,
9583
],
[
9655,
9657
],
[
9731,
9733
],
[
11026,
11028
],
[
11147,
11149
],
[
11206,
11208
],
[
11329,
11331
],
[
11448,
11450
],
[
11507,
11509
],
[
11630,
11632
],
[
11749,
11751
],
[
11808,
11810
],
[
11931,
11933
],
[
12089,
12091
],
[
12214,
12216
],
[
12370,
12372
],
[
12495,
12497
],
[
12651,
12653
],
[
12776,
12778
],
[
12930,
12932
],
[
13057,
13059
],
[
13226,
13228
],
[
13299,
13301
],
[
14630,
14632
],
[
14751,
14753
],
[
14810,
14812
],
[
14933,
14935
],
[
15052,
15054
],
[
15111,
15113
],
[
15234,
15236
],
[
15353,
15355
],
[
15412,
15414
],
[
15535,
15537
],
[
15693,
15695
],
[
15818,
15820
],
[
15974,
15976
],
[
16099,
16101
],
[
16255,
16257
],
[
16380,
16382
],
[
16534,
16536
],
[
16661,
16663
],
[
16830,
16832
],
[
17566,
17568
],
[
17947,
17949
]
],
[
[
107,
118
],
[
4737,
4739
],
[
4957,
4959
],
[
4981,
4983
],
[
5050,
5052
],
[
5074,
5076
],
[
7018,
7020
],
[
7041,
7043
],
[
7125,
7127
],
[
7148,
7150
],
[
8507,
8509
],
[
8621,
8623
],
[
18510,
18512
],
[
18534,
18536
],
[
18603,
18605
],
[
18627,
18629
],
[
18806,
18808
]
],
[
[
141,
147
],
[
6526,
6532
],
[
6762,
6768
],
[
18734,
18740
]
],
[
[
165,
166
]
],
[
[
185,
186
],
[
1620,
1630
],
[
1666,
1676
],
[
1712,
1722
],
[
1761,
1771
],
[
1811,
1821
],
[
1861,
1871
],
[
1911,
1921
],
[
1961,
1971
],
[
2011,
2021
],
[
2061,
2071
],
[
2112,
2122
],
[
2162,
2172
],
[
2212,
2222
],
[
2262,
2272
],
[
2312,
2322
],
[
2362,
2372
],
[
2412,
2422
],
[
4848,
4857
],
[
5400,
5411
],
[
6890,
6899
],
[
9068,
9073
],
[
9074,
9080
],
[
9183,
9188
],
[
9200,
9206
],
[
9309,
9314
],
[
9326,
9332
],
[
9434,
9439
],
[
9451,
9457
],
[
9574,
9580
],
[
9993,
9999
],
[
10114,
10120
],
[
10121,
10126
],
[
10242,
10248
],
[
10249,
10254
],
[
10370,
10376
],
[
10377,
10382
],
[
10498,
10504
],
[
10505,
10510
],
[
10624,
10630
],
[
10631,
10636
],
[
10750,
10756
],
[
10757,
10762
],
[
10876,
10882
],
[
10883,
10888
],
[
11017,
11025
],
[
11320,
11328
],
[
11621,
11629
],
[
11922,
11930
],
[
12205,
12213
],
[
12486,
12494
],
[
12767,
12775
],
[
13048,
13056
],
[
13597,
13603
],
[
13718,
13724
],
[
13725,
13730
],
[
13846,
13852
],
[
13853,
13858
],
[
13974,
13980
],
[
13981,
13986
],
[
14102,
14108
],
[
14109,
14114
],
[
14228,
14234
],
[
14235,
14240
],
[
14354,
14360
],
[
14361,
14366
],
[
14480,
14486
],
[
14487,
14492
],
[
14621,
14629
],
[
14924,
14932
],
[
15225,
15233
],
[
15526,
15534
],
[
15809,
15817
],
[
16090,
16098
],
[
16371,
16379
],
[
16652,
16660
],
[
18379,
18388
],
[
19314,
19325
]
],
[
[
194,
201
]
]
] |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from copy import copy, deepcopy
import numpy as np
import torch
import torch.distributed as torch_distrib
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.step_result import EvalResult, Result
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.trainer.supporters import TensorRunningAccum, Accumulator
from pytorch_lightning.utilities import parsing, AMPType
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_utils import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.warning_utils import WarningCache
class TrainLoop:
def __init__(self, trainer):
self.trainer = trainer
self.early_stopping_accumulator = None
self.checkpoint_accumulator = None
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self.automatic_optimization = True
self._curr_step_result = None
self._cur_grad_norm_dict = None
def on_trainer_init(
self, max_epochs, min_epochs, max_steps, min_steps, num_sanity_val_steps, automatic_optimization
):
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.interrupted = False
self.trainer.should_stop = False
self.trainer._state = TrainerState.INITIALIZING
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
self.automatic_optimization = automatic_optimization
self.trainer.max_epochs = max_epochs
self.trainer.min_epochs = min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
def should_skip_training(self):
if self.trainer.current_epoch >= self.trainer.max_epochs:
return True
if self.trainer.limit_train_batches == 0:
return True
return False
def on_train_start(self):
# clear cache before training
if self.trainer.on_gpu and self.trainer.root_gpu is not None:
# use context because of:
# https://discuss.pytorch.org/t/out-of-memory-when-i-use-torch-cuda-empty-cache/57898
with torch.cuda.device(f"cuda:{self.trainer.root_gpu}"):
torch.cuda.empty_cache()
# hook
self.trainer.call_hook("on_train_start")
def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):
# bind logger and other properties
self.trainer.model_connector.copy_trainer_model_properties(model)
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# links data to the trainer
self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)
# check that model is configured correctly
self.trainer.config_validator.verify_loop_configurations(model)
def setup_training(self, model: LightningModule):
"""Sanity check a few things before starting actual training.
Args:
model: The model to run sanity test on.
"""
# --------------------------
        # Setup
# --------------------------
ref_model = model
if self.trainer.data_parallel:
ref_model = model.module
# set the ranks and devices
self.trainer.accelerator_backend.dist.rank = self.trainer.global_rank
self.trainer.accelerator_backend.dist.device = ref_model.device
# give model convenience properties
ref_model.trainer = self.trainer
# set local properties on the model
self.trainer.model_connector.copy_trainer_model_properties(ref_model)
# init amp. Must be done here instead of __init__ to allow ddp to work
if self.trainer.amp_backend == AMPType.NATIVE and self.trainer.precision == 16 and not self.trainer.use_tpu:
self.trainer.scaler = torch.cuda.amp.GradScaler()
# log hyper-parameters
if self.trainer.logger is not None:
# save exp to get started (this is where the first experiment logs are written)
self.trainer.logger.log_hyperparams(ref_model.hparams_initial)
self.trainer.logger.log_graph(ref_model)
self.trainer.logger.save()
# wait for all to join if on distributed
self.trainer.accelerator_backend.barrier("setup_training")
# register auto-resubmit when on SLURM
self.trainer.slurm_connector.register_slurm_signal_handlers()
# --------------------------
# Pre-train
# --------------------------
# on pretrain routine start
self.trainer.on_pretrain_routine_start(ref_model)
if self.trainer.is_function_implemented("on_pretrain_routine_start"):
ref_model.on_pretrain_routine_start()
# print model summary
if self.trainer.is_global_zero and self.trainer.weights_summary is not None and not self.trainer.testing:
if self.trainer.weights_summary in ModelSummary.MODES:
ref_model.summarize(mode=self.trainer.weights_summary)
else:
raise MisconfigurationException("weights_summary can be None, " + ", ".join(ModelSummary.MODES))
# track model now.
# if cluster resets state, the model will update with the saved weights
self.trainer.model = model
# restore training and model before hpc is called
self.trainer.checkpoint_connector.restore_weights(model)
# on pretrain routine end
self.trainer.on_pretrain_routine_end(ref_model)
if self.trainer.is_function_implemented("on_pretrain_routine_end"):
ref_model.on_pretrain_routine_end()
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_save=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# kill loggers
if self.trainer.logger is not None:
self.trainer.logger.finalize("success")
# summarize profile results
if self.trainer.global_rank == 0:
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator_backend.on_train_end()
# clear mem
if self.trainer.on_gpu:
model = self.trainer.get_model()
model.cpu()
torch.cuda.empty_cache()
def check_checkpoint_callback(self, should_save, is_last=False):
# TODO bake this logic into the checkpoint callback
if should_save and self.trainer.checkpoint_connector.has_trained:
checkpoint_callbacks = [c for c in self.trainer.callbacks if isinstance(c, ModelCheckpoint)]
if is_last and any(c.save_last for c in checkpoint_callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.get_model()
[c.on_validation_end(self.trainer, model) for c in checkpoint_callbacks]
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.get_model()
# reset train dataloader
if self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# set seed for distributed sampler (enables shuffling for each epoch)
try:
self.trainer.train_dataloader.sampler.set_epoch(epoch)
except Exception:
pass
# changing gradient according accumulation_scheduler
self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# structured result accumulators for callbacks
self.early_stopping_accumulator = Accumulator()
self.checkpoint_accumulator = Accumulator()
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, epoch_end_outputs, batch, batch_idx, dataloader_idx):
# hook
self.trainer.call_hook('on_batch_end')
self.trainer.call_hook('on_train_batch_end', epoch_end_outputs, batch, batch_idx, dataloader_idx)
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, epoch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model):
if not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, epoch_end_outputs):
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(epoch_end_outputs):
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def get_optimizers_iterable(self):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
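        # otherwise use optimizer_frequencies to pick the single optimizer responsible for this batch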
optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
optimizers_loop_length = optimizer_freq_cumsum[-1]
current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length
        # find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
is_result_obj = isinstance(training_step_output, Result)
if is_result_obj:
training_step_output.detach()
else:
training_step_output.batch_loss = training_step_output.batch_loss.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
training_step_output = self.trainer.accelerator_backend.training_step(args)
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
is_result_obj = isinstance(training_step_output, Result)
if training_step_output_for_epoch_end is None:
return None
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.train_loop.automatic_optimization:
# accumulate loss
# (if accumulate_grad_batches = 1 no effect)
if is_result_obj:
closure_loss = training_step_output.minimize
else:
closure_loss = training_step_output.batch_loss
closure_loss = closure_loss / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
hiddens=training_step_output.hiddens,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
        # allow training_step to return None
if training_step_output_for_epoch_end is None:
return None, None
# -----------------------------------------
# process result return (DEPRECATE in 1.0)
# -----------------------------------------
if isinstance(training_step_output, Result):
training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
return training_step_output_for_epoch_end, training_step_output
# -----------------------------------------
# process hybrid (1.0)
# -----------------------------------------
# no need for these checks in 1.0.0
# TODO: remove checks in 1.0.0
is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
if is_1_0_output:
return self._process_training_step_output_1_0(training_step_output, split_batch)
# -----------------------------------------
# process old dict (deprecate 1.0)
# -----------------------------------------
training_step_output = self.trainer.process_dict_result(training_step_output, train=True)
training_step_output = AttributeDict(
batch_loss=training_step_output[0],
pbar_on_batch_end=training_step_output[1],
log_metrics=training_step_output[2],
callback_metrics=training_step_output[3],
hiddens=training_step_output[4],
)
# if the user decides to finally reduce things in epoch_end, save raw output without graphs
if isinstance(training_step_output_for_epoch_end, torch.Tensor):
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
else:
training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)
return training_step_output_for_epoch_end, training_step_output
def _process_training_step_output_1_0(self, training_step_output, split_batch):
result = self.trainer.get_model()._results
loss = None
hiddens = None
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
result["extra"] = {}
# map to results under the hood
result.minimize = loss
result.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end.cpu()
# what flows back into the system
training_step_output = result
return training_step_output_for_epoch_end, training_step_output
def _process_result(self, training_step_output, split_batch):
training_step_output.track_batch_size(len(split_batch))
m = """
TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
Use self.log and .write from the LightningModule to log metrics and write predictions.
training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
Option 1:
return loss
Option 2:
return {'loss': loss, 'anything_else': ...}
Option 3:
return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
"""
rank_zero_warn(m)
# don't allow EvalResult in the training_step
if isinstance(training_step_output, EvalResult):
raise MisconfigurationException(
"training_step cannot return EvalResult, " "use a dict or TrainResult instead"
)
training_step_output_for_epoch_end = copy(training_step_output)
training_step_output_for_epoch_end.detach()
return training_step_output_for_epoch_end
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
with self.trainer.profiler.profile("optimizer_step"):
# optimizer step lightningModule hook
self.trainer.accelerator_backend.optimizer_step(
optimizer, batch_idx, opt_idx, train_step_and_backward_closure
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator_backend.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator_backend.clip_gradients(optimizer)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.get_model()
grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)
return grad_norm_dict
def process_hiddens(self, opt_closure_result):
hiddens = opt_closure_result.hiddens
if isinstance(opt_closure_result.training_step_output, Result):
opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()
return hiddens
def tbptt_split_batch(self, batch):
splits = [batch]
if self.trainer.truncated_bptt_steps is not None:
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)
return splits
def run_training_epoch(self):
# get model
model = self.trainer.get_model()
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
# enable profiling for the dataloader
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
should_check_val = False
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.trainer.batch_idx = batch_idx
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
# only track outputs when user implements training_epoch_end
# otherwise we will build up unnecessary memory
epoch_end_outputs = self.process_train_step_outputs(
batch_output.training_step_output_for_epoch_end,
self.early_stopping_accumulator,
self.checkpoint_accumulator,
)
# hook
# TODO: add outputs to batches
self.on_train_batch_end(epoch_output, epoch_end_outputs, batch, batch_idx, dataloader_idx)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED + CHECKPOINT CALLBACK
# -----------------------------------------
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.run_evaluation(test_mode=False)
# reset stage to train
self.trainer.logger_connector.set_stage("train")
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1:
accumulation_done = self._accumulated_batches_reached()
# Ensure accumulation across batches has completed before breaking loop
if accumulation_done:
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.trainer.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if (batch_idx + 1) >= self.trainer.num_training_batches:
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
# epoch end hook
self.run_on_epoch_end_hook(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(
epoch_output,
self.checkpoint_accumulator,
self.early_stopping_accumulator,
self.num_optimizers
)
        # when no val loop is present or in fast-dev-run, we still need to call the checkpoint callback
self.check_checkpoint_callback(not (should_check_val or is_overridden('validation_step', model)))
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# track grad norms
grad_norm_dic = {}
# bookkeeping
using_results_obj = False
self.trainer.hiddens = None
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]
if batch is None:
return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# lightning module hook
splits = self.tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in self.prepare_optimizers():
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
                    # perform ddp sync only when performing optimizer_step
with self.block_ddp_sync_behaviour():
self.training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens)
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.automatic_optimization:
def train_step_and_backward_closure():
result = self.training_step_and_backward(
split_batch,
batch_idx,
opt_idx,
optimizer,
self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
self._curr_step_result = self.training_step(
split_batch,
batch_idx,
opt_idx,
self.trainer.hiddens
)
if self._curr_step_result is None:
# user decided to skip optimization
# make sure to zero grad.
self.zero_grad_handler(batch_idx, optimizer, opt_idx)
continue
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
                    # TODO: properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dic = self._cur_grad_norm_dict
self._cur_grad_norm_dict = None
# hook + clear gradients
self.zero_grad_handler(batch_idx, optimizer, opt_idx)
# update running loss + reset accumulated loss
self.update_running_loss()
result = AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
return result
@contextmanager
def block_ddp_sync_behaviour(self):
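        # while gradients are being accumulated, run under DDP's no_sync() so they are not all-reduced on every batch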
if isinstance(self.trainer.model, torch.nn.parallel.DistributedDataParallel):
yield self.trainer.model.no_sync()
else:
yield
def _process_closure_result(
self, batch_outputs: list, opt_idx: int
) -> list:
opt_closure_result = self._curr_step_result
if opt_closure_result is not None:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# track hiddens
self.trainer.hiddens = self.process_hiddens(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
if self.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(opt_closure_result.loss)
self._curr_step_result = None
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
self._curr_step_result = result
if result is None:
            self.warning_cache.warn("training_step returned None. If this was on purpose, ignore this warning...")
return None
if self.trainer.train_loop.automatic_optimization:
# backward pass
with self.trainer.profiler.profile("model_backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(result.loss)
return result
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator_backend.backward(
result.closure_loss, optimizer, opt_idx, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(interval="step", monitor_metrics=monitor_metrics)
def run_on_epoch_end_hook(self, epoch_output):
self.trainer.call_hook('on_epoch_end')
self.trainer.call_hook('on_train_epoch_end', epoch_output)
self.trainer.logger_connector.on_train_epoch_end()
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step += 1
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
def should_check_val_fx(self, batch_idx, is_last_batch):
# decide if we should run validation
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
can_check_val = self.trainer.enable_validation and is_val_check_epoch
should_check_val = is_val_check_batch or self.trainer.should_stop
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
should_check_val = can_check_val and (should_check_val or is_last_batch_for_infinite_dataset)
return should_check_val
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
        # allow training_step to omit the opt_idx argument when only one optimizer is configured
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
args.append(opt_idx)
else:
num_opts = len(self.trainer.optimizers)
raise ValueError(
f"Your LightningModule defines {num_opts} optimizers but "
f'training_step is missing the "optimizer_idx" argument.'
)
# pass hiddens if using tbptt
if self.trainer.truncated_bptt_steps is not None:
args.append(hiddens)
return args
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs or self.trainer.fast_dev_run:
if self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):
"""
Figure out what needs to be tracked/logged at the end of the epoch
"""
# the training step outputs a list per optimizer. The list contains the outputs at each time step
# when no TBPTT is used, then the list has 1 item per batch
# when TBPTT IS used, then the list has n items (1 per time step)
epoch_end_outputs = []
for optimizer_idx_outputs in all_train_step_outputs:
# extract one representative sample from each time step (1 if no tbptt) and 0th optimizer
if len(optimizer_idx_outputs) == 0:
continue
sample_output = optimizer_idx_outputs[-1]
# pull out callback info if available (ie: Results object)
if isinstance(sample_output, dict) and "early_stop_on" in sample_output:
early_stopping_accumulator.accumulate(sample_output["early_stop_on"])
if isinstance(sample_output, dict) and "checkpoint_on" in sample_output:
checkpoint_accumulator.accumulate(sample_output["checkpoint_on"])
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if is_overridden("training_epoch_end", model=self.trainer.get_model()) or auto_reduce_tng_result:
epoch_end_outputs.append(optimizer_idx_outputs)
return epoch_end_outputs
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.get_model()
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
def zero_grad_handler(self, batch_idx, optimizer, opt_idx):
if self.automatic_optimization:
# hook
self.on_before_zero_grad(optimizer)
optimizers = enumerate([optimizer])
else:
optimizers = self.get_optimizers_iterable()
for idx, optimizer in optimizers:
self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
| [
[
[
610,
624
],
[
30081,
30095
]
],
[
[
642,
646
],
[
18177,
18181
],
[
19489,
19493
]
],
[
[
648,
656
],
[
24062,
24070
]
],
[
[
665,
676
],
[
11586,
11588
],
[
11894,
11896
]
],
[
[
684,
689
],
[
3663,
3668
],
[
3731,
3736
],
[
5420,
5425
],
[
8217,
8222
],
[
12671,
12676
],
[
16000,
16005
],
[
16965,
16970
],
[
17774,
17779
],
[
30178,
30183
],
[
32731,
32736
]
],
[
[
697,
731
]
],
[
[
773,
788
],
[
8533,
8548
]
],
[
[
834,
849
],
[
4432,
4447
]
],
[
[
892,
904
],
[
6527,
6539
],
[
6728,
6740
]
],
[
[
952,
962
],
[
19276,
19286
]
],
[
[
964,
970
],
[
11146,
11152
],
[
12152,
12158
],
[
13962,
13968
],
[
15524,
15530
],
[
21043,
21049
],
[
37891,
37897
]
],
[
[
1016,
1028
],
[
2395,
2407
]
],
[
[
1078,
1096
],
[
1911,
1929
],
[
9589,
9607
]
],
[
[
1098,
1109
],
[
9758,
9769
],
[
9810,
9821
]
],
[
[
1150,
1157
],
[
4093,
4100
]
],
[
[
1159,
1166
],
[
5308,
5315
]
],
[
[
1219,
1233
],
[
8642,
8656
]
],
[
[
1235,
1249
],
[
19159,
19173
]
],
[
[
1301,
1326
],
[
6658,
6683
],
[
12894,
12919
],
[
19307,
19332
]
],
[
[
1374,
1390
],
[
17136,
17152
]
],
[
[
1443,
1456
],
[
25660,
25673
],
[
38073,
38086
]
],
[
[
1505,
1518
],
[
14739,
14752
],
[
16531,
16544
],
[
26263,
26276
],
[
26438,
26451
],
[
26654,
26667
],
[
29903,
29916
]
],
[
[
1573,
1585
],
[
1825,
1837
]
],
[
[
1594,
1603
]
]
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class RoleProfile(Document):
def autoname(self):
"""set name as Role Profile name"""
self.name = self.role_profile
def on_update(self):
"""Changes in role_profile reflected across all its user"""
users = frappe.get_all("User", filters={"role_profile_name": self.name})
roles = [role.role for role in self.roles]
for d in users:
user = frappe.get_doc("User", d)
user.set("roles", [])
user.add_roles(*roles)
| [
[
[
157,
173
]
],
[
[
182,
188
],
[
449,
455
],
[
587,
593
]
],
[
[
223,
231
],
[
252,
260
]
],
[
[
240,
251
]
]
] |
from output.models.ms_data.complex_type.ct_f013_xsd.ct_f013 import (
FooType,
MyType,
Root,
)
__all__ = [
"FooType",
"MyType",
"Root",
]
| [
[
[
73,
80
]
],
[
[
86,
92
]
],
[
[
98,
102
]
],
[
[
107,
114
]
]
] |
import datetime
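# Helper classes that serialize filter values into "filter[...]"-style query
# parameters for an HTTP API. Hypothetical usage (illustrative only):
#   SingleFilter("status")(142)._as_params()  ->  {"filter[status]": 142}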
class Filter:
def __init__(self, name):
self._name = name
def _as_params(self):
return {}
class SingleFilter(Filter):
def __call__(self, value):
self._value = value
return self
def _as_params(self):
return {"filter[{}]".format(self._name): self._value}
class SingleListFilter(Filter):
def __call__(self, value):
self._value = value
return self
def _as_params(self):
return {"filter[{}][]".format(self._name): self._value}
class MultiFilter(Filter):
def __call__(self, values):
self._values = values
return self
def _as_params(self):
return {"filter[{}][0]".format(self._name): self._values}
class RangeFilter(Filter):
def __call__(self, value_from, value_to):
self._value_from = value_from
self._value_to = value_to
return self
def _as_params(self):
return {
"filter[{}][from]".format(self._name): self._value_from,
"filter[{}][to]".format(self._name): self._value_to,
}
class DateRangeFilter(RangeFilter):
def __call__(self, value_from: datetime.datetime, value_to: datetime.datetime):
self._value_from = int(value_from.timestamp())
self._value_to = int(value_to.timestamp())
return self
class EventsFiltersByPipelineAndStatus(Filter):
    def __call__(self, pipeline_id, status_id):
        self._pipeline_id = pipeline_id
self._status_id = status_id
return self
def _as_params(self):
return {
"filter[value_before][leads_statuses][0][pipeline_id]": self._pipline_id,
"filter[value_before][leads_statuses][0][status_id]": self._status_id
} | [
[
[
7,
15
],
[
1167,
1175
],
[
1196,
1204
]
],
[
[
24,
30
],
[
154,
160
],
[
356,
362
],
[
555,
561
],
[
759,
765
],
[
1383,
1389
]
],
[
[
141,
153
]
],
[
[
339,
355
]
],
[
[
543,
554
]
],
[
[
747,
758
],
[
1118,
1129
]
],
[
[
1102,
1117
]
],
[
[
1350,
1382
]
]
] |
import datetime
import hashlib
import json
import logging
import os.path
import sys
from typing import TYPE_CHECKING
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.metadata import get_default_environment
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.utils.filesystem import adjacent_tmp_file, check_path_owner, replace
from pip._internal.utils.misc import ensure_dir
if TYPE_CHECKING:
import optparse
from typing import Any, Dict
from pip._internal.network.session import PipSession
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
def _get_statefile_name(key):
# type: (str) -> str
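    # hash the key (sys.prefix) so each environment maps to a distinct state file name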
key_bytes = key.encode()
name = hashlib.sha224(key_bytes).hexdigest()
return name
class SelfCheckState:
def __init__(self, cache_dir):
# type: (str) -> None
self.state = {} # type: Dict[str, Any]
self.statefile_path = None
# Try to load the existing state
if cache_dir:
self.statefile_path = os.path.join(
cache_dir, "selfcheck", _get_statefile_name(self.key)
)
try:
with open(self.statefile_path, encoding="utf-8") as statefile:
self.state = json.load(statefile)
except (OSError, ValueError, KeyError):
# Explicitly suppressing exceptions, since we don't want to
# error out if the cache file is invalid.
pass
@property
def key(self):
# type: () -> str
return sys.prefix
def save(self, pypi_version, current_time):
# type: (str, datetime.datetime) -> None
# If we do not have a path to cache in, don't bother saving.
if not self.statefile_path:
return
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
state = {
# Include the key so it's easy to tell which pip wrote the
# file.
"key": self.key,
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
text = json.dumps(state, sort_keys=True, separators=(",", ":"))
with adjacent_tmp_file(self.statefile_path) as f:
f.write(text.encode())
try:
# Since we have a prefix-specific state file, we can just
# overwrite whatever is there, no need to check.
replace(f.name, self.statefile_path)
except OSError:
# Best effort.
pass
def was_installed_by_pip(pkg):
# type: (str) -> bool
"""Checks whether pkg was installed by pip
This is used not to display the upgrade message when pip is in fact
installed by system package manager, such as dnf on Fedora.
"""
dist = get_default_environment().get_distribution(pkg)
return dist is not None and "pip" == dist.installer
def pip_self_version_check(session, options):
# type: (PipSession, optparse.Values) -> None
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_dist = get_default_environment().get_distribution("pip")
if not installed_dist:
return
pip_version = installed_dist.version
pypi_version = None
try:
state = SelfCheckState(cache_dir=options.cache_dir)
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
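            # only re-query PyPI if the cached check is more than a week old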
if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
# Lets use PackageFinder to see what the latest pip version is
link_collector = LinkCollector.create(
session,
options=options,
suppress_no_index=True,
)
# Pass allow_yanked=False so we don't suggest upgrading to a
# yanked version.
selection_prefs = SelectionPreferences(
allow_yanked=False,
allow_all_prereleases=False, # Explicitly set to False
)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
best_candidate = finder.find_best_candidate("pip").best_candidate
if best_candidate is None:
return
pypi_version = str(best_candidate.version)
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = parse_version(pypi_version)
local_version_is_older = (
pip_version < remote_version and
pip_version.base_version != remote_version.base_version and
was_installed_by_pip('pip')
)
# Determine if our pypi_version is older
if not local_version_is_older:
return
# We cannot tell how the current pip is available in the current
# command context, so be pragmatic here and suggest the command
# that's always available. This does not accommodate spaces in
# `sys.executable`.
pip_cmd = f"{sys.executable} -m pip"
logger.warning(
"You are using pip version %s; however, version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
| [
[
[
7,
15
],
[
3970,
3978
],
[
4148,
4156
]
],
[
[
23,
30
],
[
876,
883
]
],
[
[
38,
42
],
[
1427,
1431
],
[
2590,
2594
]
],
[
[
50,
57
],
[
751,
758
]
],
[
[
65,
72
],
[
1200,
1202
],
[
2051,
2053
],
[
2276,
2278
]
],
[
[
80,
83
],
[
1730,
1733
],
[
6111,
6114
]
],
[
[
103,
116
],
[
570,
583
]
],
[
[
160,
182
],
[
5506,
5519
]
],
[
[
226,
239
],
[
4619,
4632
]
],
[
[
287,
300
],
[
5053,
5066
]
],
[
[
336,
359
],
[
3265,
3288
],
[
3718,
3741
]
],
[
[
409,
429
],
[
4887,
4907
]
],
[
[
473,
490
],
[
2661,
2678
]
],
[
[
492,
508
],
[
2034,
2050
]
],
[
[
510,
517
],
[
2898,
2905
]
],
[
[
555,
565
],
[
2265,
2275
]
],
[
[
596,
604
]
],
[
[
628,
631
]
],
[
[
633,
637
]
],
[
[
685,
695
]
],
[
[
698,
716
],
[
2501,
2519
],
[
4235,
4253
]
],
[
[
742,
748
],
[
6143,
6149
],
[
6430,
6436
]
],
[
[
785,
804
],
[
1254,
1273
]
],
[
[
938,
952
],
[
3902,
3916
]
],
[
[
3009,
3029
],
[
5699,
5719
]
],
[
[
3375,
3397
]
]
] |
from treehopper.libraries.displays import LedDriver
from treehopper.libraries.io.expander.shift_register import ChainableShiftRegisterOutput
class LedShiftRegister(ChainableShiftRegisterOutput, LedDriver):
def __init__(self):
super().__init__()
| [
[
[
42,
51
],
[
196,
205
]
],
[
[
112,
140
],
[
166,
194
]
],
[
[
149,
165
]
]
] |
from userbot import bot
from telethon import events
import asyncio
from datetime import datetime
from telethon.tl.types import User, Chat, Channel
from uniborg.util import admin_cmd
@bot.on(admin_cmd(pattern=r"stats"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
u = 0
g = 0
c = 0
bc = 0
b = 0
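    # counters: u = private chats, g = basic groups, c = supergroups, bc = broadcast channels, b = bots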
dialogs = await bot.get_dialogs(
limit=None,
ignore_migrated=True
)
for d in dialogs:
        current_entity = d.entity
        if type(current_entity) is User:
            if current_entity.bot:
                b += 1
            else:
                u += 1
        elif type(current_entity) is Chat:
            g += 1
        elif type(current_entity) is Channel:
            if current_entity.broadcast:
bc += 1
else:
c += 1
else:
print(d)
end = datetime.now()
ms = (end - start).seconds
await event.edit("""
=================================
`Your Stats Obtained in {} seconds`
`You have {} Private Messages`
`You are in {} Groups`
`You are in {} Super Groups`
`You Are in {} Channels`
`And finally Bots = {}`
===================================""".format(ms, u, g, c, bc, b))
| [
[
[
20,
23
],
[
184,
187
],
[
376,
379
]
],
[
[
45,
51
]
],
[
[
59,
66
]
],
[
[
88,
96
],
[
290,
298
],
[
909,
917
]
],
[
[
127,
131
],
[
541,
545
]
],
[
[
133,
137
],
[
685,
689
]
],
[
[
139,
146
],
[
748,
755
]
],
[
[
172,
181
],
[
191,
200
]
],
[
[
220,
1249
]
]
] |
# This file will track detections
import tqdm
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
from cs231aApproachingOdt import utils as myutils
from PIL import Image
import os
import torch
import torchvision.ops.boxes as bops
def match_detections(prev_path, prev_detection, new_path, new_detection, size=(640, 480)):
prev_range = [*range(len(prev_detection))]
new_range = [*range(len(new_detection))]
permutations = myutils.unique_permutations(prev_range, new_range)
fig, ax = plt.subplots(1, 2)
prev_img = myutils.load_resize(prev_path, size)
new_img = myutils.load_resize(new_path, size)
matching_pairs = []
for old, new in permutations:
[a.cla() for a in ax]
draw_detection(prev_img, prev_detection[old], ax[0])
ax[0].set_title(f"{os.path.basename(prev_path)}")
draw_detection(new_img, new_detection[new], ax[1])
ax[1].set_title(f"{os.path.basename(new_path)}")
#plt.pause(0.1)
iou = get_iou(prev_detection[old], new_detection[new])
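        # skip candidate pairs whose boxes barely overlap (IoU below 0.7)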
if iou < 0.7:
continue
prev_crop = crop_detection(prev_img, prev_detection[old])
new_crop = crop_detection(new_img, new_detection[new])
#keypoint_matching(prev_crop, new_crop)
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
is_match = template_matching(new_crop, prev_crop, methods[3])
if is_match == True:
matching_pairs.append((old, new))
plt.close(fig)
return matching_pairs
def get_iou(prev_detection, new_detection):
box1 = new_detection[:4].reshape((1, 4))
box2 = prev_detection[:4].reshape((1, 4))
iou = bops.box_iou(box1, box2)
return iou
def template_matching(img1, template, method):
fig_template, ax = plt.subplots()
template_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img = img1_gray.copy()
w_t, h_t = template_gray.shape[::-1]
w_i, h_i = img1_gray.shape[::-1]
if (w_t > w_i) or (h_t > h_i):
return False
method = eval(method)
# Apply template Matching
res = cv2.matchTemplate(img1_gray, template_gray, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
#print(f"\n{min_val}, {max_val}, {min_loc}, {max_loc}")
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
# bottom_right = (top_left[0] + w, top_left[1] + h)
# cv2.rectangle(img, top_left, bottom_right, 255, 2)
# plt.subplot(121), plt.imshow(res, cmap='gray')
# plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
# plt.subplot(122), plt.imshow(img, cmap='gray')
# plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
# plt.suptitle(method)
# plt.show()
# plt.close(fig_template)
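    # the caller passes TM_CCORR_NORMED, where a higher normalized score means a better match; treat > 0.9 as a match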
if max_val > 0.9:
return True
else:
return False
def keypoint_matching(img1, img2):
# Source: https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
myutils.show(img1_gray)
orb = cv2.ORB_create()
# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1_gray, None)
kp2, des2 = orb.detectAndCompute(img2_gray, None)
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1, des2)
# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)
# Draw first 10 matches.
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
fig_match, ax_match = plt.subplot()
plt.imshow(img3)
plt.show()
plt.close(fig_match)
def crop_detection(img, detection):
x1, y1, x2, y2, conf, cls_conf, cls_pred = detection
crop = img[int(y1):int(y2), int(x1):int(x2)]
return crop
def draw_detection(img, detection, ax):
ax.imshow(myutils.bgr2rgb(img))
x1, y1, x2, y2, conf, cls_conf, cls_pred = detection
box_w = x2 - x1
box_h = y2 - y1
# Create a Rectangle patch
bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor="red", facecolor="none")
# Add the bbox to the plot
ax.add_patch(bbox)
ax.set_xticks([])
ax.set_yticks([])
def tracking_by_detection(img_folder, image_paths, img_detections, size=(640, 480)):
# Iterate through images and save plot of detections
print("In Tracking By Detection")
path_detections_zip = zip(image_paths, img_detections)
num_images = len(image_paths)
tqdm_pbar = tqdm.tqdm(path_detections_zip, total=num_images)
tracks_dict = dict()
for img_i, (path, detections) in enumerate(tqdm_pbar):
tqdm_pbar.set_postfix({"Processing ": path})
if img_i == 0:
print("Initialize Detections")
continue
matching_pairs = match_detections(prev_path=image_paths[img_i - 1], prev_detection=img_detections[img_i - 1],
new_path=path, new_detection=detections, size=size)
print(matching_pairs)
tracks_dict[path] = matching_pairs
myutils.pickle_save(os.path.join(img_folder, "output/tracks.pickle"), (tracks_dict, img_detections))
return tracks_dict | [
[
[
41,
45
],
[
4952,
4956
]
],
[
[
53,
56
],
[
1972,
1975
],
[
1995,
1998
],
[
2031,
2034
],
[
2050,
2053
],
[
2299,
2302
],
[
2392,
2395
],
[
2556,
2559
],
[
2571,
2574
],
[
3289,
3292
],
[
3308,
3311
],
[
3344,
3347
],
[
3363,
3366
],
[
3421,
3424
],
[
3636,
3639
],
[
3650,
3653
],
[
3889,
3892
],
[
3953,
3956
]
],
[
[
64,
88
],
[
571,
574
],
[
1638,
1641
],
[
1936,
1939
],
[
4024,
4027
],
[
4042,
4045
],
[
4063,
4066
],
[
4078,
4081
]
],
[
[
96,
125
],
[
4474,
4481
]
],
[
[
156,
167
]
],
[
[
201,
217
],
[
505,
512
],
[
605,
612
],
[
656,
663
],
[
3387,
3394
],
[
4313,
4320
],
[
5500,
5507
]
],
[
[
234,
239
]
],
[
[
247,
249
],
[
869,
871
],
[
987,
989
],
[
5520,
5522
]
],
[
[
258,
263
]
],
[
[
271,
300
],
[
1825,
1829
]
],
[
[
306,
322
],
[
5252,
5268
]
],
[
[
1684,
1691
],
[
1055,
1062
]
],
[
[
1870,
1887
],
[
1505,
1522
]
],
[
[
3164,
3181
]
],
[
[
4104,
4118
],
[
1168,
1182
],
[
1233,
1247
]
],
[
[
4263,
4277
],
[
789,
803
],
[
909,
923
]
],
[
[
4667,
4688
]
]
] |
# Tot's reward lv 40
sm.completeQuest(5521)
# Lv. 40 Equipment box
sm.giveItem(2431877, 1)
sm.dispose()
| [] |
import csv
from django.contrib.auth import get_user_model
from django.db.models.aggregates import Sum
from django.http.response import HttpResponse
from django.utils.decorators import method_decorator
from djoser.serializers import SetPasswordSerializer
from djoser.views import TokenCreateView
from drf_yasg.utils import swagger_auto_schema
from rest_framework import filters, status
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from api import serializers
from api.decorators import multi_method_decorator
from api.docs.schemas import (EmptyAutoSchema, follower_params,
recipe_request_body)
from api.filters import GlobalFilterBackend
from api.pagination import FollowPagination, LimitPagination
from api.permissions import (IsAdminOrReadIfAuthenticatedObjPerm,
IsAdminOrReadOnly, RecipePermission)
from food.models import Ingredient, IngredientInRecipe, Recipe, Tag
from interactions.models import Favorite, Follow, Purchase
User = get_user_model()
class CustomTokenCreateView(TokenCreateView):
def _action(self, serializer):
response = super()._action(serializer)
response.status_code = status.HTTP_201_CREATED
return response
@multi_method_decorator(
names=['update', 'partial_update', 'destroy'],
decorator=swagger_auto_schema(auto_schema=None)
)
class CustomUserViewSet(ModelViewSet):
queryset = User.objects.all().order_by('id')
serializer_class = serializers.CustomUserSerializer
pagination_class = LimitPagination
permission_classes = (IsAdminOrReadIfAuthenticatedObjPerm,)
def get_serializer_class(self):
if self.action in ('list', 'retrieve', 'me'):
return serializers.CustomUserGetSerializer
elif self.action == 'set_password':
return SetPasswordSerializer
elif self.action == 'subscriptions':
return serializers.SubscriptionsSerializer
elif self.action == 'subscribe':
return serializers.FollowSerializer
return self.serializer_class
@action(['get'], detail=False,
permission_classes=(IsAuthenticated,))
@swagger_auto_schema(auto_schema=EmptyAutoSchema)
def me(self, request, pk=None):
serializer = self.get_serializer(self.request.user)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(['post'], detail=False, permission_classes=(IsAuthenticated,))
@swagger_auto_schema(request_body=SetPasswordSerializer,
responses={204: 'No Content'})
def set_password(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.request.user.set_password(
serializer.validated_data['new_password']
)
self.request.user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(['get'], detail=False, pagination_class=FollowPagination,
permission_classes=[IsAuthenticated])
@swagger_auto_schema(responses={201: serializers.SubscriptionsSerializer})
def subscriptions(self, request):
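        # List the authors the current user follows, paginated with FollowPagination.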
queryset = Follow.objects.filter(user=request.user)
if not queryset.exists():
            return Response({'error': 'You are not subscribed to anyone yet'},
status=status.HTTP_400_BAD_REQUEST)
page = self.paginate_queryset(queryset)
        if page is not None:
serializer = self.get_serializer(
page,
many=True,
context={'request': request}
)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True,
context={'request': request})
return Response(serializer.data)
@action(['get'], detail=True, permission_classes=[IsAuthenticated])
@swagger_auto_schema(manual_parameters=follower_params,
responses={201: serializers.SubscriptionsSerializer})
def subscribe(self, request, pk=None):
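        # Create a Follow from the current user to the author identified by pk.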
user, author = self.following_validate(request, pk)
if not author:
return Response({'error': user},
status=status.HTTP_400_BAD_REQUEST)
data = {'user': user.id, 'author': author.id}
serializer = self.get_serializer(
data=data, context={'request': request}
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
@subscribe.mapping.delete
def delete_subscribe(self, request, pk=None):
user, author, subscribe = self.following_validate(request, pk,
delete=True)
if not author or not subscribe:
return Response({'error': user},
status=status.HTTP_400_BAD_REQUEST)
subscribe.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def following_validate(self, request, pk, delete=False):
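        # Returns (user, author) for subscribe requests, or (user, author, follow)
        # when delete=True; on validation failure the first element is an error
        # message and at least one of the remaining flags is False.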
user = request.user
if not User.objects.filter(id=pk).exists():
if delete:
                return 'This user does not exist yet', False, False
            return 'This user does not exist yet', False
author = get_object_or_404(User, id=pk)
if delete:
if not Follow.objects.filter(user=user, author=author).exists():
                return ('You are not subscribed to this user yet',
                        True, False)
else:
return (user, author,
get_object_or_404(Follow, user=user,
author=author))
return user, author
@multi_method_decorator(
names=['create', 'update', 'partial_update', 'destroy'],
decorator=swagger_auto_schema(auto_schema=None)
)
class TagViewSet(ModelViewSet):
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
permission_classes = (IsAdminOrReadOnly,)
@multi_method_decorator(
names=['create', 'update', 'partial_update', 'destroy'],
decorator=swagger_auto_schema(auto_schema=None)
)
class IngredientsViewSet(ModelViewSet):
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
permission_classes = (IsAdminOrReadOnly,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', )
@method_decorator(
swagger_auto_schema(
request_body=recipe_request_body,
responses={201: serializers.RecipeSerializer}
),
name='create'
)
@method_decorator(
swagger_auto_schema(
request_body=recipe_request_body,
responses={200: serializers.RecipeSerializer}
),
name='update'
)
@method_decorator(
swagger_auto_schema(auto_schema=None),
name='partial_update'
)
class RecipeViewSet(ModelViewSet):
queryset = Recipe.objects.all()
serializer_class = serializers.RecipeSerializer
pagination_class = LimitPagination
permission_classes = (RecipePermission,)
filter_backends = (GlobalFilterBackend,)
filterset_fields = ('author', )
def get_serializer_class(self):
if self.action == 'favorite':
return serializers.FavoriteSerializer
elif self.action == 'shopping_cart':
return serializers.PurchaseSerializer
return self.serializer_class
@action(['get'], detail=True, permission_classes=[IsAuthenticated])
@swagger_auto_schema(responses={201: serializers.RecipeLiteSerializer})
def favorite(self, request, pk=None):
return self.alt_endpoint_create(request, pk)
@favorite.mapping.delete
def delete_favorite(self, request, pk=None):
return self.alt_endpoint_delete(request, pk, favorite=True)
@action(['get'], detail=True, permission_classes=[IsAuthenticated])
@swagger_auto_schema(responses={201: serializers.RecipeLiteSerializer})
def shopping_cart(self, request, pk=None):
return self.alt_endpoint_create(request, pk)
@shopping_cart.mapping.delete
def delete_shopping_cart(self, request, pk=None):
return self.alt_endpoint_delete(request, pk, cart=True)
@action(['get'], detail=False, permission_classes=(IsAuthenticated,))
@swagger_auto_schema(auto_schema=EmptyAutoSchema,
responses={200: 'Download', 401: 'NotAuthorized'})
def download_shopping_cart(self, request):
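        # Aggregate ingredient amounts across every recipe in the user's shopping cart.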
ingredients = (
IngredientInRecipe.objects
.select_related('ingredient', 'recipe')
.prefetch_related('purchases')
.filter(recipe__purchases__user=request.user)
.values_list('ingredient__name', 'ingredient__measurement_unit')
.annotate(amount=Sum('amount'))
)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = ('attachment;'
'filename="Your_shopping_list.csv"')
writer = csv.writer(response)
        writer.writerow(['Ingredient', 'Measurement unit', 'Amount'])
for ingredient in ingredients:
writer.writerow(ingredient)
return response
def alt_endpoint_create(self, request, pk):
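        # Shared handler for the favorite and shopping_cart creation endpoints.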
verdict, recipe, user = self.recipe_validate(request, pk)
if not verdict:
return recipe
data = {
'user': user.id,
'recipe': recipe.id,
}
serializer = self.get_serializer(data=data,
context={'request': request})
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
def alt_endpoint_delete(self, request, pk, favorite=False, cart=False):
verdict, obj = self.recipe_validate(request, pk, delete=True,
favorite=favorite, cart=cart)
if not verdict:
return obj
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def recipe_validate(self, request, pk, delete=False,
favorite=False, cart=False):
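        # Returns (True, recipe, user) for creation, (True, obj) for deletion, or a
        # False verdict plus an error Response when validation fails.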
user = request.user
if not Recipe.objects.filter(id=pk).exists():
            error = Response({'error': 'This recipe does not exist yet'},
                             status=status.HTTP_400_BAD_REQUEST)
            return (False, error) if delete else (False, error, None)
recipe = get_object_or_404(Recipe, id=pk)
if delete:
model_answer = {
                'favorite': (Favorite, 'favorites'),
                'cart': (Purchase, 'shopping cart')
}
if favorite:
model, answer = model_answer.get('favorite')
if cart:
model, answer = model_answer.get('cart')
if not model.objects.filter(user=user, recipe=recipe).exists():
return False, Response(
                    {'error': f'This recipe is not in your {answer} yet'},
status=status.HTTP_400_BAD_REQUEST
)
return True, get_object_or_404(model, user=user, recipe=recipe)
return True, recipe, user
| [[[7, 10], [9286, 9289]],
  [[44, 58], [1200, 1214]],
  [[99, 102], [9048, 9051]],
  [[136, 148], [9093, 9105]],
  [[185, 201], [6713, 6729], [6880, 6896], [7047, 7063]],
  [[233, 254], [2682, 2703], [2015, 2036]],
  [[280, 295], [1247, 1262]],
  [[323, 342], [1519, 1538], [2355, 2374], [2649, 2668], [3247, 3266], [4127, 4146], [6109, 6128], [6411, 6430], [6735, 6754], [6902, 6921], [7069, 7088], [7759, 7778], [8150, 8169], [8554, 8573]],
  [[370, 377], [6656, 6663]],
  [[379, 385], [1379, 1385], [2548, 2554], [3093, 3099], [3561, 3567], [4467, 4473], [4779, 4785], [5147, 5153], [5234, 5240], [9989, 9995], [10334, 10340], [10669, 10675], [11305, 11311]],
  [[424, 430], [2269, 2275], [2574, 2580], [3127, 3133], [4055, 4061], [7687, 7693], [8078, 8084], [8480, 8486]],
  [[467, 484], [5567, 5584], [5884, 5901], [10721, 10738], [11376, 11393]],
  [[524, 539], [2331, 2346], [2625, 2640], [3224, 3239], [4104, 4119], [7736, 7751], [8127, 8142], [8530, 8545]],
  [[576, 584], [2515, 2523], [3077, 3085], [3472, 3480], [4023, 4031], [4406, 4414], [4746, 4754], [5086, 5094], [5218, 5226], [9956, 9964], [10318, 10326], [10581, 10589], [11193, 11201]],
  [[621, 633], [1583, 1595], [6166, 6178], [6476, 6488], [7156, 7168]],
  [[651, 662], [1670, 1681], [3283, 3294], [4223, 4234], [6237, 6248], [6554, 6565], [6822, 6833], [6989, 7000], [7230, 7241], [7795, 7806], [8186, 8197], [1916, 1927], [2101, 2112], [2197, 2208], [7518, 7529], [7613, 7624]],
  [[690, 712], [1430, 1452], [6010, 6032], [6312, 6334]],
  [[743, 758], [2387, 2402], [8586, 8601]],
  [[760, 775], [4165, 4180]],
  [[807, 826], [6777, 6796], [6944, 6963]],
  [[852, 871], [7366, 7385]],
  [[899, 915], [3174, 3190]],
  [[917, 932], [1726, 1741], [7282, 7297]],
  [[962, 997], [1768, 1803]],
  [[1028, 1045], [6289, 6306], [6613, 6630]],
  [[1047, 1063], [7324, 7340]],
  [[1089, 1099], [6506, 6516]],
  [[1101, 1119], [8762, 8780]],
  [[1121, 1127], [7186, 7192], [10516, 10522], [10739, 10745]],
  [[1129, 1132], [6196, 6199]],
  [[1165, 1173], [10832, 10840]],
  [[1175, 1181], [3378, 3384], [5637, 5643], [5902, 5908]],
  [[1183, 1191], [10881, 10889]],
  [[1193, 1197], [1613, 1617], [5367, 5371], [5585, 5589]],
  [[1225, 1246]],
  [[1565, 1582]],
  [[6155, 6165]],
  [[6457, 6475]],
  [[7142, 7155]]] |
#!/usr/bin/env python
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from hashlib import md5
from six.moves import urllib
from swift.common import swob
from swift.common.middleware import copy
from swift.common.storage_policy import POLICIES
from swift.common.swob import Request, HTTPException
from swift.common.utils import closing_if_possible
from test.unit import patch_policies, debug_logger, FakeMemcache, FakeRing
from test.unit.common.middleware.helpers import FakeSwift
from test.unit.proxy.controllers.test_obj import set_http_connect, \
PatchedObjControllerApp
class TestCopyConstraints(unittest.TestCase):
def test_validate_copy_from(self):
req = Request.blank(
'/v/a/c/o',
headers={'x-copy-from': 'c/o2'})
src_cont, src_obj = copy._check_copy_from_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'o2')
req = Request.blank(
'/v/a/c/o',
headers={'x-copy-from': 'c/subdir/o2'})
src_cont, src_obj = copy._check_copy_from_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'subdir/o2')
req = Request.blank(
'/v/a/c/o',
headers={'x-copy-from': '/c/o2'})
src_cont, src_obj = copy._check_copy_from_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'o2')
def test_validate_bad_copy_from(self):
req = Request.blank(
'/v/a/c/o',
headers={'x-copy-from': 'bad_object'})
self.assertRaises(HTTPException,
copy._check_copy_from_header, req)
def test_validate_destination(self):
req = Request.blank(
'/v/a/c/o',
headers={'destination': 'c/o2'})
src_cont, src_obj = copy._check_destination_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'o2')
req = Request.blank(
'/v/a/c/o',
headers={'destination': 'c/subdir/o2'})
src_cont, src_obj = copy._check_destination_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'subdir/o2')
req = Request.blank(
'/v/a/c/o',
headers={'destination': '/c/o2'})
src_cont, src_obj = copy._check_destination_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'o2')
def test_validate_bad_destination(self):
req = Request.blank(
'/v/a/c/o',
headers={'destination': 'bad_object'})
self.assertRaises(HTTPException,
copy._check_destination_header, req)
class TestServerSideCopyMiddleware(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.ssc = copy.filter_factory({})(self.app)
self.ssc.logger = self.app.logger
def tearDown(self):
self.assertEqual(self.app.unclosed_requests, {})
def call_app(self, req, app=None, expect_exception=False):
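        # Drive the WSGI app directly, capturing status/headers and recording every
        # request that reaches the fake swift.authorize callback.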
if app is None:
app = self.app
self.authorized = []
def authorize(req):
self.authorized.append(req)
if 'swift.authorize' not in req.environ:
req.environ['swift.authorize'] = authorize
req.headers.setdefault("User-Agent", "Bruce Wayne")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
body = ''
caught_exc = None
try:
# appease the close-checker
with closing_if_possible(body_iter):
for chunk in body_iter:
body += chunk
except Exception as exc:
if expect_exception:
caught_exc = exc
else:
raise
if expect_exception:
return status[0], headers[0], body, caught_exc
else:
return status[0], headers[0], body
def call_ssc(self, req, **kwargs):
return self.call_app(req, app=self.ssc, **kwargs)
def assertRequestEqual(self, req, other):
self.assertEqual(req.method, other.method)
self.assertEqual(req.path, other.path)
def test_no_object_in_path_pass_through(self):
self.app.register('PUT', '/v1/a/c', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c', method='PUT')
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_object_pass_through_methods(self):
for method in ['DELETE', 'GET', 'HEAD', 'REPLICATE']:
self.app.register(method, '/v1/a/c/o', swob.HTTPOk, {})
req = Request.blank('/v1/a/c/o', method=method)
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertNotIn('swift.orig_req_method', req.environ)
def test_basic_put_with_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o2', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
self.assertEqual(self.app.swift_sources[0], 'SSC')
self.assertEqual(self.app.swift_sources[1], 'SSC')
# For basic test cases, assert orig_req_method behavior
self.assertNotIn('swift.orig_req_method', req.environ)
def test_static_large_object_manifest(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'X-Static-Large-Object': 'True',
'Etag': 'should not be sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o2?multipart-manifest=put',
swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o2?multipart-manifest=get',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(2, len(self.app.calls))
self.assertEqual('GET', self.app.calls[0][0])
get_path, qs = self.app.calls[0][1].split('?')
params = urllib.parse.parse_qs(qs)
self.assertDictEqual(
{'format': ['raw'], 'multipart-manifest': ['get']}, params)
self.assertEqual(get_path, '/v1/a/c/o')
self.assertEqual(self.app.calls[1],
('PUT', '/v1/a/c/o2?multipart-manifest=put'))
req_headers = self.app.headers[1]
self.assertNotIn('X-Static-Large-Object', req_headers)
self.assertNotIn('Etag', req_headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
def test_static_large_object(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'X-Static-Large-Object': 'True',
'Etag': 'should not be sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o2',
swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o2',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(self.app.calls, [
('GET', '/v1/a/c/o'),
('PUT', '/v1/a/c/o2')])
req_headers = self.app.headers[1]
self.assertNotIn('X-Static-Large-Object', req_headers)
self.assertNotIn('Etag', req_headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
def test_basic_put_with_x_copy_from_across_container(self):
self.app.register('GET', '/v1/a/c1/o1', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c2/o2', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c1/o1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c1/o1') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c1/o1', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c2/o2', self.authorized[1].path)
def test_basic_put_with_x_copy_from_across_container_and_account(self):
self.app.register('GET', '/v1/a1/c1/o1', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a2/c2/o2', swob.HTTPCreated, {},
'passed')
req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c1/o1',
'X-Copy-From-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c1/o1') in headers)
self.assertTrue(('X-Copied-From-Account', 'a1') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a1/c1/o1', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a2/c2/o2', self.authorized[1].path)
def test_copy_non_zero_content_length(self):
req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '10',
'X-Copy-From': 'c1/o1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '400 Bad Request')
def test_copy_non_zero_content_length_with_account(self):
req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '10',
'X-Copy-From': 'c1/o1',
'X-Copy-From-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '400 Bad Request')
def test_copy_with_slashes_in_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o/o2'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_slashes_in_x_copy_from_and_account(self):
self.app.register('GET', '/v1/a1/c1/o/o1', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a2/c2/o2', swob.HTTPCreated, {})
req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c1/o/o1',
'X-Copy-From-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c1/o/o1') in headers)
self.assertTrue(('X-Copied-From-Account', 'a1') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a1/c1/o/o1', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a2/c2/o2', self.authorized[1].path)
def test_copy_with_spaces_in_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
        # space in source path
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o o2', path)
self.assertTrue(('X-Copied-From', 'c/o%20o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o%20o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_spaces_in_x_copy_from_and_account(self):
self.app.register('GET', '/v1/a/c/o o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
        # space in source path
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o o2', path)
self.assertTrue(('X-Copied-From', 'c/o%20o2') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o%20o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_leading_slash_in_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
# repeat tests with leading /
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_leading_slash_in_x_copy_from_and_account(self):
# repeat tests with leading /
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o/o2', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o/o2', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_no_object_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_copy_with_no_object_in_x_copy_from_and_account(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_copy_with_bad_x_copy_from_account(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': '/i/am/bad'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_copy_server_error_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '503 Service Unavailable')
def test_copy_server_error_reading_source_and_account(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_copy_not_found_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_copy_not_found_reading_source_and_account(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_copy_with_object_metadata(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_object_metadata_and_account(self):
self.app.register('GET', '/v1/a1/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay',
'X-Copy-From-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a1/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_source_larger_than_max_file_size(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
with mock.patch('swift.common.middleware.copy.'
'MAX_FILE_SIZE', 1):
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_basic_COPY(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
'etag': 'is sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
req = Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o-copy', self.authorized[1].path)
self.assertEqual(self.app.calls, [
('GET', '/v1/a/c/o'),
('PUT', '/v1/a/c/o-copy')])
self.assertIn('etag', self.app.headers[1])
self.assertEqual(self.app.headers[1]['etag'], 'is sent')
# For basic test cases, assert orig_req_method behavior
self.assertEqual(req.environ['swift.orig_req_method'], 'COPY')
def test_basic_DLO(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
'x-object-manifest': 'some/path',
'etag': 'is not sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
req = Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(self.app.calls, [
('GET', '/v1/a/c/o'),
('PUT', '/v1/a/c/o-copy')])
self.assertNotIn('x-object-manifest', self.app.headers[1])
self.assertNotIn('etag', self.app.headers[1])
def test_basic_DLO_manifest(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
'x-object-manifest': 'some/path',
'etag': 'is sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
req = Request.blank(
'/v1/a/c/o?multipart-manifest=get', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(2, len(self.app.calls))
self.assertEqual('GET', self.app.calls[0][0])
get_path, qs = self.app.calls[0][1].split('?')
params = urllib.parse.parse_qs(qs)
self.assertDictEqual(
{'format': ['raw'], 'multipart-manifest': ['get']}, params)
self.assertEqual(get_path, '/v1/a/c/o')
self.assertEqual(self.app.calls[1], ('PUT', '/v1/a/c/o-copy'))
self.assertIn('x-object-manifest', self.app.headers[1])
self.assertEqual(self.app.headers[1]['x-object-manifest'], 'some/path')
self.assertIn('etag', self.app.headers[1])
self.assertEqual(self.app.headers[1]['etag'], 'is sent')
def test_COPY_source_metadata(self):
source_headers = {
'x-object-sysmeta-test1': 'copy me',
'x-object-meta-test2': 'copy me too',
'x-object-transient-sysmeta-test3': 'ditto',
'x-object-sysmeta-container-update-override-etag': 'etag val',
'x-object-sysmeta-container-update-override-size': 'size val',
'x-object-sysmeta-container-update-override-foo': 'bar',
'x-delete-at': 'delete-at-time'}
get_resp_headers = source_headers.copy()
get_resp_headers['etag'] = 'source etag'
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
headers=get_resp_headers, body='passed')
def verify_headers(expected_headers, unexpected_headers,
actual_headers):
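            # Every expected header must appear with the right value, and no
            # unexpected header may be present.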
for k, v in actual_headers:
if k.lower() in expected_headers:
expected_val = expected_headers.pop(k.lower())
self.assertEqual(expected_val, v)
self.assertNotIn(k.lower(), unexpected_headers)
self.assertFalse(expected_headers)
# use a COPY request
self.app.register('PUT', '/v1/a/c/o-copy0', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy0'})
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('201 Created', status)
verify_headers(source_headers.copy(), [], resp_headers)
method, path, put_headers = self.app.calls_with_headers[-1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o-copy0', path)
verify_headers(source_headers.copy(), [], put_headers.items())
self.assertIn('etag', put_headers)
self.assertEqual(put_headers['etag'], 'source etag')
req = Request.blank('/v1/a/c/o-copy0', method='GET')
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('200 OK', status)
verify_headers(source_headers.copy(), [], resp_headers)
# use a COPY request with a Range header
self.app.register('PUT', '/v1/a/c/o-copy1', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy1',
'Range': 'bytes=1-2'})
status, resp_headers, body = self.call_ssc(req)
expected_headers = source_headers.copy()
unexpected_headers = (
'x-object-sysmeta-container-update-override-etag',
'x-object-sysmeta-container-update-override-size',
'x-object-sysmeta-container-update-override-foo')
for h in unexpected_headers:
expected_headers.pop(h)
self.assertEqual('201 Created', status)
verify_headers(expected_headers, unexpected_headers, resp_headers)
method, path, put_headers = self.app.calls_with_headers[-1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o-copy1', path)
verify_headers(
expected_headers, unexpected_headers, put_headers.items())
# etag should not be copied with a Range request
self.assertNotIn('etag', put_headers)
req = Request.blank('/v1/a/c/o-copy1', method='GET')
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('200 OK', status)
verify_headers(expected_headers, unexpected_headers, resp_headers)
# use a PUT with x-copy-from
self.app.register('PUT', '/v1/a/c/o-copy2', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o-copy2', method='PUT',
headers={'Content-Length': 0,
'X-Copy-From': 'c/o'})
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('201 Created', status)
verify_headers(source_headers.copy(), [], resp_headers)
method, path, put_headers = self.app.calls_with_headers[-1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o-copy2', path)
verify_headers(source_headers.copy(), [], put_headers.items())
self.assertIn('etag', put_headers)
self.assertEqual(put_headers['etag'], 'source etag')
req = Request.blank('/v1/a/c/o-copy2', method='GET')
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('200 OK', status)
verify_headers(source_headers.copy(), [], resp_headers)
# copy to same path as source
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'Content-Length': 0,
'X-Copy-From': 'c/o'})
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('201 Created', status)
verify_headers(source_headers.copy(), [], resp_headers)
method, path, put_headers = self.app.calls_with_headers[-1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
verify_headers(source_headers.copy(), [], put_headers.items())
self.assertIn('etag', put_headers)
self.assertEqual(put_headers['etag'], 'source etag')
def test_COPY_no_destination_header(self):
req = Request.blank(
'/v1/a/c/o', method='COPY', headers={'Content-Length': 0})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(len(self.authorized), 0)
def test_basic_COPY_account(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o2', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o2',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o', path)
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o2', path)
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o2', self.authorized[1].path)
def test_COPY_across_containers(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c2/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c2/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c2/o', self.authorized[1].path)
def test_COPY_source_with_slashes_in_name(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_source_with_slashes_in_name(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_destination_leading_slash(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_destination_leading_slash(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o', path)
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_source_with_slashes_destination_leading_slash(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_source_with_slashes_destination_leading_slash(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_COPY_account_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_COPY_account_bad_destination_account(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'Destination-Account': '/i/am/bad'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_COPY_server_error_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_account_server_error_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_not_found_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_account_not_found_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_with_metadata(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "passed")
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'X-Object-Meta-Ours': 'okay'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_with_metadata(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "passed")
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'X-Object-Meta-Ours': 'okay',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o', path)
self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_source_zero_content_length(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_source_larger_than_max_file_size(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
with mock.patch('swift.common.middleware.copy.'
'MAX_FILE_SIZE', 1):
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_account_source_zero_content_length(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_account_source_larger_than_max_file_size(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
with mock.patch('swift.common.middleware.copy.'
'MAX_FILE_SIZE', 1):
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_newest(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'Last-Modified': '123'}, "passed")
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From-Last-Modified', '123') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_newest(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'Last-Modified': '123'}, "passed")
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From-Last-Modified', '123') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_in_OPTIONS_response(self):
self.app.register('OPTIONS', '/v1/a/c/o', swob.HTTPOk,
{'Allow': 'GET, PUT'})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'OPTIONS'}, headers={})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '200 OK')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('OPTIONS', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('Allow', 'GET, PUT, COPY') in headers)
self.assertEqual(len(self.authorized), 1)
self.assertEqual('OPTIONS', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
# For basic test cases, assert orig_req_method behavior
self.assertNotIn('swift.orig_req_method', req.environ)
def test_COPY_in_OPTIONS_response_CORS(self):
self.app.register('OPTIONS', '/v1/a/c/o', swob.HTTPOk,
{'Allow': 'GET, PUT',
'Access-Control-Allow-Methods': 'GET, PUT'})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'OPTIONS'}, headers={})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '200 OK')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('OPTIONS', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('Allow', 'GET, PUT, COPY') in headers)
self.assertTrue(('Access-Control-Allow-Methods',
'GET, PUT, COPY') in headers)
self.assertEqual(len(self.authorized), 1)
self.assertEqual('OPTIONS', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def _test_COPY_source_headers(self, extra_put_headers):
# helper method to perform a COPY with some metadata headers that
# should always be sent to the destination
put_headers = {'Destination': '/c1/o',
'X-Object-Meta-Test2': 'added',
'X-Object-Sysmeta-Test2': 'added',
'X-Object-Transient-Sysmeta-Test2': 'added'}
put_headers.update(extra_put_headers)
get_resp_headers = {
'X-Timestamp': '1234567890.12345',
'X-Backend-Timestamp': '1234567890.12345',
'Content-Type': 'text/original',
'Content-Encoding': 'gzip',
'Content-Disposition': 'attachment; filename=myfile',
'X-Object-Meta-Test': 'original',
'X-Object-Sysmeta-Test': 'original',
'X-Object-Transient-Sysmeta-Test': 'original',
'X-Foo': 'Bar'}
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk, headers=get_resp_headers)
self.app.register('PUT', '/v1/a/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', method='COPY', headers=put_headers)
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
self.assertEqual(2, len(calls))
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
# these headers should always be applied to the destination
self.assertEqual('added', req_headers.get('X-Object-Meta-Test2'))
self.assertEqual('added', req_headers.get('X-Object-Sysmeta-Test2'))
self.assertEqual('added',
req_headers.get('X-Object-Transient-Sysmeta-Test2'))
return req_headers
def test_COPY_source_headers_no_updates(self):
# copy should preserve existing metadata if not updated
req_headers = self._test_COPY_source_headers({})
self.assertEqual('text/original', req_headers.get('Content-Type'))
self.assertEqual('gzip', req_headers.get('Content-Encoding'))
self.assertEqual('attachment; filename=myfile',
req_headers.get('Content-Disposition'))
self.assertEqual('original', req_headers.get('X-Object-Meta-Test'))
self.assertEqual('original', req_headers.get('X-Object-Sysmeta-Test'))
self.assertEqual('original',
req_headers.get('X-Object-Transient-Sysmeta-Test'))
self.assertEqual('Bar', req_headers.get('X-Foo'))
self.assertNotIn('X-Timestamp', req_headers)
self.assertNotIn('X-Backend-Timestamp', req_headers)
def test_COPY_source_headers_with_updates(self):
# copy should apply any updated values to existing metadata
put_headers = {
'Content-Type': 'text/not_original',
'Content-Encoding': 'not_gzip',
'Content-Disposition': 'attachment; filename=notmyfile',
'X-Object-Meta-Test': 'not_original',
'X-Object-Sysmeta-Test': 'not_original',
'X-Object-Transient-Sysmeta-Test': 'not_original',
'X-Foo': 'Not Bar'}
req_headers = self._test_COPY_source_headers(put_headers)
self.assertEqual('text/not_original', req_headers.get('Content-Type'))
self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
self.assertEqual('attachment; filename=notmyfile',
req_headers.get('Content-Disposition'))
self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
self.assertEqual('not_original',
req_headers.get('X-Object-Sysmeta-Test'))
self.assertEqual('not_original',
req_headers.get('X-Object-Transient-Sysmeta-Test'))
self.assertEqual('Not Bar', req_headers.get('X-Foo'))
self.assertNotIn('X-Timestamp', req_headers)
self.assertNotIn('X-Backend-Timestamp', req_headers)
def test_COPY_x_fresh_metadata_no_updates(self):
# existing user metadata should not be copied, sysmeta is copied
put_headers = {
'X-Fresh-Metadata': 'true',
'X-Extra': 'Fresh'}
req_headers = self._test_COPY_source_headers(put_headers)
self.assertEqual('text/original', req_headers.get('Content-Type'))
self.assertEqual('Fresh', req_headers.get('X-Extra'))
self.assertEqual('original',
req_headers.get('X-Object-Sysmeta-Test'))
self.assertIn('X-Fresh-Metadata', req_headers)
self.assertNotIn('X-Object-Meta-Test', req_headers)
self.assertNotIn('X-Object-Transient-Sysmeta-Test', req_headers)
self.assertNotIn('X-Timestamp', req_headers)
self.assertNotIn('X-Backend-Timestamp', req_headers)
self.assertNotIn('Content-Encoding', req_headers)
self.assertNotIn('Content-Disposition', req_headers)
self.assertNotIn('X-Foo', req_headers)
def test_COPY_x_fresh_metadata_with_updates(self):
# existing user metadata should not be copied, new metadata replaces it
put_headers = {
'X-Fresh-Metadata': 'true',
'Content-Type': 'text/not_original',
'Content-Encoding': 'not_gzip',
'Content-Disposition': 'attachment; filename=notmyfile',
'X-Object-Meta-Test': 'not_original',
'X-Object-Sysmeta-Test': 'not_original',
'X-Object-Transient-Sysmeta-Test': 'not_original',
'X-Foo': 'Not Bar',
'X-Extra': 'Fresh'}
req_headers = self._test_COPY_source_headers(put_headers)
self.assertEqual('Fresh', req_headers.get('X-Extra'))
self.assertEqual('text/not_original', req_headers.get('Content-Type'))
self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
self.assertEqual('attachment; filename=notmyfile',
req_headers.get('Content-Disposition'))
self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
self.assertEqual('not_original',
req_headers.get('X-Object-Sysmeta-Test'))
self.assertEqual('not_original',
req_headers.get('X-Object-Transient-Sysmeta-Test'))
self.assertEqual('Not Bar', req_headers.get('X-Foo'))
def test_COPY_with_single_range(self):
# verify that source etag is not copied when copying a range
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'etag': 'bogus etag'}, "abcdefghijklmnop")
self.app.register('PUT', '/v1/a/c1/o', swob.HTTPCreated, {})
req = swob.Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Destination': 'c1/o',
'Range': 'bytes=5-10'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
self.assertEqual(2, len(calls))
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c1/o', path)
self.assertNotIn('etag', (h.lower() for h in req_headers))
self.assertEqual('6', req_headers['content-length'])
req = swob.Request.blank('/v1/a/c1/o', method='GET')
status, headers, body = self.call_ssc(req)
self.assertEqual('fghijk', body)
@patch_policies(with_ec_default=True)
class TestServerSideCopyMiddlewareWithEC(unittest.TestCase):
container_info = {
'status': 200,
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None,
}
def setUp(self):
self.logger = debug_logger('proxy-server')
self.logger.thread_locals = ('txn1', '127.0.0.2')
self.app = PatchedObjControllerApp(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
self.ssc = copy.filter_factory({})(self.app)
self.ssc.logger = self.app.logger
self.policy = POLICIES.default
self.app.container_info = dict(self.container_info)
def test_COPY_with_single_range(self):
req = swob.Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Destination': 'c1/o',
'Range': 'bytes=5-10'})
# turn a real body into fragments
segment_size = self.policy.ec_segment_size
real_body = ('asdf' * segment_size)[:-10]
# split it up into chunks
chunks = [real_body[x:x + segment_size]
for x in range(0, len(real_body), segment_size)]
# we need only first chunk to rebuild 5-10 range
fragments = self.policy.pyeclib_driver.encode(chunks[0])
fragment_payloads = []
fragment_payloads.append(fragments)
node_fragments = zip(*fragment_payloads)
self.assertEqual(len(node_fragments),
self.policy.object_ring.replicas) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
responses = [(200, ''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
responses += [(201, '', {})] * self.policy.object_ring.replicas
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
put_hdrs = []
def capture_conn(host, port, dev, part, method, path, *args, **kwargs):
if method == 'PUT':
put_hdrs.append(args[0])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers,
give_connect=capture_conn):
resp = req.get_response(self.ssc)
self.assertEqual(resp.status_int, 201)
expected_puts = POLICIES.default.ec_ndata + POLICIES.default.ec_nparity
self.assertEqual(expected_puts, len(put_hdrs))
for hdrs in put_hdrs:
# etag should not be copied from source
self.assertNotIn('etag', (h.lower() for h in hdrs))
def test_COPY_with_invalid_ranges(self):
# real body size is segment_size - 10 (just 1 segment)
segment_size = self.policy.ec_segment_size
real_body = ('a' * segment_size)[:-10]
# range is out of real body but in segment size
self._test_invalid_ranges('COPY', real_body,
segment_size, '%s-' % (segment_size - 10))
# range is out of both real body and segment size
self._test_invalid_ranges('COPY', real_body,
segment_size, '%s-' % (segment_size + 10))
def _test_invalid_ranges(self, method, real_body, segment_size, req_range):
# make a request with range starts from more than real size.
body_etag = md5(real_body).hexdigest()
req = swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'Destination': 'c1/o',
'Range': 'bytes=%s' % (req_range)})
fragments = self.policy.pyeclib_driver.encode(real_body)
fragment_payloads = [fragments]
node_fragments = zip(*fragment_payloads)
self.assertEqual(len(node_fragments),
self.policy.object_ring.replicas) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)),
'X-Object-Sysmeta-Ec-Etag': body_etag}
start = int(req_range.split('-')[0])
self.assertTrue(start >= 0) # sanity
title, exp = swob.RESPONSE_REASONS[416]
range_not_satisfiable_body = \
'<html><h1>%s</h1><p>%s</p></html>' % (title, exp)
if start >= segment_size:
responses = [(416, range_not_satisfiable_body, headers)
for i in range(POLICIES.default.ec_ndata)]
else:
responses = [(200, ''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
# TODO possibly use FakeApp here
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers):
resp = req.get_response(self.ssc)
self.assertEqual(resp.status_int, 416)
self.assertEqual(resp.content_length, len(range_not_satisfiable_body))
self.assertEqual(resp.body, range_not_satisfiable_body)
self.assertEqual(resp.etag, body_etag)
self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
| [
[
[
619,
623
],
[
27592,
27596
],
[
52102,
52106
],
[
53521,
53525
]
],
[
[
631,
639
],
[
1176,
1184
],
[
3294,
3302
],
[
65104,
65112
]
],
[
[
660,
663
],
[
68633,
68636
]
],
[
[
686,
692
],
[
7801,
7807
],
[
30762,
30768
]
],
[
[
719,
723
],
[
4972,
4976
],
[
5415,
5419
],
[
5870,
5874
],
[
5944,
5948
],
[
6959,
6963
],
[
7196,
7200
],
[
8625,
8629
],
[
8839,
8843
],
[
9918,
9922
],
[
9993,
9997
],
[
10801,
10805
],
[
10877,
10881
],
[
12623,
12627
],
[
12696,
12700
],
[
13494,
13498
],
[
13570,
13574
],
[
14497,
14501
],
[
14570,
14574
],
[
15577,
15581
],
[
15652,
15656
],
[
16787,
16791
],
[
16860,
16864
],
[
17900,
17904
],
[
17975,
17979
],
[
19078,
19082
],
[
19151,
19155
],
[
20135,
20139
],
[
20210,
20214
],
[
22569,
22573
],
[
23022,
23026
],
[
23699,
23703
],
[
24301,
24305
],
[
24954,
24958
],
[
25027,
25031
],
[
26120,
26124
],
[
26193,
26197
],
[
27352,
27356
],
[
28050,
28054
],
[
28158,
28162
],
[
29244,
29248
],
[
29402,
29406
],
[
30068,
30072
],
[
30222,
30226
],
[
31916,
31920
],
[
32496,
32500
],
[
33526,
33530
],
[
34989,
34993
],
[
36001,
36005
],
[
37086,
37090
],
[
37162,
37166
],
[
38336,
38340
],
[
38410,
38414
],
[
39156,
39160
],
[
39229,
39233
],
[
40163,
40167
],
[
40238,
40242
],
[
41297,
41301
],
[
41370,
41374
],
[
42118,
42122
],
[
42193,
42197
],
[
43267,
43271
],
[
43340,
43344
],
[
44293,
44297
],
[
44368,
44372
],
[
46569,
46573
],
[
47159,
47163
],
[
47805,
47809
],
[
48372,
48376
],
[
48987,
48991
],
[
49060,
49064
],
[
50110,
50114
],
[
50185,
50189
],
[
51311,
51315
],
[
51893,
51897
],
[
52589,
52593
],
[
53245,
53249
],
[
53980,
53984
],
[
54101,
54105
],
[
54844,
54848
],
[
54967,
54971
],
[
55788,
55792
],
[
56709,
56713
],
[
58568,
58572
],
[
58654,
58658
],
[
64112,
64116
],
[
64242,
64246
],
[
64278,
64282
],
[
64884,
64888
],
[
65858,
65862
],
[
68674,
68678
],
[
69354,
69358
]
],
[
[
760,
764
],
[
1361,
1365
],
[
1608,
1612
],
[
1856,
1860
],
[
2185,
2189
],
[
2388,
2392
],
[
2637,
2641
],
[
2887,
2891
],
[
3220,
3224
],
[
3385,
3389
],
[
65625,
65629
]
],
[
[
805,
813
],
[
65723,
65731
],
[
66841,
66849
],
[
67624,
67632
],
[
67652,
67660
],
[
69625,
69633
],
[
69775,
69783
]
],
[
[
844,
851
],
[
1249,
1256
],
[
1489,
1496
],
[
1743,
1750
],
[
2028,
2035
],
[
2276,
2283
],
[
2518,
2525
],
[
2774,
2781
],
[
3063,
3070
],
[
5008,
5015
],
[
5450,
5457
],
[
5980,
5987
],
[
7232,
7239
],
[
8875,
8882
],
[
10029,
10036
],
[
10949,
10956
],
[
11796,
11803
],
[
12163,
12170
],
[
12732,
12739
],
[
13606,
13613
],
[
14636,
14643
],
[
15718,
15725
],
[
16934,
16941
],
[
18011,
18018
],
[
19187,
19194
],
[
20246,
20253
],
[
21274,
21281
],
[
21648,
21655
],
[
22074,
22081
],
[
22616,
22623
],
[
23069,
23076
],
[
23736,
23743
],
[
24338,
24345
],
[
25063,
25070
],
[
26229,
26236
],
[
27396,
27403
],
[
28194,
28201
],
[
29438,
29445
],
[
30258,
30265
],
[
32532,
32539
],
[
33214,
33221
],
[
33562,
33569
],
[
34678,
34685
],
[
35025,
35032
],
[
35706,
35713
],
[
36037,
36044
],
[
36753,
36760
],
[
37198,
37205
],
[
38446,
38453
],
[
39265,
39272
],
[
40274,
40281
],
[
41406,
41413
],
[
42229,
42236
],
[
43376,
43383
],
[
44404,
44411
],
[
45395,
45402
],
[
45731,
45738
],
[
46132,
46139
],
[
46616,
46623
],
[
47206,
47213
],
[
47842,
47849
],
[
48409,
48416
],
[
49096,
49103
],
[
50221,
50228
],
[
51348,
51355
],
[
51937,
51944
],
[
52626,
52633
],
[
53289,
53296
],
[
54137,
54144
],
[
55003,
55010
],
[
55864,
55871
],
[
56856,
56863
],
[
58690,
58697
]
],
[
[
853,
866
],
[
2144,
2157
],
[
3179,
3192
]
],
[
[
898,
917
],
[
4238,
4257
]
],
[
[
940,
954
],
[
65026,
65040
]
],
[
[
956,
968
],
[
65357,
65369
]
],
[
[
970,
982
],
[
65506,
65518
]
],
[
[
984,
992
],
[
65535,
65543
],
[
65574,
65582
]
],
[
[
1041,
1050
],
[
3354,
3363
]
],
[
[
1100,
1116
],
[
67317,
67333
],
[
70042,
70058
]
],
[
[
1124,
1147
],
[
65463,
65486
]
],
[
[
1156,
1175
]
],
[
[
3265,
3293
]
],
[
[
65069,
65103
]
]
] |
from typing import Dict
from twitchio.dataclasses import Message
class TeamData:
def __init__(self, num_teams: int = 2):
self.num_teams = num_teams
self.teams: Dict[int, int] = {}
async def handle_join(self, msg: Message) -> None:
if msg.author.id in self.teams:
# User already on a team
return
all_teams = self.teams.values()
if len(all_teams) < self.num_teams:
self.teams[msg.author.id] = len(all_teams)
return
team_counts: Dict[int, int] = {}
for team_id in all_teams:
team_counts[team_id] = team_counts.setdefault(team_id, 0) + 1
min_member_team_id = min(team_counts, key=team_counts.get)
self.teams[msg.author.id] = min_member_team_id
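        # Illustrative walk-through of the balancing rule above (hypothetical ids,
        # not part of the original class): with num_teams=2 and
        # teams == {'u1': 0, 'u2': 1, 'u3': 0}, team_counts becomes {0: 2, 1: 1},
        # so min(team_counts, key=team_counts.get) returns team 1 and the joining
        # user is placed on that team.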
def get_team_member_map(self):
reverse_dict = {}
for k, v in self.teams.items():
reverse_dict.setdefault(v, []).append(k)
return reverse_dict
def reset(self, num_teams: int = 2):
self.num_teams = num_teams
self.teams: Dict[int, int] = {} | [
[
[
19,
23
],
[
183,
187
],
[
537,
541
],
[
1068,
1072
]
],
[
[
57,
64
],
[
241,
248
]
],
[
[
73,
81
]
]
] |
from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc = Location(env, mgr.LE(z, i_0))
loc.set_progress(0, mgr.Equals(x_z, z))
h_z = Hint("h_z0", env, frozenset([z]), symbs)
h_z.set_locs([loc])
res.append(h_z)
loc0 = Location(env, mgr.Equals(pc, i_0))
loc0.set_progress(1, mgr.Equals(x_pc, i_1))
loc1 = Location(env, mgr.Equals(pc, i_1))
loc1.set_progress(2, mgr.Equals(x_pc, i_2))
loc2 = Location(env, mgr.Equals(pc, i_2))
loc2.set_progress(0, mgr.Equals(x_pc, i_3))
loc3 = Location(env, mgr.Equals(pc, i_3))
loc3.set_progress(0, mgr.Equals(x_pc, i_0))
h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2, loc3])
res.append(h_pc)
stutter = mgr.Equals(x_x, x)
loc0 = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, i_1)))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_z, y))
loc1 = Location(env, mgr.GE(z, i_0), mgr.GE(x, i_3))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, x)))
h_z = Hint("h_z3", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_0))
loc0.set_progress(1, mgr.Equals(x_z, z))
loc1 = Location(env, mgr.GE(z, i_0))
loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
h_z = Hint("h_z4", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(2, mgr.GE(x_pc, i_3))
loc2 = Location(env, mgr.GE(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
return frozenset(res)
| [
[
[
19,
28
],
[
260,
269
],
[
4252,
4261
]
],
[
[
30,
35
],
[
254,
259
]
],
[
[
43,
64
],
[
439,
444
],
[
474,
479
],
[
509,
514
],
[
544,
549
],
[
4363,
4368
],
[
4398,
4403
],
[
4433,
4438
],
[
4468,
4473
]
],
[
[
95,
118
],
[
241,
249
],
[
373,
381
],
[
4239,
4247
],
[
4296,
4304
]
],
[
[
143,
148
],
[
270,
275
],
[
278,
283
],
[
285,
290
],
[
338,
343
]
],
[
[
167,
179
],
[
566,
578
],
[
598,
610
],
[
629,
641
],
[
660,
672
],
[
4527,
4539
],
[
4559,
4571
],
[
4590,
4602
],
[
4621,
4633
]
],
[
[
197,
201
],
[
4262,
4266
],
[
4836,
4840
],
[
5310,
5314
],
[
5717,
5721
],
[
6089,
6093
],
[
6406,
6410
],
[
6864,
6868
],
[
7161,
7165
],
[
7532,
7536
]
],
[
[
203,
211
],
[
4752,
4760
],
[
4934,
4942
],
[
5028,
5036
],
[
5122,
5130
],
[
5216,
5224
],
[
5464,
5472
],
[
5617,
5625
],
[
5822,
5830
],
[
5975,
5983
],
[
6194,
6202
],
[
6296,
6304
],
[
6511,
6519
],
[
6664,
6672
],
[
6779,
6787
],
[
6975,
6983
],
[
7061,
7069
],
[
7266,
7274
],
[
7356,
7364
],
[
7442,
7450
]
],
[
[
218,
235
]
],
[
[
4228,
4233
]
]
] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.utilities import geometric_key
__all__ = [
'mesh_delete_duplicate_vertices'
]
def mesh_delete_duplicate_vertices(mesh, precision=None):
"""Cull all duplicate vertices of a mesh and sanitize affected faces.
Parameters
----------
mesh : Mesh
A mesh object.
precision : str (None)
A formatting option that specifies the precision of the
individual numbers in the string (truncation after the decimal point).
Supported values are any float precision, or decimal integer (``'d'``).
Default is ``'3f'``.
Returns
-------
None
The mesh is modified in-place.
Examples
--------
>>> import compas
>>> from compas.datastructures import Mesh
>>> mesh = Mesh.from_obj(compas.get('faces.obj'))
>>> mesh.number_of_vertices()
36
>>> for x, y, z in mesh.vertices_attributes('xyz', keys=list(mesh.vertices())[:5]):
... mesh.add_vertex(x=x, y=y, z=z)
...
36
37
38
39
40
>>> mesh.number_of_vertices()
41
>>> mesh_delete_duplicate_vertices(mesh)
>>> mesh.number_of_vertices()
36
"""
key_gkey = {key: geometric_key(mesh.vertex_attributes(key, 'xyz'), precision=precision) for key in mesh.vertices()}
gkey_key = {gkey: key for key, gkey in iter(key_gkey.items())}
for key in list(mesh.vertices()):
test = gkey_key[key_gkey[key]]
if test != key:
del mesh.vertex[key]
del mesh.halfedge[key]
for u in mesh.halfedge:
nbrs = list(mesh.halfedge[u].keys())
for v in nbrs:
if v == key:
del mesh.halfedge[u][v]
for fkey in mesh.faces():
seen = set()
face = []
for key in [gkey_key[key_gkey[key]] for key in mesh.face_vertices(fkey)]:
if key not in seen:
seen.add(key)
face.append(key)
mesh.face[fkey] = face
for u, v in mesh.face_halfedges(fkey):
mesh.halfedge[u][v] = fkey
if u not in mesh.halfedge[v]:
mesh.halfedge[v][u] = None
| [
[
[
23,
37
]
],
[
[
61,
76
]
],
[
[
100,
108
]
],
[
[
139,
152
],
[
1284,
1297
]
],
[
[
155,
162
]
],
[
[
212,
242
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Ruxcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test RPC calls related to blockchain state. Tests correspond to code in
# rpc/blockchain.cpp.
#
from decimal import Decimal
from test_framework.test_framework import RuxcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import (
assert_equal,
assert_raises,
assert_is_hex_string,
assert_is_hash_string,
start_nodes,
connect_nodes_bi,
)
class BlockchainTest(RuxcoinTestFramework):
"""
Test blockchain-related RPC calls:
        - gettxoutsetinfo
        - getblockheader
        - verifychain
"""
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 2
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split = False
self.sync_all()
def run_test(self):
self._test_gettxoutsetinfo()
self._test_getblockheader()
self.nodes[0].verifychain(4, 0)
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
        assert_equal(res['bytes_serialized'], 13924)
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized']), 64)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises(
JSONRPCException, lambda: node.getblockheader('nonsense'))
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
if __name__ == '__main__':
BlockchainTest().main()
| [
[
[
336,
343
],
[
1399,
1406
],
[
2791,
2798
]
],
[
[
387,
407
],
[
650,
670
]
],
[
[
445,
461
],
[
1812,
1828
]
],
[
[
500,
512
],
[
1365,
1377
],
[
1433,
1445
],
[
1480,
1492
],
[
1521,
1533
],
[
1562,
1574
],
[
1616,
1628
],
[
1664,
1676
],
[
2019,
2031
],
[
2066,
2078
],
[
2110,
2122
],
[
2159,
2171
]
],
[
[
518,
531
],
[
1785,
1798
]
],
[
[
537,
557
],
[
2225,
2245
]
],
[
[
563,
584
],
[
2275,
2296
],
[
2321,
2342
],
[
2380,
2401
],
[
2432,
2453
]
],
[
[
590,
601
],
[
960,
971
]
],
[
[
607,
623
],
[
1017,
1033
]
],
[
[
635,
649
],
[
2832,
2846
]
]
] |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('button07.xlsm')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
workbook.set_vba_name()
worksheet.set_vba_name()
worksheet.insert_button('C2', {'macro': 'say_hello',
'caption': 'Hello'})
workbook.add_vba_project(self.vba_dir + 'vbaProject02.bin')
workbook.close()
self.assertExcelEqual()
def test_create_file_explicit_vba_names(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
workbook.set_vba_name('ThisWorkbook')
worksheet.set_vba_name('Sheet1')
worksheet.insert_button('C2', {'macro': 'say_hello',
'caption': 'Hello'})
workbook.add_vba_project(self.vba_dir + 'vbaProject02.bin')
workbook.close()
self.assertExcelEqual()
def test_create_file_implicit_vba_names(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_button('C2', {'macro': 'say_hello',
'caption': 'Hello'})
workbook.add_vba_project(self.vba_dir + 'vbaProject02.bin')
workbook.close()
self.assertExcelEqual()
| [
[
[
209,
228
],
[
291,
310
]
],
[
[
253,
261
],
[
579,
587
],
[
1102,
1110
],
[
1647,
1655
]
],
[
[
270,
290
]
]
] |
class Fan():
"""Default Device with ON / OFF Functions"""
deviceID = None
def __init__(self, deviceID):
if deviceID is None:
print("Provide a Device ID")
return
self.deviceID = deviceID
def setSpeed(self):
pass
def getSpeed(self):
pass
| [
[
[
6,
9
]
]
] |
# Generated by Django 2.2 on 2020-10-29 04:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ConstructionSystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_construction_system', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Material',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_material', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Origin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_origin', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_project', models.CharField(max_length=255)),
('use', models.CharField(max_length=255)),
('builded_surface', models.IntegerField()),
('living_area', models.IntegerField()),
('tier', models.IntegerField()),
('useful_life', models.IntegerField()),
],
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_section', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Unit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_unit', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='MaterialSchemeProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('provider_distance', models.IntegerField()),
('construction_system_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.ConstructionSystem')),
('material_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.Material')),
('origin_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.Origin')),
('project_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.Project')),
('unit_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.Unit')),
],
),
]
| [
[
[
69,
79
],
[
139,
149
],
[
238,
248
],
[
551,
561
],
[
843,
853
],
[
1131,
1141
],
[
1701,
1711
],
[
1991,
2001
],
[
2275,
2285
]
],
[
[
81,
87
],
[
345,
351
],
[
482,
488
],
[
648,
654
],
[
774,
780
],
[
938,
944
],
[
1062,
1068
],
[
1227,
1233
],
[
1352,
1358
],
[
1411,
1417
],
[
1482,
1488
],
[
1538,
1544
],
[
1587,
1593
],
[
1643,
1649
],
[
1797,
1803
],
[
1922,
1928
],
[
2084,
2090
],
[
2206,
2212
],
[
2385,
2391
],
[
2506,
2512
],
[
2568,
2574
],
[
2635,
2641
],
[
2773,
2779
],
[
2899,
2905
],
[
3024,
3030
],
[
3147,
3153
]
],
[
[
95,
120
],
[
2663,
2669
],
[
2801,
2807
],
[
2927,
2933
],
[
3052,
3058
],
[
3175,
3181
]
],
[
[
129,
138
]
]
] |
from typing import Optional
from cryptoxlib.exceptions import CryptoXLibException
class AAXException(CryptoXLibException):
pass
class AAXRestException(AAXException):
def __init__(self, status_code: int, body: Optional[dict]):
super().__init__(f"Rest API exception: status [{status_code}], response [{body}]")
self.status_code = status_code
self.body = body | [
[
[
19,
27
],
[
216,
224
]
],
[
[
63,
82
],
[
104,
123
]
],
[
[
91,
103
],
[
157,
169
]
],
[
[
140,
156
]
]
] |
import aioredis
from sanic import Sanic
class RedisWorker:
def __init__(self):
self.__host = None
self.__pool = None
async def init(self, app: Sanic):
self.__host = app.config.REDIS_HOST
self.__pool = await aioredis.create_redis(self.__host)
async def check_session(self, token):
return await self.__pool.expire(token, 300)
async def set_conf_msg(self, phone, msg):
await self.__pool.set(phone, msg)
await self.__pool.expire(phone, 60)
async def get_conf_msg(self, phone, msg):
        real_code = await self.__pool.get(phone)
        if real_code == msg:
            await self.__pool.delete(phone)
return True
else:
return False
async def get_user(self, token):
return await self.__pool.get(token)
async def create_session(self, user_id, token):
cur_token = await self.__pool.get(user_id)
if not cur_token:
await self.__pool.set(token, user_id)
await self.__pool.expire(token, 300)
else:
token = cur_token
return token
async def close(self):
self.__pool.close()
await self.__pool.wait_closed()
redis = RedisWorker()
| [
[
[
7,
15
],
[
250,
258
]
],
[
[
34,
39
],
[
170,
175
]
],
[
[
48,
59
],
[
1215,
1226
]
],
[
[
1207,
1212
]
]
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Security Groups
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit ec2 credentials but can
    also utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at::
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
secgroup.keyid: GKTADJGHEIQSXMKKRBJ08H
secgroup.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration::
secgroup.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto
'''
from __future__ import absolute_import
# Import Python libs
import logging
import re
from distutils.version import LooseVersion as _LooseVersion
import six
log = logging.getLogger(__name__)
# Import third party libs
try:
import boto
import boto.ec2
logging.getLogger('boto').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from six import string_types
import salt.utils.odict as odict
def __virtual__():
'''
Only load if boto libraries exist and if boto libraries are greater than
a given version.
'''
required_boto_version = '2.4.0'
# Boto < 2.4.0 GroupOrCIDR objects have different attributes than
# Boto >= 2.4.0 GroupOrCIDR objects
# Differences include no group_id attribute in Boto < 2.4.0 and returning
# a groupId attribute when a GroupOrCIDR object authorizes an IP range
# Support for Boto < 2.4.0 can be added if needed
if not HAS_BOTO:
return False
elif _LooseVersion(boto.__version__) < _LooseVersion(required_boto_version):
return False
else:
return True
def exists(name=None, region=None, key=None, keyid=None, profile=None,
vpc_id=None, group_id=None):
'''
    Check to see if a security group exists.
CLI example::
salt myminion boto_secgroup.exists mysecgroup
'''
conn = _get_conn(region, key, keyid, profile)
if not conn:
return False
group = _get_group(conn, name, vpc_id, group_id, region)
if group:
return True
else:
return False
def _split_rules(rules):
'''
Split rules with combined grants into individual rules.
Amazon returns a set of rules with the same protocol, from and to ports
together as a single rule with a set of grants. Authorizing and revoking
rules, however, is done as a split set of rules. This function splits the
rules up.
'''
split = []
for rule in rules:
ip_protocol = rule.get('ip_protocol')
to_port = rule.get('to_port')
from_port = rule.get('from_port')
grants = rule.get('grants')
for grant in grants:
_rule = {'ip_protocol': ip_protocol,
'to_port': to_port,
'from_port': from_port}
for key, val in six.iteritems(grant):
_rule[key] = val
split.append(_rule)
return split
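# Illustrative sketch of the split performed above (hypothetical values, not
# part of the original module): a combined rule such as
#   {'ip_protocol': 'tcp', 'from_port': 80, 'to_port': 80,
#    'grants': [{'cidr_ip': '10.0.0.0/8'}, {'cidr_ip': '192.168.0.0/24'}]}
# yields two rules, one per grant, each carrying ip_protocol/from_port/to_port
# plus that grant's own keys (here 'cidr_ip').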
def _get_group(conn, name=None, vpc_id=None, group_id=None, region=None):
'''
Get a group object given a name, name and vpc_id or group_id. Return a
boto.ec2.securitygroup.SecurityGroup object if the group is found, else
return None.
'''
if name:
if vpc_id is None:
log.debug('getting group for {0}'.format(name))
group_filter = {'group-name': name}
filtered_groups = conn.get_all_security_groups(filters=group_filter)
# security groups can have the same name if groups exist in both
# EC2-Classic and EC2-VPC
# iterate through groups to ensure we return the EC2-Classic
# security group
for group in filtered_groups:
# a group in EC2-Classic will have vpc_id set to None
if group.vpc_id is None:
return group
return None
elif vpc_id:
log.debug('getting group for {0} in vpc_id {1}'.format(name, vpc_id))
group_filter = {'group-name': name, 'vpc_id': vpc_id}
filtered_groups = conn.get_all_security_groups(filters=group_filter)
if len(filtered_groups) == 1:
return filtered_groups[0]
else:
return None
else:
return None
elif group_id:
try:
groups = conn.get_all_security_groups(group_ids=[group_id])
except boto.exception.BotoServerError as e:
log.debug(e)
return None
if len(groups) == 1:
return groups[0]
else:
return None
else:
return None
def get_group_id(name, vpc_id=None, region=None, key=None, keyid=None, profile=None):
'''
Get a Group ID given a Group Name or Group Name and VPC ID
CLI example::
salt myminion boto_secgroup.get_group_id mysecgroup
'''
conn = _get_conn(region, key, keyid, profile)
if not conn:
return False
    group = _get_group(conn, name, vpc_id, region=region)
if group:
return group.id
else:
return False
def convert_to_group_ids(groups, vpc_id, region=None, key=None, keyid=None,
profile=None):
'''
Given a list of security groups and a vpc_id, convert_to_group_ids will
convert all list items in the given list to security group ids.
CLI example::
salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h
'''
log.debug('security group contents {0} pre-conversion'.format(groups))
group_ids = []
for group in groups:
if re.match('sg-.*', group):
log.debug('group {0} is a group id. get_group_id not called.'
.format(group))
group_ids.append(group)
else:
log.debug('calling boto_secgroup.get_group_id for'
' group name {0}'.format(group))
group_id = get_group_id(group, vpc_id, region, key, keyid, profile)
log.debug('group name {0} has group id {1}'.format(
group, group_id)
)
group_ids.append(str(group_id))
log.debug('security group contents {0} post-conversion'.format(group_ids))
return group_ids
def get_config(name=None, group_id=None, region=None, key=None, keyid=None,
profile=None, vpc_id=None):
'''
Get the configuration for a security group.
CLI example::
salt myminion boto_secgroup.get_config mysecgroup
'''
conn = _get_conn(region, key, keyid, profile)
if not conn:
return None
sg = _get_group(conn, name, vpc_id, group_id, region)
if sg:
ret = odict.OrderedDict()
ret['name'] = sg.name
# TODO: add support for vpc_id in return
# ret['vpc_id'] = sg.vpc_id
ret['group_id'] = sg.id
ret['owner_id'] = sg.owner_id
ret['description'] = sg.description
# TODO: add support for tags
_rules = []
for rule in sg.rules:
log.debug('examining rule {0} for group {1}'.format(rule, sg.id))
attrs = ['ip_protocol', 'from_port', 'to_port', 'grants']
_rule = odict.OrderedDict()
for attr in attrs:
val = getattr(rule, attr)
if not val:
continue
if attr == 'grants':
_grants = []
for grant in val:
log.debug('examining grant {0} for'.format(grant))
g_attrs = {'name': 'source_group_name',
'owner_id': 'source_group_owner_id',
'group_id': 'source_group_group_id',
'cidr_ip': 'cidr_ip'}
_grant = odict.OrderedDict()
for g_attr, g_attr_map in six.iteritems(g_attrs):
g_val = getattr(grant, g_attr)
if not g_val:
continue
_grant[g_attr_map] = g_val
_grants.append(_grant)
_rule['grants'] = _grants
elif attr == 'from_port':
_rule[attr] = int(val)
elif attr == 'to_port':
_rule[attr] = int(val)
else:
_rule[attr] = val
_rules.append(_rule)
ret['rules'] = _split_rules(_rules)
return ret
else:
return None
def create(name, description, vpc_id=None, region=None, key=None, keyid=None,
profile=None):
'''
    Create a security group.
CLI example::
salt myminion boto_secgroup.create mysecgroup 'My Security Group'
'''
conn = _get_conn(region, key, keyid, profile)
if not conn:
return False
created = conn.create_security_group(name, description, vpc_id)
if created:
log.info('Created security group {0}.'.format(name))
return True
else:
msg = 'Failed to create security group {0}.'.format(name)
log.error(msg)
return False
def delete(name=None, group_id=None, region=None, key=None, keyid=None,
profile=None, vpc_id=None):
'''
    Delete a security group.
CLI example::
salt myminion boto_secgroup.delete mysecgroup
'''
conn = _get_conn(region, key, keyid, profile)
if not conn:
return False
group = _get_group(conn, name, vpc_id, group_id, region)
if group:
deleted = conn.delete_security_group(group_id=group.id)
if deleted:
log.info('Deleted security group {0} with id {1}.'.format(group.name,
group.id))
return True
else:
msg = 'Failed to delete security group {0}.'.format(name)
log.error(msg)
return False
else:
log.debug('Security group not found.')
return False
def authorize(name=None, source_group_name=None,
source_group_owner_id=None, ip_protocol=None,
from_port=None, to_port=None, cidr_ip=None, group_id=None,
source_group_group_id=None, region=None, key=None,
keyid=None, profile=None, vpc_id=None):
'''
Add a new rule to an existing security group.
CLI example::
salt myminion boto_secgroup.authorize mysecgroup ip_protocol=tcp from_port=80 to_port=80 cidr_ip='['10.0.0.0/8', '192.168.0.0/24']'
'''
conn = _get_conn(region, key, keyid, profile)
if not conn:
return False
group = _get_group(conn, name, vpc_id, group_id, region)
if group:
try:
added = conn.authorize_security_group(
src_security_group_name=source_group_name,
src_security_group_owner_id=source_group_owner_id,
ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
cidr_ip=cidr_ip, group_id=group.id,
src_security_group_group_id=source_group_group_id)
if added:
log.info('Added rule to security group {0} with id {1}'
.format(group.name, group.id))
return True
else:
msg = ('Failed to add rule to security group {0} with id {1}.'
.format(group.name, group.id))
log.error(msg)
return False
except boto.exception.EC2ResponseError as e:
log.debug(e)
msg = ('Failed to add rule to security group {0} with id {1}.'
.format(group.name, group.id))
log.error(msg)
return False
else:
log.debug('Failed to add rule to security group.')
return False
def revoke(name=None, source_group_name=None,
source_group_owner_id=None, ip_protocol=None,
from_port=None, to_port=None, cidr_ip=None, group_id=None,
source_group_group_id=None, region=None, key=None,
keyid=None, profile=None, vpc_id=None):
'''
Remove a rule from an existing security group.
CLI example::
salt myminion boto_secgroup.revoke mysecgroup ip_protocol=tcp from_port=80 to_port=80 cidr_ip='10.0.0.0/8'
'''
conn = _get_conn(region, key, keyid, profile)
if not conn:
return False
group = _get_group(conn, name, vpc_id, group_id, region)
if group:
try:
revoked = conn.revoke_security_group(
src_security_group_name=source_group_name,
src_security_group_owner_id=source_group_owner_id,
ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
cidr_ip=cidr_ip, group_id=group.id,
src_security_group_group_id=source_group_group_id)
if revoked:
log.info('Removed rule from security group {0} with id {1}.'
.format(group.name, group.id))
return True
else:
msg = ('Failed to remove rule from security group {0} with id {1}.'
.format(group.name, group.id))
log.error(msg)
return False
except boto.exception.EC2ResponseError as e:
log.debug(e)
msg = ('Failed to remove rule from security group {0} with id {1}.'
.format(group.name, group.id))
log.error(msg)
return False
else:
log.debug('Failed to remove rule from security group.')
return False
def _get_conn(region, key, keyid, profile):
'''
Get a boto connection to ec2.
'''
if profile:
if isinstance(profile, string_types):
_profile = __salt__['config.option'](profile)
elif isinstance(profile, dict):
_profile = profile
key = _profile.get('key', None)
keyid = _profile.get('keyid', None)
region = _profile.get('region', None)
if not region and __salt__['config.option']('secgroup.region'):
region = __salt__['config.option']('secgroup.region')
if not region:
region = 'us-east-1'
if not key and __salt__['config.option']('secgroup.key'):
key = __salt__['config.option']('secgroup.key')
if not keyid and __salt__['config.option']('secgroup.keyid'):
keyid = __salt__['config.option']('secgroup.keyid')
try:
conn = boto.ec2.connect_to_region(region, aws_access_key_id=keyid,
aws_secret_access_key=key)
except boto.exception.NoAuthHandlerFound:
log.error('No authentication credentials found when attempting to'
' make ec2 connection for security groups.')
return None
return conn
| [
[
[
1218,
1233
]
],
[
[
1263,
1270
],
[
1359,
1366
],
[
1459,
1466
],
[
1494,
1501
]
],
[
[
1278,
1280
],
[
6234,
6236
]
],
[
[
1311,
1340
],
[
2175,
2188
],
[
2209,
2222
]
],
[
[
1348,
1351
],
[
3502,
3505
],
[
8513,
8516
]
],
[
[
1353,
1356
],
[
3918,
3921
],
[
4555,
4558
],
[
5108,
5111
],
[
6108,
6111
],
[
6272,
6275
],
[
6434,
6437
],
[
6632,
6635
],
[
6779,
6782
],
[
7656,
7659
],
[
8094,
8097
],
[
9608,
9611
],
[
9765,
9768
],
[
10294,
10297
],
[
10565,
10568
],
[
10623,
10626
],
[
11801,
11804
],
[
12108,
12111
],
[
12217,
12220
],
[
12367,
12370
],
[
12425,
12428
],
[
13577,
13580
],
[
13894,
13897
],
[
14003,
14006
],
[
14158,
14161
],
[
14216,
14219
],
[
15343,
15346
]
],
[
[
1430,
1434
]
],
[
[
1446,
1454
],
[
2189,
2193
],
[
5059,
5063
],
[
12167,
12171
],
[
13953,
13957
],
[
15160,
15164
],
[
15300,
15304
]
],
[
[
1516,
1524
],
[
2135,
2143
]
],
[
[
1556,
1564
],
[
2135,
2143
]
],
[
[
1590,
1602
],
[
14436,
14448
]
],
[
[
1610,
1635
],
[
7308,
7313
],
[
7812,
7817
],
[
8443,
8448
]
],
[
[
1642,
1653
]
],
[
[
2304,
2310
]
],
[
[
2767,
2779
],
[
9111,
9123
]
],
[
[
3612,
3622
],
[
2647,
2657
],
[
5618,
5628
],
[
7234,
7244
],
[
10135,
10145
],
[
11312,
11322
],
[
13087,
13097
]
],
[
[
5277,
5289
],
[
6563,
6575
]
],
[
[
5732,
5752
]
],
[
[
6881,
6891
]
],
[
[
9187,
9193
]
],
[
[
9807,
9813
]
],
[
[
10689,
10698
]
],
[
[
12503,
12509
]
],
[
[
14299,
14308
],
[
2558,
2567
],
[
5529,
5538
],
[
7149,
7158
],
[
9439,
9448
],
[
10046,
10055
],
[
11223,
11232
],
[
12998,
13007
]
]
] |
import sys
sys.path.insert(0, "../") # our fake sigrokdecode lives one dir upper
from pd import Decoder
class DS1307():
def __init__(self):
self.sigrokDecoder = Decoder()
def get_capabilities(self):
settings = {}
for option in self.sigrokDecoder.options :
settingType = ''
choices = []
if ("values" not in option) :
                # TODO: sigrok docs do not mention that default is mandatory
if (isinstance(option['default'], str)) :
settingType = 'string'
elif (isinstance(option['default'], int) or isinstance(option['default'], float)) :
settingType = 'number'
else :
print("Cannot determine the type of the " + option['desc'] + " parameter from it's default value: " + option['default'])
settings[option["desc"]] = {
'type': settingType
}
if ("values" in option) :
settings[option["desc"]]['choices'] = option["values"]
return {
'settings': settings
}
def set_settings(self, settings):
# TODO handle the settings
# convert sigrok's
# annotations = (
# ('warning', 'Warning'),
# ....
#
# format annotations to Logic's format
self.sigrokDecoder.reset()
resultTypes = {}
for annotation in self.sigrokDecoder.annotations :
resultTypes[annotation[0]] = annotation[1] + "{{data.data}}"
return {
"result_types": resultTypes
}
def decode(self, data):
self.sigrokDecoder.processI2C(data)
if (not self.packet == {}) :
ret = self.generate_logic_result()
self.packet = {}
return ret | [
[
[
7,
10
],
[
12,
15
]
],
[
[
98,
105
],
[
170,
177
]
],
[
[
113,
119
]
]
] |
import torch
import numpy as np
import json, sys, re, string
import collections
from collections import Counter
from collections import OrderedDict
def get_sp_pred(pred_sp_idx, data):
"""get the prediction of supporting facts in original format
Arguments:
pred_sp_idx {[type]} -- [description]
data {[type]} -- [description]
"""
pred = []
for p in pred_sp_idx:
if p < len(data):
pred.append([data[p].doc_title[0], data[p].sent_id])
return pred
def process_logit(batch_index, batch_logits, predict_features, predict_examples, max_answer_length):
"""get predictions for each sample in the batch
Arguments:
batch_index {[type]} -- [description]
batch_logits {[type]} -- 0: supporting facts logits, 1: answer span logits, 2: answer type logits 3: gold doc logits
batch_size {[type]} -- [description]
predict_file {[type]} -- [description]
"""
sp_logits_np = torch.sigmoid(batch_logits[0]).detach().cpu().numpy()
ans_type_logits_np = batch_logits[1].detach().cpu().numpy()
batch_index = batch_index.numpy().tolist()
sp_pred, span_pred, ans_type_pred = [], [], []
for idx, data in enumerate(batch_index):
# supporting facts prediction
pred_sp_idx = [ x[0] for x in enumerate(sp_logits_np[idx,:].tolist()) if x[1] > 0.5 ]
print(pred_sp_idx)
if len(pred_sp_idx) != 0:
sp_pred.append(get_sp_pred(pred_sp_idx, predict_examples[data]))
else:
sp_pred.append([])
# answer type prediction, for debug purpose
ans_type_pred.append(np.argmax(ans_type_logits_np[idx,:]))
# answer span prediction
if ans_type_pred[-1] == 0:
span_pred.append("no")
elif ans_type_pred[-1] == 1:
span_pred.append("yes")
else:
span_pred.append("")
return sp_pred, span_pred, ans_type_pred
# def evaluate(eval_file, answer_dict):
# f1 = exact_match = total = 0
# for key, value in enumerate(answer_dict):
# total += 1
# ground_truths = eval_file[key]["answer"]
# prediction = value
# cur_EM = exact_match_score(prediction, ground_truths)
# cur_f1, _, _ = f1_score(prediction, ground_truths)
# exact_match += cur_EM
# f1 += cur_f1
# exact_match = 100.0 * exact_match / total
# f1 = 100.0 * f1 / total
# return {'exact_match': exact_match, 'f1': f1}
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
normalized_prediction = normalize_answer(prediction)
normalized_ground_truth = normalize_answer(ground_truth)
ZERO_METRIC = (0, 0, 0)
if normalized_prediction in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth:
return ZERO_METRIC
if normalized_ground_truth in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth:
return ZERO_METRIC
prediction_tokens = normalized_prediction.split()
ground_truth_tokens = normalized_ground_truth.split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return ZERO_METRIC
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1, precision, recall
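# A quick worked example of the F1 computation above (hypothetical strings, not
# part of the original module): for prediction "the cat sat" and ground truth
# "a cat sat down", normalize_answer drops the articles, the shared tokens are
# {'cat', 'sat'}, so precision = 2/2 = 1.0, recall = 2/3 and
# f1 = 2 * (1.0 * 2/3) / (1.0 + 2/3) = 0.8.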
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def write_prediction(sp_preds, answer_preds, orig_data, predict_file, output_dir):
"""write predictions to json file
Arguments:
sp_preds {[type]} -- [description]
answer_preds {[type]} -- [description]
orig_data {[type]} -- [description]
predict_file {[type]} -- [description]
output_dir {[type]} -- [description]
"""
if len(answer_preds) == 0:
answer_preds = ["place_holder"] * len(orig_data)
all_pred = {}
all_pred['answer'] = OrderedDict()
all_pred['sp'] = OrderedDict()
for idx, data in enumerate(orig_data):
all_pred['answer'][data['_id']] = answer_preds[idx]
all_pred['sp'][data['_id']] = sp_preds[idx]
with open(output_dir, 'w') as fid:
json.dump(all_pred, fid)
| [
[
[
7,
12
],
[
981,
986
]
],
[
[
20,
31
],
[
1643,
1645
]
],
[
[
39,
43
],
[
4748,
4752
]
],
[
[
45,
48
]
],
[
[
50,
52
],
[
2563,
2565
]
],
[
[
54,
60
],
[
2720,
2726
]
],
[
[
68,
79
]
],
[
[
104,
111
],
[
3517,
3524
],
[
3546,
3553
]
],
[
[
136,
147
],
[
4496,
4507
],
[
4531,
4542
]
],
[
[
154,
165
],
[
1466,
1477
]
],
[
[
517,
530
]
],
[
[
2495,
2511
],
[
2991,
3007
],
[
3050,
3066
],
[
3924,
3940
],
[
3956,
3972
]
],
[
[
2927,
2935
]
],
[
[
3867,
3884
]
],
[
[
3994,
4010
]
]
] |
#!/usr/bin/python
#-*-coding=utf-8-*-
def pyramid(n):
most = 2*n - 1
for i in range(1,n+1):
star = 2*i - 1
space = n - i
print(" "*space + "*"*star)
def test():
pyramid(3)
pyramid(4)
pyramid(5)
if __name__ == "__main__":
test()
| [
[
[
43,
50
],
[
181,
188
],
[
194,
201
],
[
207,
214
]
],
[
[
171,
175
],
[
248,
252
]
]
] |
# Run this again after editing submodules so Colab uses the updated versions
from citylearn import CityLearn
from citylearn import GridLearn
import matplotlib.pyplot as plt
from pathlib import Path
from citylearn import RL_Agents_Coord, Cluster_Agents
import numpy as np
import csv
import time
import re
import pandas as pd
import torch
from joblib import dump, load
# Load environment
climate_zone = 1
data_path = Path("citylearn/data/Climate_Zone_"+str(climate_zone))
building_attributes = data_path / 'building_attributes.json'
weather_file = data_path / 'weather_data.csv'
solar_profile = data_path / 'solar_generation_1kW.csv'
building_state_actions = 'citylearn/buildings_state_action_space.json'
building_id = ["Building_1","Building_2","Building_3","Building_4","Building_5","Building_6","Building_7","Building_8","Building_9"]
objective_function = ['ramping','1-load_factor','average_daily_peak','peak_demand','net_electricity_consumption','quadratic','voltage_dev']
ep_period = 8760
print("Initializing the grid...")
# Contain the lower and upper bounds of the states and actions, to be provided to the agent to normalize the variables between 0 and 1.
# Can be obtained using observations_spaces[i].low or .high
env = GridLearn(data_path, building_attributes, weather_file, solar_profile, building_id, 1, buildings_states_actions = building_state_actions, simulation_period = (0,ep_period), cost_function = objective_function, verbose=1, n_buildings_per_bus=1)
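# Hypothetical sketch of reading those bounds (names follow the comment above
# and are not verified against this GridLearn version):
# low_bounds = [space.low for space in observations_spaces]
# high_bounds = [space.high for space in observations_spaces]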
# Hyperparameters
batch_size = 254
bs = batch_size
tau = 0.005
gamma = 0.99
lr = 0.0003
hid = [batch_size,batch_size]
n_episodes = 3
n_training_eps = n_episodes - 1
if not (batch_size < ep_period * n_training_eps):
print("will produce a key error because the neural nets won't be initialized yet")
print("Initializing the agents...")
# Instantiating the control agent(s)
agents = RL_Agents_Coord(env, list(env.buildings.keys()), discount = gamma, batch_size = bs, replay_buffer_capacity = 1e5, regression_buffer_capacity = 12*ep_period, tau=tau, lr=lr, hidden_dim=hid, start_training=(ep_period+1)*(n_episodes-1), exploration_period = (ep_period+1)*(n_episodes)+1, start_regression=(ep_period+1), information_sharing = True, pca_compression = .95, action_scaling_coef=0.5, reward_scaling = 5., update_per_step = 1, iterations_as = 2)
print("Starting the experiment...")
# The number of episodes can be replaces by a stopping criterion (i.e. convergence of the average reward)
start = time.time()
for e in range(n_episodes):
is_evaluating = (e > n_training_eps) # Evaluate deterministic policy after 7 epochs
rewards = []
state = env.reset()
done = False
j = 0
print("is_deterministic", is_evaluating)
action, coordination_vars = agents.select_action(state, deterministic=is_evaluating)
# print(action)
while not done:
next_state, reward, done, _ = env.step(action)
action_next, coordination_vars_next = agents.select_action(next_state, deterministic=is_evaluating)
agents.add_to_buffer(state, action, reward, next_state, done, coordination_vars, coordination_vars_next)
state = next_state
coordination_vars = coordination_vars_next
action = action_next
print('Loss -',env.cost(), 'Simulation time (min) -',(time.time()-start)/60.0)
| [
[
[
99,
108
]
],
[
[
131,
140
],
[
1232,
1241
]
],
[
[
148,
172
]
],
[
[
193,
197
],
[
416,
420
]
],
[
[
220,
235
],
[
1863,
1878
]
],
[
[
237,
251
]
],
[
[
259,
270
]
],
[
[
278,
281
]
],
[
[
289,
293
],
[
2469,
2473
],
[
3284,
3288
]
],
[
[
301,
303
]
],
[
[
311,
323
]
],
[
[
331,
336
]
],
[
[
356,
360
]
],
[
[
362,
366
]
],
[
[
387,
399
],
[
456,
468
]
],
[
[
404,
413
],
[
493,
502
],
[
547,
556
],
[
594,
603
],
[
1242,
1251
]
],
[
[
471,
490
],
[
1253,
1272
]
],
[
[
532,
544
],
[
1274,
1286
]
],
[
[
578,
591
],
[
1288,
1301
]
],
[
[
633,
655
],
[
1346,
1368
]
],
[
[
704,
715
],
[
1303,
1314
]
],
[
[
837,
855
],
[
1421,
1439
]
],
[
[
978,
987
],
[
1393,
1402
],
[
1664,
1673
],
[
2009,
2018
],
[
2068,
2077
],
[
2119,
2128
],
[
2167,
2176
]
],
[
[
1226,
1229
],
[
1879,
1882
],
[
1889,
1892
],
[
2626,
2629
],
[
2879,
2882
],
[
3245,
3248
]
],
[
[
1494,
1504
],
[
1516,
1526
],
[
1571,
1581
],
[
1582,
1592
],
[
1651,
1661
]
],
[
[
1511,
1513
],
[
1943,
1945
]
],
[
[
1527,
1530
],
[
2024,
2027
]
],
[
[
1539,
1544
],
[
1923,
1928
]
],
[
[
1552,
1554
],
[
2032,
2034
]
],
[
[
1564,
1567
],
[
2047,
2050
]
],
[
[
1595,
1605
],
[
1627,
1637
],
[
2082,
2092
],
[
2133,
2143
],
[
2496,
2506
]
],
[
[
1610,
1624
],
[
1676,
1690
],
[
2534,
2548
]
],
[
[
1854,
1860
],
[
2744,
2750
],
[
2942,
2948
],
[
3012,
3018
]
],
[
[
2461,
2466
],
[
3296,
3301
]
],
[
[
2485,
2486
],
[
2530,
2531
]
],
[
[
2513,
2526
],
[
2697,
2710
],
[
2786,
2799
],
[
2989,
3002
]
],
[
[
2601,
2608
]
],
[
[
2618,
2623
],
[
2765,
2770
],
[
3033,
3038
]
],
[
[
2642,
2646
],
[
2835,
2839
]
],
[
[
2660,
2661
]
],
[
[
2716,
2722
],
[
2888,
2894
],
[
3040,
3046
]
],
[
[
2724,
2741
],
[
3074,
3091
]
],
[
[
2849,
2859
],
[
2963,
2973
],
[
3056,
3066
],
[
3134,
3144
]
],
[
[
2861,
2867
],
[
3048,
3054
]
],
[
[
2869,
2873
],
[
3068,
3072
],
[
2835,
2839
]
],
[
[
2875,
2876
]
],
[
[
2904,
2915
],
[
3213,
3224
]
],
[
[
2917,
2939
],
[
3093,
3115
],
[
3173,
3195
]
],
[
[
3126,
3131
],
[
3033,
3038
]
],
[
[
3153,
3170
],
[
3074,
3091
]
],
[
[
3204,
3210
],
[
2888,
2894
],
[
3040,
3046
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P"""
import asyncore
from collections import defaultdict
from io import BytesIO
import logging
import socket
import struct
import sys
import threading
from test_framework.messages import *
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb", # mainnet
"testnet4": b"\xfd\xd2\xc8\xf1", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
class P2PConnection(asyncore.dispatcher):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handing the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# All P2PConnections must be created before starting the NetworkThread.
# assert that the network thread is not running.
assert not network_thread_running()
super().__init__(map=mininode_socket_map)
def peer_connect(self, dstaddr, dstport, net="regtest"):
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sendbuf = b""
self.recvbuf = b""
self.state = "connecting"
self.network = net
self.disconnect = False
logger.info('Connecting to Actinium Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
def peer_disconnect(self):
# Connection could have already been closed by other end.
if self.state == "connected":
self.disconnect_node()
# Connection and disconnection methods
def handle_connect(self):
"""asyncore callback when a connection is opened."""
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.on_open()
def handle_close(self):
"""asyncore callback when a connection is closed."""
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.on_close()
def disconnect_node(self):
"""Disconnect the p2p connection.
Called by the test logic thread. Causes the p2p connection
to be disconnected on the next iteration of the asyncore loop."""
self.disconnect = True
# Socket read methods
def handle_read(self):
"""asyncore callback when data is read from the socket."""
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
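        # Layout of the header parsed below (descriptive note):
        #   bytes 0-3   : network magic, must equal MAGIC_BYTES[self.network]
        #   bytes 4-15  : command name, NUL-padded to 12 bytes
        #   bytes 16-19 : payload length (little-endian 4-byte integer)
        #   bytes 20-23 : checksum = first 4 bytes of sha256(sha256(payload))
        #   bytes 24+   : payload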
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def writable(self):
"""asyncore method to determine whether the handle_write() callback should be called on the next loop."""
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
"""asyncore callback when data should be written to the socket."""
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def send_message(self, message, pushbuf=False):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
if (len(self.sendbuf) == 0 and not pushbuf):
try:
sent = self.send(tmsg)
self.sendbuf = tmsg[sent:]
except BlockingIOError:
self.sendbuf = tmsg
else:
self.sendbuf += tmsg
# Class utility methods
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.state != "connected"
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
def __init__(self):
super().__init__(name="NetworkThread")
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[obj.handle_close() for obj in disconnected]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
def network_thread_start():
"""Start the network thread."""
# Only one network thread may run at a time
assert not network_thread_running()
NetworkThread().start()
def network_thread_running():
"""Return whether the network thread is running."""
return any([thread.name == "NetworkThread" for thread in threading.enumerate()])
def network_thread_join(timeout=10):
"""Wait timeout seconds for the network thread to terminate.
Throw if the network thread doesn't terminate in timeout seconds."""
network_threads = [thread for thread in threading.enumerate() if thread.name == "NetworkThread"]
assert len(network_threads) <= 1
for thread in network_threads:
thread.join(timeout)
assert not thread.is_alive()
| [
[
[
647,
655
],
[
1732,
1740
],
[
15771,
15779
]
],
[
[
680,
691
],
[
9423,
9434
]
],
[
[
707,
714
],
[
5827,
5834
]
],
[
[
722,
729
],
[
878,
885
]
],
[
[
737,
743
],
[
2633,
2639
],
[
2649,
2655
],
[
2700,
2706
],
[
2720,
2726
]
],
[
[
751,
757
],
[
5123,
5129
],
[
7857,
7863
]
],
[
[
765,
768
],
[
10742,
10745
]
],
[
[
776,
785
],
[
15189,
15198
],
[
15228,
15237
],
[
16216,
16225
],
[
16461,
16470
]
],
[
[
823,
824
],
[
951,
959
],
[
975,
984
],
[
1003,
1015
],
[
1036,
1050
],
[
1070,
1083
],
[
1101,
1112
],
[
1132,
1145
],
[
1167,
1182
],
[
1200,
1211
],
[
1232,
1246
],
[
1264,
1275
],
[
1289,
1296
],
[
1314,
1325
],
[
1340,
1348
],
[
1363,
1371
],
[
1388,
1398
],
[
1418,
1431
],
[
1453,
1468
],
[
1481,
1487
],
[
1504,
1514
],
[
1532,
1543
],
[
9700,
9712
],
[
9713,
9725
],
[
5403,
5409
],
[
5435,
5441
],
[
7899,
7905
],
[
7924,
7930
],
[
9879,
9890
],
[
11728,
11739
],
[
11956,
11964
],
[
12123,
12144
],
[
12248,
12269
],
[
12297,
12307
],
[
14257,
14265
]
],
[
[
857,
867
],
[
12501,
12511
],
[
12788,
12798
],
[
12969,
12979
],
[
13156,
13166
],
[
13777,
13787
],
[
13953,
13963
],
[
14419,
14429
]
],
[
[
869,
875
],
[
2899,
2905
],
[
3444,
3450
],
[
3682,
3688
],
[
6041,
6047
],
[
8796,
8802
],
[
15847,
15853
]
],
[
[
923,
933
],
[
5659,
5669
],
[
5860,
5870
]
],
[
[
1548,
1559
],
[
4845,
4856
],
[
7745,
7756
]
],
[
[
1718,
1731
],
[
8843,
8856
]
],
[
[
8830,
8842
]
],
[
[
14677,
14696
],
[
2461,
2480
],
[
15352,
15371
],
[
15593,
15612
],
[
15809,
15828
]
],
[
[
15173,
15186
],
[
6451,
6464
],
[
6724,
6737
],
[
7991,
8004
],
[
10407,
10420
],
[
12549,
12562
],
[
12836,
12849
],
[
13017,
13030
],
[
13204,
13217
],
[
13825,
13838
],
[
14001,
14014
],
[
14467,
14480
]
],
[
[
15214,
15227
],
[
16044,
16057
]
],
[
[
15891,
15911
]
],
[
[
16073,
16095
],
[
2406,
2428
],
[
16014,
16036
]
],
[
[
16245,
16264
]
]
] |
#!/usr/bin/env python3
import argparse
import os
import pickle
from getpass import getpass
from typing import Tuple
import requests
from appdirs import user_cache_dir
from mysodexo import api
from mysodexo.constants import APPLICATION_NAME, SESSION_CACHE_FILENAME
def prompt_login() -> Tuple[str, str]:
"""Prompts user for credentials and the returns them as a tuple."""
email = input("email: ")
password = getpass("password: ")
return (email, password)
def get_session_cache_path() -> str:
return os.path.join(
user_cache_dir(appname=APPLICATION_NAME), SESSION_CACHE_FILENAME
)
def get_cached_session_info() -> Tuple[
requests.cookies.RequestsCookieJar, str
]:
"""Returns session and DNI from cache."""
session_cache_path = get_session_cache_path()
with open(session_cache_path, "rb") as f:
cached_session_info = pickle.load(f)
cookies = cached_session_info["cookies"]
dni = cached_session_info["dni"]
return (cookies, dni)
def cache_session_info(
cookies: requests.cookies.RequestsCookieJar, dni: str
) -> None:
"""Stores session info to cache."""
session_cache_path = get_session_cache_path()
cached_session_info = {
"cookies": cookies,
"dni": dni,
}
os.makedirs(os.path.dirname(session_cache_path), exist_ok=True)
with open(session_cache_path, "wb") as f:
pickle.dump(cached_session_info, f)
def login() -> Tuple[requests.sessions.Session, str]:
"""Logins and returns session info."""
email, password = prompt_login()
session, account_info = api.login(email, password)
dni = account_info["dni"]
return (session, dni)
def process_login() -> Tuple[requests.sessions.Session, str]:
"""Logins and stores session info to cache."""
session, dni = login()
cache_session_info(session.cookies, dni)
return (session, dni)
def get_session_or_login() -> Tuple[requests.sessions.Session, str]:
"""Retrieves session from cache or prompts login then stores session."""
try:
cookies, dni = get_cached_session_info()
session = requests.session()
session.cookies.update(cookies)
except FileNotFoundError:
session, dni = process_login()
return session, dni
def print_balance(cards):
"""Prints per card balance."""
for card in cards:
pan = card["pan"]
details = card["_details"]
balance = details["cardBalance"]
print(f"{pan}: {balance}")
def process_balance():
session, dni = get_session_or_login()
cards = api.get_cards(session, dni)
for card in cards:
card_number = card["cardNumber"]
details = api.get_detail_card(session, card_number)
card["_details"] = details
print_balance(cards)
def main():
parser = argparse.ArgumentParser(
description="MySodexo Command Line Interface"
)
parser.add_argument(
"--login", action="store_true", help="Logins and store session.",
)
parser.add_argument(
"--balance",
action="store_true",
help="Returns account balance per card",
)
args = parser.parse_args()
if args.login:
process_login()
elif args.balance:
process_balance()
else:
parser.print_help()
if __name__ == "__main__":
main()
| [
[
[
30,
38
],
[
2803,
2811
]
],
[
[
46,
48
],
[
524,
526
],
[
1272,
1274
],
[
1284,
1286
]
],
[
[
56,
62
],
[
878,
884
],
[
1390,
1396
]
],
[
[
83,
90
],
[
423,
430
]
],
[
[
110,
115
],
[
290,
295
],
[
652,
657
],
[
1443,
1448
],
[
1698,
1703
],
[
1918,
1923
]
],
[
[
124,
132
],
[
663,
671
],
[
1040,
1048
],
[
1449,
1457
],
[
1704,
1712
],
[
1924,
1932
],
[
2110,
2118
]
],
[
[
153,
167
],
[
546,
560
]
],
[
[
190,
193
],
[
1590,
1593
],
[
2564,
2567
],
[
2674,
2677
]
],
[
[
225,
241
],
[
569,
585
]
],
[
[
243,
265
],
[
588,
610
]
],
[
[
272,
284
],
[
1547,
1559
]
],
[
[
480,
502
],
[
777,
799
],
[
1161,
1183
]
],
[
[
623,
646
],
[
2066,
2089
]
],
[
[
1007,
1025
],
[
1819,
1837
]
],
[
[
1432,
1437
],
[
1807,
1812
]
],
[
[
1679,
1692
],
[
2222,
2235
],
[
3181,
3194
]
],
[
[
1892,
1912
],
[
2529,
2549
]
],
[
[
2268,
2281
],
[
2755,
2768
]
],
[
[
2491,
2506
],
[
3228,
3243
]
],
[
[
2782,
2786
],
[
3317,
3321
]
]
] |
# Implement a Hero class with the following attributes: name, health, rank, strength, and a "hit" method.
# The hit method must deal damage to the opponent equal to the hero's strength. The hero has the following
# constraints: health from 0 to 100, rank 1, 2 or 3. Strength is at most 10% of the hero's current health.
# Heroes whose health is below 5 cannot be hit.
#
# Implement a Mage class that can restore the health of other heroes; it also has a rank like a hero and
# can deal hits. It charges money for restoring health. (You need to implement this functionality.)
# A hero earns money for winning a fight against another hero, and on victory he also takes all of the opponent's money.
# How much money a hero earns for a victory and how much it costs to restore health is up to you.)
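# A minimal, illustrative sketch of one way to satisfy the spec above. The class and
# method names, the victory condition, and the reward/healing prices are assumptions,
# not part of the original task.
class Hero:
    def __init__(self, name, health=100, rank=1, money=0):
        if not 0 <= health <= 100:
            raise ValueError("health must be between 0 and 100")
        if rank not in (1, 2, 3):
            raise ValueError("rank must be 1, 2 or 3")
        self.name = name
        self.health = health
        self.rank = rank
        self.money = money

    @property
    def power(self):
        # strength is capped at 10% of the hero's current health
        return 0.1 * self.health

    def hit(self, other):
        if other.health < 5:
            raise ValueError("cannot hit a hero whose health is below 5")
        other.health = max(0, other.health - self.power)
        if other.health == 0:          # assumed victory condition
            self.money += 10           # assumed flat reward for a win
            self.money += other.money  # the winner takes the loser's money
            other.money = 0


class Mage(Hero):
    HEAL_PRICE = 5  # assumed price for restoring health

    def heal(self, other, amount):
        if other.money < self.HEAL_PRICE:
            raise ValueError("not enough money to pay for healing")
        other.money -= self.HEAL_PRICE
        self.money += self.HEAL_PRICE
        other.health = min(100, other.health + amount)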
| [] |
# coding: utf-8
import pprint
import re # noqa: F401
import six
class Body19(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'access_token': 'str',
'name': 'str',
'description': 'str',
'homepage': 'str',
'has_issues': 'bool',
'has_wiki': 'bool',
'can_comment': 'bool',
'issue_comment': 'bool',
'security_hole_enabled': 'bool',
'private': 'bool',
'path': 'str',
'default_branch': 'str',
'pull_requests_enabled': 'bool',
'online_edit_enabled': 'bool',
'lightweight_pr_enabled': 'bool'
}
attribute_map = {
'access_token': 'access_token',
'name': 'name',
'description': 'description',
'homepage': 'homepage',
'has_issues': 'has_issues',
'has_wiki': 'has_wiki',
'can_comment': 'can_comment',
'issue_comment': 'issue_comment',
'security_hole_enabled': 'security_hole_enabled',
'private': 'private',
'path': 'path',
'default_branch': 'default_branch',
'pull_requests_enabled': 'pull_requests_enabled',
'online_edit_enabled': 'online_edit_enabled',
'lightweight_pr_enabled': 'lightweight_pr_enabled'
}
def __init__(self, access_token=None, name=None, description=None, homepage=None, has_issues=True, has_wiki=True,
can_comment=True, issue_comment=None, security_hole_enabled=None, private=None, path=None,
default_branch=None, pull_requests_enabled=None, online_edit_enabled=None,
lightweight_pr_enabled=None): # noqa: E501
"""Body19 - a model defined in Swagger""" # noqa: E501
self._access_token = None
self._name = None
self._description = None
self._homepage = None
self._has_issues = None
self._has_wiki = None
self._can_comment = None
self._issue_comment = None
self._security_hole_enabled = None
self._private = None
self._path = None
self._default_branch = None
self._pull_requests_enabled = None
self._online_edit_enabled = None
self._lightweight_pr_enabled = None
self.discriminator = None
if access_token is not None:
self.access_token = access_token
self.name = name
if description is not None:
self.description = description
if homepage is not None:
self.homepage = homepage
if has_issues is not None:
self.has_issues = has_issues
if has_wiki is not None:
self.has_wiki = has_wiki
if can_comment is not None:
self.can_comment = can_comment
if issue_comment is not None:
self.issue_comment = issue_comment
if security_hole_enabled is not None:
self.security_hole_enabled = security_hole_enabled
if private is not None:
self.private = private
if path is not None:
self.path = path
if default_branch is not None:
self.default_branch = default_branch
if pull_requests_enabled is not None:
self.pull_requests_enabled = pull_requests_enabled
if online_edit_enabled is not None:
self.online_edit_enabled = online_edit_enabled
if lightweight_pr_enabled is not None:
self.lightweight_pr_enabled = lightweight_pr_enabled
@property
def access_token(self):
"""Gets the access_token of this Body19. # noqa: E501
        User authorization code # noqa: E501
:return: The access_token of this Body19. # noqa: E501
:rtype: str
"""
return self._access_token
@access_token.setter
def access_token(self, access_token):
"""Sets the access_token of this Body19.
        User authorization code # noqa: E501
:param access_token: The access_token of this Body19. # noqa: E501
:type: str
"""
self._access_token = access_token
@property
def name(self):
"""Gets the name of this Body19. # noqa: E501
        Repository name # noqa: E501
:return: The name of this Body19. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Body19.
        Repository name # noqa: E501
:param name: The name of this Body19. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this Body19. # noqa: E501
        Repository description # noqa: E501
:return: The description of this Body19. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Body19.
        Repository description # noqa: E501
:param description: The description of this Body19. # noqa: E501
:type: str
"""
self._description = description
@property
def homepage(self):
"""Gets the homepage of this Body19. # noqa: E501
        Homepage (e.g. https://gitee.com) # noqa: E501
:return: The homepage of this Body19. # noqa: E501
:rtype: str
"""
return self._homepage
@homepage.setter
def homepage(self, homepage):
"""Sets the homepage of this Body19.
        Homepage (e.g. https://gitee.com) # noqa: E501
:param homepage: The homepage of this Body19. # noqa: E501
:type: str
"""
self._homepage = homepage
@property
def has_issues(self):
"""Gets the has_issues of this Body19. # noqa: E501
        Whether creating issues is allowed. Default: allowed (true) # noqa: E501
:return: The has_issues of this Body19. # noqa: E501
:rtype: bool
"""
return self._has_issues
@has_issues.setter
def has_issues(self, has_issues):
"""Sets the has_issues of this Body19.
        Whether creating issues is allowed. Default: allowed (true) # noqa: E501
:param has_issues: The has_issues of this Body19. # noqa: E501
:type: bool
"""
self._has_issues = has_issues
@property
def has_wiki(self):
"""Gets the has_wiki of this Body19. # noqa: E501
        Whether a wiki is provided. Default: provided (true) # noqa: E501
:return: The has_wiki of this Body19. # noqa: E501
:rtype: bool
"""
return self._has_wiki
@has_wiki.setter
def has_wiki(self, has_wiki):
"""Sets the has_wiki of this Body19.
        Whether a wiki is provided. Default: provided (true) # noqa: E501
:param has_wiki: The has_wiki of this Body19. # noqa: E501
:type: bool
"""
self._has_wiki = has_wiki
@property
def can_comment(self):
"""Gets the can_comment of this Body19. # noqa: E501
        Whether users may comment on the repository. Default: allowed (true) # noqa: E501
:return: The can_comment of this Body19. # noqa: E501
:rtype: bool
"""
return self._can_comment
@can_comment.setter
def can_comment(self, can_comment):
"""Sets the can_comment of this Body19.
        Whether users may comment on the repository. Default: allowed (true) # noqa: E501
:param can_comment: The can_comment of this Body19. # noqa: E501
:type: bool
"""
self._can_comment = can_comment
@property
def issue_comment(self):
"""Gets the issue_comment of this Body19. # noqa: E501
        Whether commenting on closed issues is allowed. Default: not allowed (false) # noqa: E501
:return: The issue_comment of this Body19. # noqa: E501
:rtype: bool
"""
return self._issue_comment
@issue_comment.setter
def issue_comment(self, issue_comment):
"""Sets the issue_comment of this Body19.
        Whether commenting on closed issues is allowed. Default: not allowed (false) # noqa: E501
:param issue_comment: The issue_comment of this Body19. # noqa: E501
:type: bool
"""
self._issue_comment = issue_comment
@property
def security_hole_enabled(self):
"""Gets the security_hole_enabled of this Body19. # noqa: E501
        Whether users may create issues involving sensitive information. Default: not allowed (false) # noqa: E501
:return: The security_hole_enabled of this Body19. # noqa: E501
:rtype: bool
"""
return self._security_hole_enabled
@security_hole_enabled.setter
def security_hole_enabled(self, security_hole_enabled):
"""Sets the security_hole_enabled of this Body19.
        Whether users may create issues involving sensitive information. Default: not allowed (false) # noqa: E501
:param security_hole_enabled: The security_hole_enabled of this Body19. # noqa: E501
:type: bool
"""
self._security_hole_enabled = security_hole_enabled
@property
def private(self):
"""Gets the private of this Body19. # noqa: E501
        Whether the repository is public or private. # noqa: E501
:return: The private of this Body19. # noqa: E501
:rtype: bool
"""
return self._private
@private.setter
def private(self, private):
"""Sets the private of this Body19.
        Whether the repository is public or private. # noqa: E501
:param private: The private of this Body19. # noqa: E501
:type: bool
"""
self._private = private
@property
def path(self):
"""Gets the path of this Body19. # noqa: E501
        Update the repository path # noqa: E501
:return: The path of this Body19. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this Body19.
        Update the repository path # noqa: E501
:param path: The path of this Body19. # noqa: E501
:type: str
"""
self._path = path
@property
def default_branch(self):
"""Gets the default_branch of this Body19. # noqa: E501
        Update the default branch # noqa: E501
:return: The default_branch of this Body19. # noqa: E501
:rtype: str
"""
return self._default_branch
@default_branch.setter
def default_branch(self, default_branch):
"""Sets the default_branch of this Body19.
        Update the default branch # noqa: E501
:param default_branch: The default_branch of this Body19. # noqa: E501
:type: str
"""
self._default_branch = default_branch
@property
def pull_requests_enabled(self):
"""Gets the pull_requests_enabled of this Body19. # noqa: E501
        Accept pull requests for collaborative development # noqa: E501
:return: The pull_requests_enabled of this Body19. # noqa: E501
:rtype: bool
"""
return self._pull_requests_enabled
@pull_requests_enabled.setter
def pull_requests_enabled(self, pull_requests_enabled):
"""Sets the pull_requests_enabled of this Body19.
        Accept pull requests for collaborative development # noqa: E501
:param pull_requests_enabled: The pull_requests_enabled of this Body19. # noqa: E501
:type: bool
"""
self._pull_requests_enabled = pull_requests_enabled
@property
def online_edit_enabled(self):
"""Gets the online_edit_enabled of this Body19. # noqa: E501
        Whether repository files may be edited online # noqa: E501
:return: The online_edit_enabled of this Body19. # noqa: E501
:rtype: bool
"""
return self._online_edit_enabled
@online_edit_enabled.setter
def online_edit_enabled(self, online_edit_enabled):
"""Sets the online_edit_enabled of this Body19.
        Whether repository files may be edited online # noqa: E501
:param online_edit_enabled: The online_edit_enabled of this Body19. # noqa: E501
:type: bool
"""
self._online_edit_enabled = online_edit_enabled
@property
def lightweight_pr_enabled(self):
"""Gets the lightweight_pr_enabled of this Body19. # noqa: E501
        Whether lightweight pull requests are accepted # noqa: E501
:return: The lightweight_pr_enabled of this Body19. # noqa: E501
:rtype: bool
"""
return self._lightweight_pr_enabled
@lightweight_pr_enabled.setter
def lightweight_pr_enabled(self, lightweight_pr_enabled):
"""Sets the lightweight_pr_enabled of this Body19.
        Whether lightweight pull requests are accepted # noqa: E501
:param lightweight_pr_enabled: The lightweight_pr_enabled of this Body19. # noqa: E501
:type: bool
"""
self._lightweight_pr_enabled = lightweight_pr_enabled
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Body19, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Body19):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
[
[
25,
31
],
[
13864,
13870
]
],
[
[
39,
41
]
],
[
[
64,
67
],
[
12943,
12946
]
],
[
[
76,
82
],
[
13647,
13653
],
[
14104,
14110
]
]
] |
"""
Winning Python script for EasyMarkit Hackathon by Team Sigma
"""
##Team Sigma - Members: Betty Zhou, Bailey Lei, Alex Pak
# Usage: python sigma_script.py data/train.csv data/test.csv
# import any necessary packages here
#loading libraries
import argparse
import os
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
import lightgbm as lgb
# read in command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("train_file_path") #path of training set
parser.add_argument("test_file_path") #path of test set
args = parser.parse_args()
def onehot_drop(df, column_name):
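    """One-hot encode each column listed in column_name and drop the original columns."""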
for index in column_name:
one_hot = pd.get_dummies(df[index], prefix = index)
df = df.drop(index,axis = 1)
df = df.join(one_hot)
return df
def fit_train(df):
train_df = df
train_clean = onehot_drop(train_df, ['type', 'province'])
train_clean['cli_area'] = train_clean['cli_area'].map({'Urban':1, 'Rural':0})
train_clean['pat_area'] = train_clean['pat_area'].map({'Urban':1, 'Rural':0})
train_clean['gender'] = train_clean['gender'].map({'M':1, 'F':0})
# convert to datetime
train_clean['apt_date'] = pd.to_datetime(train_df.apt_date,format='%Y-%m-%d %H:%M:%S', utc =True)
train_clean['sent_time'] = pd.to_datetime(train_df.sent_time,format='%Y-%m-%d %H:%M', utc =True)
train_clean['send_time'] = pd.to_datetime(train_df.send_time, format='%H:%M:%S', utc =True).dt.time
# find time between reminder and appointment
train_clean['sent_to_apt'] = (train_clean['apt_date'] - train_clean['sent_time']).dt.total_seconds()/3600
# attributes
train_clean['apt_month'] = train_clean['apt_date'].dt.month
train_clean['sent_day_of_week'] = train_clean['sent_time'].dt.day_name()
# one-hot encoding
train_clean = onehot_drop(train_clean, ['sent_day_of_week'])
X = train_clean.iloc[:, 2:]
y = train_clean.iloc[:,1]
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=1)
X_train_drop = X_train.drop(["apt_type", "apt_date", "sent_time", "send_time", "city", "cli_zip", 'pat_id', 'family_id','clinic'], axis = 1)
X_test_drop = X_test.drop(["apt_type", "apt_date", "sent_time", "send_time", "city", "cli_zip", 'pat_id', 'family_id','clinic'], axis = 1)
print("Number of training examples:", len(y_train))
print("Number of test examples:", len(y_test))
lg = lgb.LGBMClassifier(silent=False, n_estimators = 2000, max_depth=100)
lg_model = lg.fit(X_train_drop, y_train)
print("train accuracy: ", lg.score(X_train_drop, y_train))
print("test accuracy: ", lg.score(X_test_drop, y_test))
return lg_model
def predict_test(test_df, lg_model):
test_clean = onehot_drop(test_df, ['type', 'province'])
test_clean['cli_area'] = test_clean['cli_area'].map({'Urban':1, 'Rural':0})
test_clean['pat_area'] = test_clean['pat_area'].map({'Urban':1, 'Rural':0})
test_clean['gender'] = test_clean['gender'].map({'M':1, 'F':0})
# convert to datetime
test_clean['apt_date'] = pd.to_datetime(test_df.apt_date,format='%Y-%m-%d %H:%M:%S', utc =True)
test_clean['sent_time'] = pd.to_datetime(test_df.sent_time,format='%Y-%m-%d %H:%M', utc =True)
test_clean['send_time'] = pd.to_datetime(test_df.send_time, format='%H:%M:%S', utc =True).dt.time
# find time between reminder and appointment
test_clean['sent_to_apt'] = (test_clean['apt_date'] - test_clean['sent_time']).dt.total_seconds()/3600
# attributes
test_clean['apt_month'] = test_clean['apt_date'].dt.month
test_clean['sent_day_of_week'] = test_clean['sent_time'].dt.day_name()
# one-hot encoding
test_clean = onehot_drop(test_clean, ['sent_day_of_week'])
test_clean_month = onehot_drop(test_clean, ['apt_month'])
test_final = test_clean.iloc[:, 1:]
test_final = test_final.drop(["apt_type", "apt_date", "sent_time", "send_time", "city", "cli_zip", 'pat_id', 'family_id','clinic'], axis = 1)
print("Number of test examples:", len(test_df))
print("Number of final cleaned test examples:", len(test_final))
print("test data shape: ", test_final.shape)
test_clean["response"] = lg_model.predict(test_final)
df = test_clean[["ReminderId","response"]]
return df
def write_to_csv(df):
group_name = "sigma"
df.to_csv(group_name + "_output.csv", index=False)
print(group_name + "_output.csv output successful")
def main():
# loading train and test data
train_df = pd.read_csv(args.train_file_path)
test_df = pd.read_csv(args.test_file_path)
# pre-processing input train and test data for training model
lg_model = fit_train(train_df)
#predict and write to new CSV for submission
df = predict_test(test_df, lg_model)
write_to_csv(df)
if __name__ == "__main__":
main()
| [
[
[
258,
266
],
[
450,
458
]
],
[
[
274,
276
]
],
[
[
284,
296
],
[
703,
705
],
[
1217,
1219
],
[
1320,
1322
],
[
1421,
1423
],
[
3109,
3111
],
[
3210,
3212
],
[
3309,
3311
],
[
4541,
4543
],
[
4589,
4591
]
],
[
[
304,
315
]
],
[
[
323,
330
]
],
[
[
367,
383
],
[
2005,
2021
]
],
[
[
391,
406
],
[
2467,
2470
]
],
[
[
441,
447
],
[
476,
482
],
[
537,
543
],
[
600,
606
]
],
[
[
593,
597
],
[
4553,
4557
],
[
4601,
4605
]
],
[
[
625,
636
],
[
882,
893
],
[
1855,
1866
],
[
2782,
2793
],
[
3734,
3745
],
[
3803,
3814
]
],
[
[
831,
840
],
[
4704,
4713
]
],
[
[
2732,
2744
],
[
4783,
4795
]
],
[
[
4325,
4337
],
[
4819,
4831
]
],
[
[
4484,
4488
],
[
4868,
4872
]
]
] |
import pathlib
import pkg_resources
from clvm_tools.clvmc import compile_clvm
from hddcoin.types.blockchain_format.program import Program, SerializedProgram
def load_serialized_clvm(clvm_filename, package_or_requirement=__name__) -> SerializedProgram:
"""
This function takes a .clvm file in the given package and compiles it to a
.clvm.hex file if the .hex file is missing or older than the .clvm file, then
    returns the contents of the .hex file as a `SerializedProgram`.
clvm_filename: file name
package_or_requirement: usually `__name__` if the clvm file is in the same package
"""
hex_filename = f"{clvm_filename}.hex"
try:
if pkg_resources.resource_exists(package_or_requirement, clvm_filename):
full_path = pathlib.Path(pkg_resources.resource_filename(package_or_requirement, clvm_filename))
output = full_path.parent / hex_filename
compile_clvm(full_path, output, search_paths=[full_path.parent])
except NotImplementedError:
# pyinstaller doesn't support `pkg_resources.resource_exists`
# so we just fall through to loading the hex clvm
pass
clvm_hex = pkg_resources.resource_string(package_or_requirement, hex_filename).decode("utf8")
clvm_blob = bytes.fromhex(clvm_hex)
return SerializedProgram.from_bytes(clvm_blob)
def load_clvm(clvm_filename, package_or_requirement=__name__) -> Program:
return Program.from_bytes(bytes(load_serialized_clvm(clvm_filename, package_or_requirement=package_or_requirement)))
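# Illustrative usage (the .clvm filename below is hypothetical, not part of this module):
# puzzle = load_clvm("example_puzzle.clvm", package_or_requirement=__name__)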
| [
[
[
7,
14
],
[
766,
773
]
],
[
[
23,
36
],
[
672,
685
],
[
779,
792
],
[
1170,
1183
]
],
[
[
66,
78
],
[
916,
928
]
],
[
[
132,
139
],
[
1411,
1418
],
[
1431,
1438
]
],
[
[
141,
158
],
[
237,
254
],
[
1304,
1321
]
],
[
[
165,
185
],
[
1456,
1476
]
],
[
[
1350,
1359
]
]
] |
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, StreamingHttpResponse, FileResponse
from django.template import loader
from django.shortcuts import get_object_or_404, render, redirect
from django.views import View
from django.views.generic import DetailView, ListView
from django.db.models import (
Count,
Max,
Min,
Q,
F,
Prefetch,
Subquery,
OuterRef,
ExpressionWrapper,
FloatField,
BooleanField,
)
from django.db.models.functions import Concat, FirstValue, Cast
from django.core import management
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models.functions import Coalesce
from django.contrib.postgres.search import SearchRank, SearchQuery
from django.contrib.postgres.aggregates import StringAgg
from django.urls import reverse, reverse_lazy
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required, user_passes_test
from dal.autocomplete import Select2QuerySetView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.db import transaction, IntegrityError
from django.forms.models import model_to_dict
from django.forms import formset_factory, inlineformset_factory, modelformset_factory
from django.conf import settings
from django.utils.html import format_html
from django.views.decorators.cache import cache_page
import glob
from os.path import basename, getmtime
from datetime import datetime
import csv
import sys
from operator import attrgetter
from tempfile import NamedTemporaryFile, TemporaryDirectory
import zipfile
from . import models
from .models import (
Work,
WorkType,
Author,
Conference,
Institution,
Appellation,
Affiliation,
ConferenceSeries,
SeriesMembership,
Organizer,
Country,
Keyword,
Topic,
Language,
CountryLabel,
Authorship,
License,
)
from .forms import (
WorkFilter,
AuthorFilter,
AuthorMergeForm,
WorkForm,
WorkAuthorshipForm,
FullInstitutionForm,
InstitutionMergeForm,
AffiliationEditForm,
AffiliationMergeForm,
KeywordMergeForm,
TagForm,
TopicMergeForm,
AffiliationMultiMergeForm,
KeywordMultiMergeForm,
ConferenceForm,
ConferenceCheckoutForm,
ConferenceSeriesInline,
LanguageMergeForm,
WorkTypeMergeForm,
InstitutionMultiMergeForm,
TopicMultiMergeForm,
ConferenceXMLUploadForm,
)
PERMISSIONS_ERROR_TEXT = (
"Please contact the lead project editors to edit this part of the database."
)
def cache_for_anon(func):
"""
On these views, call the cache if the user is not authenticated
"""
def wrap(request, *args, **kwargs):
if request.user.is_authenticated:
return func(request, *args, **kwargs)
else:
return cache_page(settings.CACHES["default"]["TIMEOUT"])(func)(
request, *args, **kwargs
)
return wrap
def user_is_staff(func):
def wrap(request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect(f"{reverse('login')}?next={request.path}")
if request.user.is_staff:
return func(request, *args, **kwargs)
else:
messages.warning(request, PERMISSIONS_ERROR_TEXT)
return redirect("home_view")
return wrap
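# Illustrative usage of the decorator above (the view name is hypothetical):
# @user_is_staff
# def editing_dashboard(request):
#     ...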
class StaffRequiredMixin:
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect(f"{reverse('login')}?next={self.request.path}")
if self.request.user.is_staff:
return super().dispatch(*args, **kwargs)
else:
messages.warning(self.request, PERMISSIONS_ERROR_TEXT)
return redirect("home_view")
class ItemLabelAutocomplete(Select2QuerySetView):
def get_selected_result_label(self, item):
return self.get_result_label(item)
class WorkAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Work.objects.all()
parents_only = self.forwarded.get("parents_only", None)
if parents_only:
qs = qs.filter(work_type__is_parent=True)
conference = self.forwarded.get("conference", None)
if conference:
qs = qs.filter(conference=conference)
if self.q:
qs = qs.filter(title__icontains=self.q)
return qs.all()
class AppellationAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Appellation.objects.all()
if self.q:
qs = qs.filter(
Q(first_name__icontains=self.q) | Q(last_name__icontains=self.q)
).all()
return qs
class KeywordAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Keyword.objects.annotate(n_works=Count("works")).order_by("-n_works")
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class LanguageAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Language.objects.annotate(n_works=Count("works")).order_by(
"-n_works", "title"
)
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class TopicAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Topic.objects.annotate(n_works=Count("works")).order_by("-n_works")
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class CountryAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Country.objects.annotate(
n_works=Count(
"institutions__affiliations__asserted_by__work", distinct=True
)
).order_by("-n_works")
if self.q:
qs = qs.filter(
Q(pref_name__icontains=self.q) | Q(names__name__icontains=self.q)
)
return qs.distinct()
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class InstitutionAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = (
Institution.objects.annotate(
n_works=Count("affiliations__asserted_by__work", distinct=True)
)
.select_related("country")
.order_by("-n_works")
)
if self.q:
qs = qs.filter(name__icontains=self.q).all()
return qs
def get_result_label(self, item):
if item.country is not None:
c_label = item.country.pref_name
else:
c_label = ""
location_statement = ", ".join(
[l for l in [item.state_province_region, c_label] if l != ""]
)
return f"{item} ({item.n_works} works)<br><small text-class='muted'>{location_statement}</small>"
class AffiliationAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = (
Affiliation.objects.annotate(
n_works=Count("asserted_by__work", distinct=True)
)
.select_related("institution", "institution__country")
.order_by("-n_works")
)
inst_filter = self.forwarded.get("institution", None)
if inst_filter:
qs = qs.filter(institution=inst_filter)
if self.q:
qs = qs.filter(
Q(department__icontains=self.q) | Q(institution__name__icontains=self.q)
).distinct()
return qs
def get_result_label(self, item):
if item.institution.country is not None:
c_label = item.institution.country.pref_name
else:
c_label = ""
location_statement = ", ".join(
[l for l in [item.institution.state_province_region, c_label] if l != ""]
)
return f"{item} ({item.n_works} works)<br><small text-class='muted'>{location_statement}</small>"
class ConferenceAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Conference.objects.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
)
).order_by("year", "main_series", "short_title", "theme_title")
if self.q:
qs = qs.filter(search_text__icontains=self.q).distinct()
return qs
def get_result_label(self, item):
if item.main_series:
return f"{item.main_series} - {item.year} - {item.short_title}"
elif item.short_title:
return f"{item.year} - {item.short_title}"
else:
return f"{item.year} - {item.theme_title}"
class AuthorAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Author.objects.annotate(
n_works=Count("authorships", distinct=True),
main_last_name=Max("appellations__last_name"),
main_first_name=Max("appellations__first_name"),
).order_by("main_last_name", "main_first_name", "-n_works")
if self.q:
qs = qs.filter(appellations_index__icontains=self.q).distinct()
return qs
def get_result_label(self, item):
return format_html(
f"{item.most_recent_appellation} ({item.n_works} works)<br><small text-class='muted'>(All names: {item.appellations_index})</small>"
)
def work_view(request, work_id):
related_conference = Conference.objects.annotate(
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
main_series=StringAgg(
"series_memberships__series__abbreviation", delimiter=" / ", distinct=True
),
).prefetch_related("series", "organizers")
work = get_object_or_404(
Work.objects.select_related("work_type", "full_text_license").prefetch_related(
Prefetch("conference", queryset=related_conference),
"keywords",
"topics",
"languages",
Prefetch(
"session_papers",
queryset=Work.objects.prefetch_related(
Prefetch(
"authorships",
queryset=Authorship.objects.select_related("appellation"),
),
),
),
Prefetch(
"parent_session",
queryset=Work.objects.prefetch_related(
Prefetch(
"authorships",
queryset=Authorship.objects.select_related(
"author", "appellation"
),
)
),
),
),
pk=work_id,
)
authorships = (
Authorship.objects.filter(work_id=work_id)
.order_by("authorship_order")
.distinct()
.select_related("work", "author", "appellation")
.prefetch_related(
Prefetch(
"affiliations",
queryset=Affiliation.objects.select_related(
"institution", "institution__country"
),
)
)
)
context = {"work": work, "authorships": authorships}
return render(request, "work_detail.html", context)
def author_view(request, author_id):
author = get_object_or_404(Author, pk=author_id)
sorted_authorships = (
Authorship.objects.filter(author=author)
.order_by("work__conference__year")
.prefetch_related(
Prefetch("work", queryset=Work.objects.select_related("conference"))
)
)
appellations = (
Appellation.objects.filter(asserted_by__author=author)
.distinct()
.annotate(latest_year=Max("asserted_by__work__conference__year"))
.order_by("-latest_year")
.prefetch_related(Prefetch("asserted_by", queryset=sorted_authorships))
)
affiliations = (
Affiliation.objects.filter(asserted_by__author=author)
.distinct()
.annotate(latest_year=Max("asserted_by__work__conference__year"))
.order_by("-latest_year")
.prefetch_related(
Prefetch("asserted_by", queryset=sorted_authorships),
Prefetch(
"institution", queryset=Institution.objects.select_related("country")
),
)
)
works = (
Work.objects.filter(authorships__author=author)
.order_by("conference__year")
.distinct()
.select_related("conference", "parent_session", "work_type")
.prefetch_related(
Prefetch(
"conference",
queryset=Conference.objects.prefetch_related("series", "organizers"),
),
"session_papers",
"keywords",
"topics",
"languages",
Prefetch(
"authorships",
queryset=Authorship.objects.select_related("appellation", "author"),
),
)
)
author_admin_page = reverse("admin:abstracts_author_change", args=(author.pk,))
context = {
"author": author,
"works": works,
"appellations": appellations,
"affiliations": affiliations,
"author_admin_page": author_admin_page,
}
return render(request, "author_detail.html", context)
class AuthorSplit(DetailView, StaffRequiredMixin):
model = Author
template_name = "author_split.html"
context_object_name = "original_author"
def get_context_data(self, **kwargs):
authorships = Authorship.objects.filter(author=self.get_object()).order_by(
"work__conference__year"
)
return {self.context_object_name: self.get_object(), "authorships": authorships}
@transaction.atomic
def post(self, request, *args, **kwargs):
"""
Create new author and transfer authorships
"""
authorships_to_move = request.POST.getlist("splitselect")
try:
new_author = Author.objects.create()
Authorship.objects.filter(id__in=authorships_to_move).update(
author=new_author
)
# Force-update appellations
self.get_object().save()
new_author.save()
messages.success(
request,
f"{len(authorships_to_move)} authorships moved to new author id {new_author.id}",
)
return redirect("author_detail", new_author.id)
except:
messages.error(request, str(authorships_to_move))
return redirect("author_split", self.get_object().id)
class XMLView(DetailView, LoginRequiredMixin):
model = Work
context_object_name = "work"
def get(self, request, *args, **kwargs):
response = HttpResponse(self.get_object().full_text, content_type="xhtml+xml")
response[
"Content-Disposition"
] = f"attachment; filename={self.get_object().id}.xml"
return response
class AuthorList(ListView):
context_object_name = "author_list"
template_name = "author_list.html"
paginate_by = 50
def get_queryset(self):
base_result_set = Author.objects.exclude(appellations__isnull=True).annotate(
n_conferences=Count("works__conference", distinct=True)
)
raw_filter_form = AuthorFilter(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
order_res = filter_form["ordering"]
if order_res is None or order_res == "":
order_res = "last_name"
result_set = base_result_set.annotate(
last_name=Max("appellations__last_name"),
n_works=Count("authorships", distinct=True),
).order_by(order_res)
author_res = filter_form["author"]
if author_res is not None:
result_set = result_set.filter(id=author_res.id)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(
authorships__affiliations=affiliation_res
)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(
authorships__affiliations__institution=institution_res
)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(
authorships__affiliations__institution__country=country_res
)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(works__conference=conference_res)
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
name_res = filter_form["name"]
if name_res != "":
result_set = result_set.filter(appellations_index__icontains=name_res)
first_name_res = filter_form["first_name"]
if first_name_res != "":
result_set = result_set.filter(
authorships__appellation__first_name__icontains=first_name_res
)
last_name_res = filter_form["last_name"]
if last_name_res != "":
result_set = result_set.filter(
authorships__appellation__last_name__icontains=last_name_res
)
# Newest affiliations
newest_authorship = Authorship.objects.filter(
author=OuterRef("pk")
).order_by("-work__conference__year")
annotated_authors = result_set.annotate(
main_affiliation_department=Subquery(
newest_authorship.values("affiliations__department")[:1]
),
main_affiliation_institution=Subquery(
newest_authorship.values("affiliations__institution__name")[:1]
),
main_affiliation_institution_city=Subquery(
newest_authorship.values("affiliations__institution__city")[:1]
),
main_affiliation_institution_state=Subquery(
newest_authorship.values(
"affiliations__institution__state_province_region"
)[:1]
),
main_affiliation_institution_country=Subquery(
newest_authorship.values(
"affiliations__institution__country__pref_name"
)[:1]
),
most_recent_first_name=Subquery(
newest_authorship.values("appellation__first_name")[:1]
),
most_recent_last_name=Subquery(
newest_authorship.values("appellation__last_name")[:1]
),
n_works=Count("authorships", distinct=True),
)
return annotated_authors
else:
messages.warning(
self.request,
"Query parameters not recognized. Check your URL and try again.",
)
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["author_filter_form"] = AuthorFilter(data=self.request.GET)
context["available_authors_count"] = Author.objects.count()
context["redirect_url"] = reverse("author_list")
return context
def annotate_multiple_series(qs):
return qs.annotate(
n_conferences=Count("conferences", distinct=True),
earliest_year=Min("conferences__year"),
latest_year=Max("conferences__year"),
n_complete=Count(
"conferences", filter=Q(conferences__entry_status="c"), distinct=True
),
n_in_progress=Count(
"conferences", filter=Q(conferences__entry_status="i"), distinct=True
),
n_in_review=Count(
"conferences", filter=Q(conferences__entry_status="r"), distinct=True
),
n_remaining=F("n_conferences")
- F("n_complete")
- F("n_in_progress")
- F("n_in_review"),
pct_complete=(
Cast(F("n_complete"), FloatField()) / Cast(F("n_conferences"), FloatField())
)
* 100,
pct_in_progress=(
Cast(F("n_in_progress"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
pct_in_review=(
Cast(F("n_in_review"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
pct_remaining=(
Cast(F("n_remaining"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
).order_by("title")
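# Same completion statistics, but computed as a plain aggregate over a single queryset of conferences (used for standalone events and one series at a time).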
def annotate_single_series(qs):
res = qs.aggregate(
earliest_year=Min("year"),
latest_year=Max("year"),
n_conferences=Count("id", distinct=True),
n_complete=Count("id", filter=Q(entry_status="c"), distinct=True),
n_in_progress=Count("id", filter=Q(entry_status="i"), distinct=True),
n_in_review=Count("id", filter=Q(entry_status="r"), distinct=True),
)
res["n_remaining"] = (
res["n_conferences"]
- res["n_complete"]
- res["n_in_progress"]
- res["n_in_review"]
)
if res["n_conferences"] > 0:
res["pct_complete"] = (res["n_complete"] / res["n_conferences"]) * 100
res["pct_in_progress"] = (res["n_in_progress"] / res["n_conferences"]) * 100
res["pct_in_review"] = (res["n_in_review"] / res["n_conferences"]) * 100
res["pct_remaining"] = (res["n_remaining"] / res["n_conferences"]) * 100
else:
res["pct_complete"] = 0
res["pct_in_progress"] = 0
res["pct_in_review"] = 0
res["pct_remaining"] = 0
return res
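# All series that contain at least one conference, with progress annotations.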
def conference_series_qs():
return annotate_multiple_series(
ConferenceSeries.objects.exclude(conferences__isnull=True)
)
class ConferenceSeriesList(ListView):
context_object_name = "series_list"
template_name = "conference_series_list.html"
def get_queryset(self):
base_result_set = conference_series_qs()
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
sa_conf = Conference.objects.filter(series__isnull=True)
context["standalone_conferences"] = annotate_single_series(sa_conf)
context["standalone_conference_count"] = sa_conf.count()
return context
class ConferenceSeriesDetail(DetailView):
model = ConferenceSeries
template_name = "conference_series_detail.html"
context_object_name = "series"
def get_member_conferences(self):
return Conference.objects.filter(series_memberships__series=self.get_object())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["series_progress"] = annotate_single_series(
self.get_member_conferences()
)
series_order_subquery = SeriesMembership.objects.filter(
conference=OuterRef("pk"), series=self.get_object()
).order_by("number")
context["conference_list"] = (
self.get_member_conferences()
.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
series_order=Subquery(series_order_subquery.values("number")[:1]),
)
.order_by("series_order")
.prefetch_related(
"series_memberships",
"series_memberships__series",
"organizers",
"country",
"hosting_institutions",
"hosting_institutions__country",
"documents",
)
)
context["series_list"] = conference_series_qs()
return context
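# Conferences that belong to no series are presented as a faux series so they can reuse the series detail template.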
class StandaloneList(View):
template_name = "conference_series_detail.html"
def get_standalone_list(self):
qs = (
Conference.objects.filter(series__isnull=True)
.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
)
.order_by("year", "short_title", "theme_title")
.prefetch_related(
"series_memberships",
"series_memberships__series",
"organizers",
"country",
"hosting_institutions",
"hosting_institutions__country",
"documents",
)
)
return qs
def get(self, request):
faux_series = {
"title": "Standalone Events",
"notes": "Digital humanities events not belonging to a larger series, such symposia or workshops.",
"n_conferences": self.get_standalone_list().count(),
}
context = {
"conference_list": self.get_standalone_list(),
"series": faux_series,
"series_list": conference_series_qs(),
"series_progress": annotate_single_series(self.get_standalone_list()),
}
return render(request, self.template_name, context)
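# Landing page: headline counts of conferences, covered years, works, authors, institutions, and countries with attested data.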
def home_view(request):
conference_count = Conference.objects.count()
years_count = Conference.objects.aggregate(year_range=Max("year") - Min("year"))[
"year_range"
]
work_count = Work.objects.count()
author_count = Author.objects.exclude(authorships__work__isnull=True).count()
institution_count = Institution.objects.count()
country_count = (
Country.objects.filter(
Q(institutions__affiliations__asserted_by__work__isnull=False)
| Q(institutions__conferences__isnull=False)
| Q(conferences__isnull=False)
)
.distinct()
.count()
)
context = {
"site": {
"conference_count": conference_count,
"years_count": years_count,
"work_count": work_count,
"author_count": author_count,
"institution_count": institution_count,
"country_count": country_count,
}
}
return render(request, "index.html", context)
@user_is_staff
@transaction.atomic
def author_merge_view(request, author_id):
author = get_object_or_404(Author, pk=author_id)
if request.method == "GET":
"""
Initial load of the merge form displays all the authorships of the current author that will be affected
"""
context = {"merging": author, "author_merge_form": AuthorMergeForm}
return render(request, "author_merge.html", context)
elif request.method == "POST":
"""
Posting the new author id causes all of the old author's authorships to be reassigned.
"""
raw_form = AuthorMergeForm(request.POST)
if raw_form.is_valid():
target_author = raw_form.cleaned_data["into"]
if author == target_author:
"""
If the user chooses the existing author, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an author into themselves. Please select a different author.",
)
return redirect("author_merge", author_id=author_id)
else:
old_author_string = str(author)
merge_results = author.merge(target_author)
target_author.user_last_updated = request.user
target_author.save()
messages.success(
request,
f"Author {old_author_string} has been merged into {target_author}, and the old author entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} authorships updated"
)
return redirect("author_detail", author_id=target_author.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
# Rebuild the context here: it is only defined in the GET branch above
context = {"merging": author, "author_merge_form": AuthorMergeForm}
return render(request, "author_merge.html", context)
def field_required(field):
if field.get_internal_type() in ("CharField", "TextField") and field.blank:
return False
if field.null:
return False
return True
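# Downloads page: pick the public or private table configuration, then build a data dictionary describing every exported model and field.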
def download_data(request):
data_dictionary = []
if request.user.is_authenticated:
dt_config = settings.PRIVATE_DATA_TABLE_CONFIG
zip_url = reverse("private_all_tables_download")
else:
dt_config = settings.PUBLIC_DATA_TABLE_CONFIG
zip_url = reverse("public_all_tables_download")
denormalized_url = reverse("works_download")
denormalized_last_updated = datetime.fromtimestamp(
getmtime(f"{settings.DATA_OUTPUT_PATH}/{settings.DENORMALIZED_WORKS_NAME}.zip")
)
for m in dt_config["CONFIGURATION"]:
model = attrgetter(m["model"])(models)
if "manual_model_description" in m:
model_description = m["manual_model_description"]
else:
try:
model_description = model.model_description
except AttributeError:
model_description = None
all_model_fields = [
{
"name": f.name,
"relation": f.is_relation,
"help_text": f.help_text,
"related_model": str(f.related_model)
.replace("<class 'abstracts.models.", "")
.replace("'>", ""),
"type": f.get_internal_type(),
"required": field_required(f),
}
for f in model._meta.fields
if not f.one_to_many and f.name not in m["exclude_fields"]
]
if m.get("include_string", False):
all_model_fields.append(
{
"name": "label",
"relation": None,
"help_text": "General label for this object",
"related_model": None,
"type": "CharField",
"required": True,
}
)
data_dictionary.append(
{
"model": m["model"],
"model_description": model_description,
"csv_name": m["csv_name"],
"fields": all_model_fields,
}
)
normalized_last_updated = datetime.fromtimestamp(
getmtime(f"{settings.DATA_OUTPUT_PATH}/{dt_config['DATA_ZIP_NAME']}")
)
context = {
"zip_url": zip_url,
"denormalized_url": denormalized_url,
"denormalized_last_updated": denormalized_last_updated,
"normalized_last_updated": normalized_last_updated,
"data_dictionary": data_dictionary,
"denormalized_data_dictionary": settings.DENORMALIZED_HEADERS,
}
return render(request, "downloads.html", context)
def download_works_csv(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.DENORMALIZED_WORKS_NAME}.zip"
response = FileResponse(open(target_zip, "rb"))
return response
def public_download_all_tables(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.PUBLIC_DATA_TABLE_CONFIG['DATA_ZIP_NAME']}"
response = FileResponse(open(target_zip, "rb"))
return response
@login_required
def private_download_all_tables(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.PRIVATE_DATA_TABLE_CONFIG['DATA_ZIP_NAME']}"
response = FileResponse(open(target_zip, "rb"))
return response
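# Create a new work, pre-filling the conference when one is passed in the query string, then continue to the authorship editor.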
@login_required
def WorkCreate(request):
if request.method == "GET":
if "conference" in request.GET:
conf = get_object_or_404(Conference, pk=int(request.GET["conference"]))
work_form = WorkForm(initial={"conference": conf.pk})
else:
work_form = WorkForm()
if request.method == "POST":
work_form = WorkForm(request.POST)
if work_form.is_valid():
new_work = work_form.save()
new_work.user_last_updated = request.user
new_work.save()
messages.success(request, f"{new_work} created.")
return redirect("work_edit_authorship", work_id=new_work.pk)
else:
for err in work_form.errors:
messages.error(request, err)
context = {"work_form": work_form}
return render(request, "work_create.html", context)
@login_required
def WorkEdit(request, work_id):
work = get_object_or_404(Work, pk=work_id)
if request.method == "POST":
work_form = WorkForm(request.POST, instance=work)
if work_form.is_valid():
work.user_last_updated = request.user
work_form.save()
messages.success(request, f'"{work.title}" sucessfully updated.')
return redirect("work_detail", work_id=work.pk)
else:
for f, e in work_form.errors.items():
messages.error(request, f"{f}: {e}")
work_initial_data = model_to_dict(work)
context = {"work_form": WorkForm(initial=work_initial_data), "work": work}
return render(request, "work_edit.html", context)
@login_required
@transaction.atomic
def WorkEditAuthorship(request, work_id):
work = get_object_or_404(Work, pk=work_id)
authorships = work.authorships.all()
AuthorshipWorkFormset = formset_factory(
WorkAuthorshipForm, can_delete=True, extra=0
)
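# Seed the formset with the work's existing authorships so each author row can be edited, reordered, or deleted in one submission.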
initial_data = []
for authorship in authorships:
base_data = {
"author": authorship.author,
"authorship_order": authorship.authorship_order,
"first_name": authorship.appellation.first_name,
"last_name": authorship.appellation.last_name,
"affiliations": [aff for aff in authorship.affiliations.all()],
}
initial_data.append(base_data)
if request.method == "GET":
authorships_forms = AuthorshipWorkFormset(initial=initial_data)
elif request.method == "POST":
authorships_forms = AuthorshipWorkFormset(request.POST)
if authorships_forms.is_valid():
for d_form in authorships_forms.deleted_forms:
d_form_data = d_form.cleaned_data
attached_author = d_form_data["author"]
Authorship.objects.filter(
work=work, author=d_form_data["author"]
).delete()
# Refresh the author in DB to update appellations index
attached_author.save()
for aform in authorships_forms:
if aform not in authorships_forms.deleted_forms:
aform_data = aform.cleaned_data
appellation = Appellation.objects.get_or_create(
first_name=aform_data["first_name"],
last_name=aform_data["last_name"],
)[0]
affiliations = aform_data["affiliations"]
authorship_order = aform_data["authorship_order"]
try:
if aform_data["author"] is None:
author_id = Author.objects.create()
else:
author_id = aform_data["author"]
auth = Authorship.objects.update_or_create(
work=work,
author=author_id,
defaults={
"authorship_order": authorship_order,
"appellation": appellation,
"user_last_updated": request.user,
},
)[0]
author_id.user_last_updated = request.user
author_id.save()
except IntegrityError as e:
messages.error(
request, f"{e}: Ensure authorship order numbers are unique"
)
return redirect("work_edit_authorship", work.pk)
auth.affiliations.clear()
if affiliations is not None:
auth.affiliations.set(affiliations)
messages.success(
request, f'"{work.title}" authorships successfully updated.'
)
if "start_new" in request.POST:
return redirect(
f"{reverse('work_create')}?conference={work.conference.pk}"
)
return redirect("work_detail", work_id=work.pk)
else:
for error in authorships_forms.errors:
messages.error(request, error)
context = {
"authorships_form": authorships_forms,
"work": work,
"affiliation_form": AffiliationEditForm,
}
return render(request, "work_edit_authorships.html", context)
@login_required
def AuthorInfoJSON(request, author_id):
if request.method == "GET":
author = get_object_or_404(Author, pk=author_id)
author_aff = Affiliation.objects.filter(asserted_by__author=author).distinct()
author_dict = {
"first_name": author.most_recent_appellation.first_name,
"last_name": author.most_recent_appellation.last_name,
"work_titles": [w.title for w in author.works.all()][:4],
"works_count": author.works.count(),
}
if author_aff is not None:
author_dict["affiliations"] = [
{"name": str(aff), "id": aff.pk} for aff in author_aff
]
return JsonResponse(author_dict)
@login_required
def AffiliationInfoJSON(request, affiliation_id):
if request.method == "GET":
affiliation = get_object_or_404(Affiliation, pk=affiliation_id)
affiliation_dict = {
"institution": {
"name": str(affiliation.institution),
"id": affiliation.institution.id,
}
}
if affiliation.department is not None:
affiliation_dict["department"] = affiliation.department
return JsonResponse(affiliation_dict)
class WorkDelete(LoginRequiredMixin, SuccessMessageMixin, DeleteView):
model = Work
template_name = "work_delete.html"
extra_context = {"cancel_view": "work_list"}
success_url = reverse_lazy("work_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, f"'{self.get_object().title}' deleted")
return super().delete(request, *args, **kwargs)
class FullWorkList(ListView):
context_object_name = "work_list"
template_name = "work_list.html"
paginate_by = 10
def get_queryset(self):
base_result_set = Work.objects.all()
raw_filter_form = WorkFilter(self.request.GET)
if raw_filter_form.is_valid():
result_set = base_result_set
filter_form = raw_filter_form.cleaned_data
work_type_res = filter_form["work_type"]
if work_type_res is not None:
result_set = result_set.filter(work_type=work_type_res)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(conference=conference_res)
affiliation_res = filter_form["affiliation"]
if len(affiliation_res) > 0:
result_set = result_set.filter(
authorships__affiliations__in=affiliation_res
).distinct()
institution_res = filter_form["institution"]
if len(institution_res) > 0:
result_set = result_set.filter(
authorships__affiliations__institution__in=institution_res
).distinct()
author_res = filter_form["author"]
if len(author_res) > 0:
result_set = result_set.filter(authorships__author__in=author_res)
keyword_res = filter_form["keywords"]
if len(keyword_res) > 0:
result_set = result_set.filter(keywords__in=keyword_res)
topic_res = filter_form["topics"]
if len(topic_res) > 0:
result_set = result_set.filter(topics__in=topic_res)
language_res = filter_form["languages"]
if len(language_res) > 0:
result_set = result_set.filter(languages__in=language_res)
if filter_form["full_text_available"]:
result_set = result_set.exclude(full_text="")
if filter_form["full_text_viewable"]:
result_set = result_set.exclude(full_text="").filter(
full_text_license__isnull=False
)
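# Full-text search: rank matches with SearchRank, and flag works where the query appears only in the full text rather than the title.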
text_res = filter_form["text"]
if text_res != "":
text_query = SearchQuery(text_res, search_type="websearch")
result_set = (
result_set.filter(search_text=text_query)
.annotate(
rank=SearchRank(
F("search_text"),
text_query,
),
# Does the search text show up only in the full text?
search_in_ft_only=ExpressionWrapper(
~Q(title__icontains=text_res), output_field=BooleanField()
),
)
.filter(rank__gt=0.1)
.order_by("-rank")
)
order_res = "rank"
# To sort by the last name of the first author, build a subquery that pulls the first authorship for a given work; appellation__last_name can then be referenced from that subquery in an annotation.
first_author_subquery = Authorship.objects.filter(
work=OuterRef("pk")
).order_by("authorship_order")
order_res = filter_form["ordering"]
if order_res is None or order_res == "":
order_res = "year"
if order_res == "year":
result_set = result_set.order_by("conference__year", "title")
elif order_res == "-year":
result_set = result_set.order_by("-conference__year", "title")
elif order_res == "title":
result_set = result_set.order_by("title")
elif order_res == "-title":
result_set = result_set.order_by("-title")
elif order_res == "last_name":
result_set = result_set.annotate(
first_author_last_name=Subquery(
first_author_subquery.values("appellation__last_name")[:1]
)
).order_by("first_author_last_name", "title")
elif order_res == "-last_name":
result_set = result_set.annotate(
first_author_last_name=Subquery(
first_author_subquery.values("appellation__last_name")[:1]
)
).order_by("-first_author_last_name", "title")
return (
result_set.select_related(
"conference", "work_type", "parent_session", "full_text_license"
)
.annotate(
main_series=StringAgg(
"conference__series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
main_institution=StringAgg(
"conference__hosting_institutions__name",
delimiter=" / ",
distinct=True,
),
)
.prefetch_related(
Prefetch(
"conference",
queryset=Conference.objects.prefetch_related(
Prefetch(
"series_memberships",
queryset=SeriesMembership.objects.select_related(
"series"
),
),
"organizers",
),
),
"session_papers",
Prefetch(
"authorships",
queryset=Authorship.objects.select_related(
"appellation", "author"
),
),
"keywords",
"topics",
"languages",
)
)
else:
for error in raw_filter_form.errors:
messages.warning(self.request, error)
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
raw_filter_form = WorkFilter(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
conference_res = filter_form["conference"]
if conference_res is not None:
conferences_data = (
Conference.objects.filter(id=conference_res.id)
.annotate(
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
)
.select_related("country")
.prefetch_related(
"organizers", "series_memberships", "series_memberships__series"
)
.all()
)
context["selected_conferences"] = conferences_data
context["work_filter_form"] = WorkFilter(data=self.request.GET)
context["available_works_count"] = Work.objects.count()
context["filtered_works_count"] = self.get_queryset().count()
context["redirect_url"] = reverse("work_list")
return context
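# Institution index (login required) ranked by number of attested works, filterable by department, affiliation, country, or conference.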
class FullInstitutionList(LoginRequiredMixin, ListView):
context_object_name = "institution_list"
template_name = "full_institution_list.html"
paginate_by = 10
def get_queryset(self):
annotated_affiliations = Affiliation.objects.annotate(
n_works=Count("asserted_by__work", distinct=True)
)
result_set = (
Institution.objects.annotate(
n_works=Count("affiliations__asserted_by__work", distinct=True)
)
.prefetch_related(
Prefetch("affiliations", annotated_affiliations), "country"
)
.order_by("-n_works")
)
if self.request.GET:
raw_filter_form = FullInstitutionForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
result_set = result_set.annotate(
n_conferences=Count(
"affiliations__asserted_by__work__conference", distinct=True
)
)
department_res = filter_form["department"]
if department_res != "":
result_set = result_set.filter(
affiliations__department__icontains=department_res
)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(affiliations=affiliation_res)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(pk=institution_res.pk)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(country=country_res)
if filter_form["no_department"]:
result_set = result_set.filter(affiliations__department="")
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(
affiliations__asserted_by__work__conference=conference_res
).distinct()
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
if filter_form["ordering"] == "n_dsc":
result_set = result_set.order_by(
"-n_works", "affiliations__institution__name"
)
elif filter_form["ordering"] == "n_asc":
result_set = result_set.order_by(
"n_works", "affiliations__institution__name"
)
elif filter_form["ordering"] == "a":
result_set = result_set.order_by("affiliations__institution__name")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
return result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["institution_filter_form"] = FullInstitutionForm(
initial=self.request.GET
)
context["available_institutions_count"] = Institution.objects.count()
context["filtered_institutions_count"] = self.get_queryset().count()
context["redirect_url"] = reverse("full_institution_list")
return context
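# Variant of the institution list that ranks institutions by the number of distinct affiliated authors instead of works.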
class AuthorInstitutionList(FullInstitutionList):
template_name = "author_institution_list.html"
def get_queryset(self):
base_result_set = Institution.objects.annotate(
n_authors=Count("affiliations__asserted_by__author", distinct=True),
n_conferences=Count(
"affiliations__asserted_by__work__conference", distinct=True
),
).distinct()
result_set = base_result_set
if self.request.GET:
raw_filter_form = FullInstitutionForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
department_res = filter_form["department"]
if department_res != "":
result_set = result_set.filter(
affiliations__department__icontains=department_res
)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(affiliations=affiliation_res)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(pk=institution_res.pk)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(works__conference=conference_res)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(country=country_res)
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
if filter_form["no_department"]:
result_set = result_set.filter(affiliations__department="")
if filter_form["ordering"] == "n_dsc":
result_set = result_set.order_by("-n_authors")
elif filter_form["ordering"] == "n_asc":
result_set = result_set.order_by("n_authors")
elif filter_form["ordering"] == "a":
result_set = result_set.order_by("affiliations__institution__name")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
result_set = base_result_set
else:
# Otherwise default to sorting by n_dsc
result_set = result_set.order_by("-n_authors")
return result_set.distinct()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["redirect_url"] = reverse("author_institution_list")
return context
class InstitutionEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Institution
template_name = "generic_form.html"
fields = ["name", "city", "state_province_region", "country"]
extra_context = {
"form_title": "Edit institution",
"cancel_view": "full_institution_list",
"merge_view": "institution_merge",
}
success_message = "%(name)s updated"
success_url = reverse_lazy("full_institution_list")
def form_valid(self, form):
response = super(InstitutionEdit, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class InstitutionCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Institution
template_name = "generic_form.html"
fields = ["name", "city", "state_province_region", "country"]
extra_context = {
"form_title": "Create institution",
"cancel_view": "full_institution_list",
}
success_message = "%(name)s created"
success_url = reverse_lazy("full_institution_list")
def form_valid(self, form):
response = super(InstitutionCreate, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
@user_is_staff
@transaction.atomic
def institution_merge(request, institution_id):
institution = get_object_or_404(Institution, pk=institution_id)
context = {"merging": institution, "institution_merge_form": InstitutionMergeForm}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this institution.
"""
return render(request, "institution_merge.html", context)
elif request.method == "POST":
"""
Posting the target institution id merges this institution into it, reassigning all affiliations to the target.
"""
raw_form = InstitutionMergeForm(request.POST)
if raw_form.is_valid():
target_institution = raw_form.cleaned_data["into"]
if institution == target_institution:
"""
If the user chooses the existing institution, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an institution into itself. Please select a different institution.",
)
return redirect("institution_merge", institution_id=institution_id)
else:
old_institution_id = str(institution)
merge_results = institution.merge(target_institution)
target_institution.user_last_updated = request.user
target_institution.save()
messages.success(
request,
f"Author {old_institution_id} has been merged into {target_institution}, and the old institution entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} affiliations updated"
)
return redirect("institution_edit", pk=target_institution.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "institution_merge.html", context)
@user_is_staff
@transaction.atomic
def institution_multi_merge(request):
context = {"form": InstitutionMultiMergeForm}
if request.method == "POST":
raw_form = InstitutionMultiMergeForm(request.POST)
if raw_form.is_valid():
target_institution = raw_form.cleaned_data["into"]
source_institutions = raw_form.cleaned_data["sources"].exclude(
pk=target_institution.pk
)
for institution in source_institutions:
old_institution_id = str(institution)
merge_results = institution.merge(target_institution)
target_institution.user_last_updated = request.user
target_institution.save()
messages.success(
request,
f"Institution {old_institution_id} has been merged into {target_institution}, and the old institution entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} institutions updated"
)
return redirect("institution_edit", pk=target_institution.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "institution_multi_merge.html", context)
class AffiliationEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Affiliation
template_name = "generic_form.html"
form_class = AffiliationEditForm
extra_context = {
"form_title": "Edit affiliation",
"cancel_view": "full_institution_list",
"merge_view": "affiliation_merge",
}
success_message = "%(department)s updated"
success_url = reverse_lazy("full_institution_list")
class AffiliationCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Affiliation
template_name = "generic_form.html"
form_class = AffiliationEditForm
extra_context = {
"form_title": "Create affiliation",
"cancel_view": "full_institution_list",
}
success_message = "%(department)s created"
success_url = reverse_lazy("full_institution_list")
def get_initial(self, **kwargs):
super().get_initial(**kwargs)
if "institution" in self.request.GET:
self.initial = {"institution": int(self.request.GET["institution"])}
return self.initial
@login_required
def ajax_affiliation_create(request):
newaff = Affiliation.objects.get_or_create(
department=request.POST["department"],
institution=Institution.objects.get(pk=int(request.POST["institution"])),
)[0]
return JsonResponse({"name": str(newaff), "id": newaff.pk})
@user_is_staff
@transaction.atomic
def affiliation_merge(request, affiliation_id):
affiliation = get_object_or_404(Affiliation, pk=affiliation_id)
context = {"merging": affiliation, "affiliation_merge_form": AffiliationMergeForm}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this affiliation.
"""
return render(request, "affiliation_merge.html", context)
elif request.method == "POST":
"""
Posting the target affiliation id merges this affiliation into it, reassigning every reference to the old affiliation.
"""
raw_form = AffiliationMergeForm(request.POST)
if raw_form.is_valid():
target_affiliation = raw_form.cleaned_data["into"]
if affiliation == target_affiliation:
"""
If the user chooses the existing affiliation, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an affiliation into itself. Please select a different affiliation.",
)
return redirect("affiliation_merge", affiliation_id=affiliation_id)
else:
old_affiliation_id = str(affiliation)
merge_results = affiliation.merge(target_affiliation)
messages.success(
request,
f"Affiliation {old_affiliation_id} has been merged into {target_affiliation}, and the old affiliation entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} affiliations updated"
)
return redirect("affiliation_edit", pk=target_affiliation.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "affiliation_merge.html", context)
@user_is_staff
@transaction.atomic
def affiliation_multi_merge(request):
context = {"form": AffiliationMultiMergeForm}
if request.method == "POST":
raw_form = AffiliationMultiMergeForm(request.POST)
if raw_form.is_valid():
target_affiliation = raw_form.cleaned_data["into"]
source_affiliations = raw_form.cleaned_data["sources"].exclude(
pk=target_affiliation.pk
)
for affiliation in source_affiliations:
old_affiliation_id = str(affiliation)
merge_results = affiliation.merge(target_affiliation)
messages.success(
request,
f"Affiliation {old_affiliation_id} has been merged into {target_affiliation}, and the old affiliation entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} affiliations updated"
)
return redirect("affiliation_edit", pk=target_affiliation.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "affiliation_multi_merge.html", context)
@user_is_staff
@transaction.atomic
def wipe_unused(request):
deletion_dict = {
"Author": Author.objects.exclude(authorships__isnull=False).distinct(),
"Affiliation": Affiliation.objects.exclude(
asserted_by__isnull=False
).distinct(),
"Institution": Institution.objects.exclude(
Q(affiliations__asserted_by__isnull=False) | Q(conferences__isnull=False)
).distinct(),
"Keyword": Keyword.objects.exclude(works__isnull=False).distinct(),
"Appellation": Appellation.objects.exclude(
asserted_by__isnull=False
).distinct(),
}
if request.method == "POST":
for k, v in deletion_dict.items():
res = v.delete()
if res[0] > 0:
messages.success(request, f"{k}: {res[0]} objects deleted")
any_hanging_items = any([v.exists() for k, v in deletion_dict.items()])
context = {"deletions": deletion_dict, "hanging_items": any_hanging_items}
return render(request, "wipe_unused.html", context)
class ConferenceCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = Conference
template_name = "conference_create.html"
form_class = ConferenceForm
extra_context = {
"form_title": "Create conference",
"cancel_view": "conference_list",
}
success_message = "Conference '%(year)s - %(short_title)s' created"
@transaction.atomic
def post(self, request, *args, **kwargs):
response = super().post(request, *args, **kwargs)
form_instance = self.get_form()
if form_instance.is_valid():
for organizer in form_instance.cleaned_data["organizers"]:
self.object.organizers.add(organizer)
self.object.save()
if "goto_abstracts" in request.POST:
return redirect(reverse("work_list") + f"?conference={self.object.id}")
return response
else:
for err in form_instance.errors:
messages.error(request, err)
return response
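# Full conference editor: updates the core record, organizer and host relations, optional bulk license assignment for its works, and series memberships.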
@user_is_staff
@transaction.atomic
def ConferenceEdit(request, pk):
conference = get_object_or_404(Conference, pk=pk)
# populate the conference form, including pulling in the related organizers
conference_dict = model_to_dict(conference)
conference_dict["organizers"] = conference.organizers.all()
form = ConferenceForm(initial=conference_dict)
ConferenceSeriesFormSet = formset_factory(
ConferenceSeriesInline, can_delete=True, extra=0
)
initial_series = [
{"series": memb.series, "number": memb.number}
for memb in SeriesMembership.objects.filter(conference=conference).all()
]
context = {
"conference": conference,
"form": form,
# "licenses": License.objects.all(),
"series_membership_form": ConferenceSeriesFormSet(initial=initial_series),
"form_title": "Edit conference",
"cancel_view": "conference_list",
}
if request.method == "POST":
form = ConferenceForm(data=request.POST, instance=conference)
if form.is_valid():
clean_form = form.cleaned_data
conference.year = clean_form["year"]
conference.short_title = clean_form["short_title"]
conference.notes = clean_form["notes"]
conference.url = clean_form["url"]
# Clear existing relations and update according to the form
conference.organizers.clear()
for organizer in clean_form["organizers"]:
conference.organizers.add(organizer)
conference.hosting_institutions.clear()
for hosting_institution in clean_form["hosting_institutions"]:
conference.hosting_institutions.add(hosting_institution)
conference.save()
# License action
license_action = clean_form["license_action"]
if license_action == "":
pass
elif license_action == "clear":
conference.works.all().update(full_text_license=None)
else:
license_object = License.objects.get(id=int(license_action))
conference.works.all().update(full_text_license=license_object)
series_forms = ConferenceSeriesFormSet(data=request.POST)
if series_forms.is_valid():
# Delete memberships first
for d_form in series_forms.deleted_forms:
d_form_data = d_form.cleaned_data
SeriesMembership.objects.filter(
conference=conference,
series=d_form_data["series"],
number=d_form_data["number"],
).delete()
# Then update new ones
for s_form in series_forms.forms:
if s_form not in series_forms.deleted_forms:
s_form_data = s_form.cleaned_data
SeriesMembership.objects.update_or_create(
conference=conference,
series=s_form_data["series"],
defaults={"number": s_form_data["number"]},
)
messages.success(request, f"Conference {conference} updated.")
if "goto_abstracts" in request.POST:
return redirect(
reverse("work_list") + f"?conference={conference.id}"
)
if "goto_series" in request.POST:
first_series = conference.series.first()
if first_series is None:
return redirect("standalone_conferences")
else:
return redirect("conference_series_detail", pk=first_series.id)
return redirect("conference_edit", pk=conference.pk)
else:
for f, e in series_forms.errors.items():
messages.error(request, f"{f}: {e}")
else:
for f, e in form.errors.items():
messages.error(request, f"{f}: {e}")
return render(request, "conference_edit.html", context)
class ConferenceDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Conference
template_name = "conference_delete.html"
extra_context = {
"form_title": "Delete conference",
"cancel_view": "conference_list",
}
success_message = "Conference deleted"
success_url = reverse_lazy("conference_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(ConferenceDelete, self).delete(request, *args, **kwargs)
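# Accepts a zip of TEI XML files and imports every document into this conference in one transaction, reporting per-file errors.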
class ConferenceXMLLoad(StaffRequiredMixin, DetailView):
model = Conference
template_name = "conference_xml_load.html"
extra_context = {"form": ConferenceXMLUploadForm()}
@transaction.atomic
def post(self, request, *args, **kwargs):
raw_form = ConferenceXMLUploadForm(request.POST, request.FILES)
conference = self.get_object()
if raw_form.is_valid():
with TemporaryDirectory() as upload_dir:
# Write uploaded zip to tempdir
with NamedTemporaryFile(dir=upload_dir, suffix=".zip") as tei_zip:
with open(tei_zip.name, "wb") as upload_zip:
for chunk in request.FILES["file"]:
upload_zip.write(chunk)
if not zipfile.is_zipfile(tei_zip.name):
messages.error(request, "That is not a valid zipfile.")
return render(
request,
"conference_xml_load.html",
{
"object": self.get_object(),
"form": ConferenceXMLUploadForm(),
},
)
# Extract all the files within
with zipfile.ZipFile(tei_zip.name) as zip_ref:
zip_ref.extractall(upload_dir)
# Import all XML
import_results = conference.import_xml_directory(upload_dir)
n_success = len(import_results["successful_files"])
n_failed = len(import_results["failed_files"])
messages.info(
request,
f"{n_success} of {n_success + n_failed} files valid.",
)
for err in import_results["failed_files"]:
messages.error(
request, f"{basename(err['filepath'])}: {err['error']}"
)
if n_failed == 0:
messages.success(request, f"All files imported successfully.")
else:
messages.info(
request,
"Please fix errors or remove malformed files, and re-upload zip. All TEI documents must be valid in order to complete the import.",
)
return render(
request,
"conference_xml_load.html",
{"object": self.get_object(), "form": ConferenceXMLUploadForm()},
)
else:
for f, e in raw_form.errors.items():
messages.error(request, f"{f}: {e}")
return render(
request,
"conference_xml_load.html",
{"object": self.get_object(), "form": ConferenceXMLUploadForm()},
)
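# Checkout flow: an editor can claim or release a conference for data entry; only staff may mark it as complete.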
@login_required
@transaction.atomic
def conference_checkout(request, conference_id):
conference = get_object_or_404(Conference, pk=conference_id)
if request.method == "GET":
"""
Load the current form and display current attached user
"""
context = {
"conference": conference,
"form": ConferenceCheckoutForm(
{"entry_status": conference.entry_status, "editing_user": "self"}
),
}
return render(request, "conference_checkout.html", context)
elif request.method == "POST":
"""
Get the form and update the status if the user has the authority to do so
"""
raw_form = ConferenceCheckoutForm(request.POST)
if raw_form.is_valid():
clean_form = raw_form.cleaned_data
if clean_form["entry_status"] == "c" and not request.user.is_staff:
messages.error(
request,
"Only an administrator can mark this conference as completed.",
)
return redirect("conference_checkout", conference_id=conference.id)
else:
if clean_form["assign_user"] == "self":
conference.entry_status = clean_form["entry_status"]
conference.editing_user = request.user
conference.save()
messages.success(request, "Conference checked out")
elif clean_form["assign_user"] == "clear":
conference.entry_status = clean_form["entry_status"]
conference.editing_user = None
conference.save()
messages.success(request, "Conference cleared")
return redirect(reverse("work_list") + f"?conference={conference.id}")
class SeriesCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Create conference series",
"cancel_view": "conference_list",
}
fields = ["title", "abbreviation", "notes"]
success_message = "Series '%(title)s' created"
success_url = reverse_lazy("conference_list")
class SeriesEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Update conference series",
"cancel_view": "conference_list",
}
fields = ["title", "abbreviation", "notes"]
success_message = "Series '%(title)s' updated"
success_url = reverse_lazy("conference_list")
class SeriesDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete conference series",
"cancel_view": "conference_list",
}
success_message = "Series '%(title)s' deleted"
success_url = reverse_lazy("conference_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(SeriesDelete, self).delete(request, *args, **kwargs)
class OrganizerCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Create conference organizer",
"cancel_view": "full_organizer_list",
}
fields = ["name", "abbreviation", "conferences_organized", "notes", "url"]
success_message = "Organizer '%(name)s' created"
success_url = reverse_lazy("full_organizer_list")
def form_valid(self, form):
response = super(OrganizerCreate, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class OrganizerEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Update conference organizer",
"cancel_view": "full_organizer_list",
}
fields = ["name", "abbreviation", "conferences_organized", "notes", "url"]
success_message = "Organizer '%(name)s' updated"
success_url = reverse_lazy("full_organizer_list")
def form_valid(self, form):
response = super(OrganizerEdit, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class OrganizerDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete organizer",
"cancel_view": "full_organizer_list",
}
success_message = "Organizer %(name)s deleted."
success_url = reverse_lazy("full_organizer_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(OrganizerDelete, self).delete(request, *args, **kwargs)
class OrganizerList(LoginRequiredMixin, ListView):
model = Organizer
template_name = "full_organizer_list.html"
context_object_name = "organizer_list"
class KeywordCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Keyword
template_name = "generic_form.html"
extra_context = {"form_title": "Create keyword", "cancel_view": "full_keyword_list"}
fields = ["title"]
success_message = "Keyword '%(title)s' created"
success_url = reverse_lazy("full_keyword_list")
class KeywordDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Keyword
template_name = "generic_form.html"
extra_context = {"form_title": "Delete keyword", "cancel_view": "full_keyword_list"}
success_message = "Keyword '%(title)s' deleted"
success_url = reverse_lazy("full_keyword_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(KeywordDelete, self).delete(request, *args, **kwargs)
class KeywordEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Keyword
template_name = "generic_form.html"
extra_context = {
"form_title": "Update keyword",
"cancel_view": "full_keyword_list",
"merge_view": "keyword_merge",
"delete_view": "keyword_delete",
}
fields = ["title"]
success_message = "Keyword '%(title)s' updated"
success_url = reverse_lazy("full_keyword_list")
class KeywordList(LoginRequiredMixin, ListView):
model = Keyword
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Keywords",
"tag_edit_view": "keyword_edit",
"tag_create_view": "keyword_create",
"tag_list_view": "full_keyword_list",
"multi_merge": "keyword_multi_merge",
"filter_param_name": "keywords",
}
def get_queryset(self):
base_results_set = Keyword.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
if self.request.GET:
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(
title__icontains=filter_form["name"]
)
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
else:
results_set = results_set.order_by("title")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tag_filter_form"] = TagForm(initial=self.request.GET)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Keyword.objects.count()
return context
@user_is_staff
@transaction.atomic
def keyword_merge(request, keyword_id):
keyword = get_object_or_404(Keyword, pk=keyword_id)
affected_works = Work.objects.filter(keywords=keyword).all()
sample_works = affected_works[:15]
count_elements = affected_works.count() - 15
context = {
"merging": keyword,
"tag_merge_form": KeywordMergeForm,
"sample_elements": sample_works,
"tag_category": "Keyword",
"merge_view": "keyword_merge",
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this keyword.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
Posting the target keyword id merges this keyword into it, retagging all associated works.
"""
raw_form = KeywordMergeForm(request.POST)
if raw_form.is_valid():
target_keyword = raw_form.cleaned_data["into"]
if keyword == target_keyword:
"""
If the user chooses the existing keyword, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a keyword into itself. Please select a different keyword.",
)
return redirect("keyword_merge", keyword_id=keyword_id)
else:
old_keyword_id = str(keyword)
merge_results = keyword.merge(target_keyword)
messages.success(
request,
f"Keyword {old_keyword_id} has been merged into {target_keyword}, and the old keyword entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} keywords updated"
)
return redirect("keyword_edit", pk=target_keyword.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
@user_is_staff
@transaction.atomic
def keyword_multi_merge(request):
context = {
"tag_merge_form": KeywordMultiMergeForm,
"tag_category": "Keyword",
"multi_merge_view": "keyword_multi_merge",
}
if request.method == "POST":
"""
Posting the target keyword id merges each selected source keyword into it, retagging their associated works.
"""
raw_form = KeywordMultiMergeForm(request.POST)
if raw_form.is_valid():
target_keyword = raw_form.cleaned_data["into"]
source_keywords = raw_form.cleaned_data["sources"].exclude(
pk=target_keyword.pk
)
for keyword in source_keywords:
old_keyword_id = keyword.title
merge_results = keyword.merge(target_keyword)
messages.success(
request,
f"Keyword {old_keyword_id} has been merged into {target_keyword}, and the old keyword entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} keywords updated"
)
return redirect("keyword_edit", pk=target_keyword.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_multi_merge.html", context)
class TopicCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Topic
template_name = "generic_form.html"
extra_context = {"form_title": "Create topic", "cancel_view": "full_topic_list"}
fields = ["title"]
success_message = "Topic '%(title)s' created"
success_url = reverse_lazy("full_topic_list")
class TopicDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Topic
template_name = "generic_form.html"
extra_context = {"form_title": "Delete topic", "cancel_view": "full_topic_list"}
success_message = "Topic '%(title)s' deleted"
success_url = reverse_lazy("full_topic_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(TopicDelete, self).delete(request, *args, **kwargs)
class TopicEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Topic
template_name = "generic_form.html"
extra_context = {
"form_title": "Update topic",
"cancel_view": "full_topic_list",
"merge_view": "topic_merge",
"delete_view": "topic_delete",
}
fields = ["title"]
success_message = "Topic '%(title)s' updated"
success_url = reverse_lazy("full_topic_list")
class TopicList(LoginRequiredMixin, ListView):
model = Topic
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Topics",
"tag_edit_view": "topic_edit",
"tag_create_view": "topic_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_topic_list",
"multi_merge": "topic_multi_merge",
"filter_param_name": "topics",
}
def get_queryset(self):
base_results_set = Topic.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Topic.objects.count()
return context
@user_is_staff
@transaction.atomic
def topic_merge(request, topic_id):
topic = get_object_or_404(Topic, pk=topic_id)
affected_elements = topic.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": topic,
"tag_merge_form": TopicMergeForm,
"tag_category": "Topic",
"merge_view": "topic_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this topic.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
Posting the target topic id merges this topic into it, retagging all associated works.
"""
raw_form = TopicMergeForm(request.POST)
if raw_form.is_valid():
target_topic = raw_form.cleaned_data["into"]
if topic == target_topic:
"""
If the user chooses the existing topic, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a topic into itself. Please select a different topic.",
)
return redirect("topic_merge", topic_id=topic_id)
else:
old_topic_id = str(topic)
merge_results = topic.merge(target_topic)
messages.success(
request,
f"Topic {old_topic_id} has been merged into {target_topic}, and the old topic entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} topics updated"
)
return redirect("topic_edit", pk=target_topic.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
@user_is_staff
@transaction.atomic
def topic_multi_merge(request):
context = {
"tag_merge_form": TopicMultiMergeForm,
"tag_category": "Topic",
"multi_merge_view": "topic_multi_merge",
}
if request.method == "POST":
"""
Posting the target topic id merges each selected source topic into it, retagging their associated works.
"""
raw_form = TopicMultiMergeForm(request.POST)
if raw_form.is_valid():
target_topic = raw_form.cleaned_data["into"]
source_topics = raw_form.cleaned_data["sources"].exclude(pk=target_topic.pk)
for topic in source_topics:
old_topic_id = topic.title
merge_results = topic.merge(target_topic)
messages.success(
request,
f"Topic {old_topic_id} has been merged into {target_topic}, and the old topic entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} topics updated"
)
return redirect("topic_edit", pk=target_topic.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_multi_merge.html", context)
class LanguageCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Create language",
"cancel_view": "full_language_list",
}
fields = ["title", "code"]
success_message = "Language '%(title)s' created"
success_url = reverse_lazy("full_language_list")
class LanguageDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete language",
"cancel_view": "full_language_list",
}
success_message = "Language '%(title)s' deleted"
success_url = reverse_lazy("full_language_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(LanguageDelete, self).delete(request, *args, **kwargs)
class LanguageEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Update language",
"cancel_view": "full_language_list",
"merge_view": "language_merge",
"delete_view": "language_delete",
}
fields = ["title", "code"]
success_message = "Language '%(title)s' updated"
success_url = reverse_lazy("full_language_list")
class LanguageList(LoginRequiredMixin, ListView):
model = Language
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Languages",
"tag_edit_view": "language_edit",
"tag_create_view": "language_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_language_list",
"filter_param_name": "languages",
}
def get_queryset(self):
base_results_set = Language.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Language.objects.count()
return context
@user_is_staff
@transaction.atomic
def language_merge(request, language_id):
language = get_object_or_404(Language, pk=language_id)
affected_elements = language.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": language,
"tag_merge_form": LanguageMergeForm,
"tag_category": "Language",
"merge_view": "language_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this language.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
Posting the target language id merges this language into it, retagging all associated works.
"""
raw_form = LanguageMergeForm(request.POST)
if raw_form.is_valid():
target_language = raw_form.cleaned_data["into"]
if language == target_language:
"""
If the user chooses the existing language, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a language into itself. Please select a different language.",
)
return redirect("language_merge", language_id=language_id)
else:
old_language_id = str(language)
merge_results = language.merge(target_language)
messages.success(
request,
f"Language {old_language_id} has been merged into {target_language}, and the old language entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} languages updated"
)
return redirect("language_edit", pk=target_language.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
class WorkTypeCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Create work_type",
"cancel_view": "full_work_type_list",
}
fields = ["title", "is_parent"]
success_message = "Abstract type '%(title)s' created"
success_url = reverse_lazy("full_work_type_list")
class WorkTypeDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete work_type",
"cancel_view": "full_work_type_list",
}
success_message = "Abstract type '%(title)s' deleted"
success_url = reverse_lazy("full_work_type_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(WorkTypeDelete, self).delete(request, *args, **kwargs)
class WorkTypeEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Update abstract type",
"cancel_view": "full_work_type_list",
"merge_view": "work_type_merge",
"delete_view": "work_type_delete",
}
fields = ["title", "is_parent"]
success_message = "Abstract '%(title)s' updated"
success_url = reverse_lazy("full_work_type_list")
class WorkTypeList(LoginRequiredMixin, ListView):
model = WorkType
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Abstract Types",
"tag_edit_view": "work_type_edit",
"tag_create_view": "work_type_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_work_type_list",
"filter_param_name": "work_type",
}
def get_queryset(self):
base_results_set = WorkType.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = WorkType.objects.count()
return context
@user_is_staff
@transaction.atomic
def work_type_merge(request, work_type_id):
work_type = get_object_or_404(WorkType, pk=work_type_id)
affected_elements = work_type.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": work_type,
"tag_merge_form": WorkTypeMergeForm,
"tag_category": "Abstract Type",
"merge_view": "work_type_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
        Initial load of the merge form displays a sample of the works associated with this work_type.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
        Posting the target work_type id causes all of the old work_type's works to be reassigned to the target, and the old work_type to be deleted.
"""
raw_form = WorkTypeMergeForm(request.POST)
if raw_form.is_valid():
target_work_type = raw_form.cleaned_data["into"]
if work_type == target_work_type:
"""
If the user chooses the existing work_type, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a work_type into itself. Please select a different work_type.",
)
return redirect("work_type_merge", work_type_id=work_type_id)
else:
old_work_type_id = str(work_type)
merge_results = work_type.merge(target_work_type)
messages.success(
request,
f"WorkType {old_work_type_id} has been merged into {target_work_type}, and the old work_type entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} work_types updated"
)
return redirect("work_type_edit", pk=target_work_type.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
| [
[
[
29,
35
]
],
[
[
60,
72
],
[
15552,
15564
]
],
[
[
74,
86
],
[
39175,
39187
],
[
39689,
39701
],
[
60343,
60355
]
],
[
[
88,
109
]
],
[
[
111,
123
],
[
32591,
32603
],
[
32807,
32819
],
[
33041,
33053
]
],
[
[
152,
158
]
],
[
[
188,
205
],
[
10491,
10508
],
[
12077,
12094
],
[
27801,
27818
],
[
33233,
33250
],
[
34036,
34053
],
[
34800,
34817
],
[
38580,
38597
],
[
39323,
39340
],
[
55664,
55681
],
[
60499,
60516
],
[
65797,
65814
],
[
73507,
73524
],
[
81950,
81967
],
[
88254,
88271
],
[
94578,
94595
],
[
99725,
99742
]
],
[
[
207,
213
],
[
11980,
11986
],
[
14048,
14054
],
[
26645,
26651
],
[
27668,
27674
],
[
28101,
28107
],
[
29642,
29648
],
[
32411,
32417
],
[
33930,
33936
],
[
34666,
34672
],
[
38418,
38424
],
[
55981,
55987
],
[
57613,
57619
],
[
58953,
58959
],
[
60816,
60822
],
[
62343,
62349
],
[
63573,
63579
],
[
64638,
64644
],
[
69835,
69841
],
[
71359,
71365
],
[
72922,
72928
],
[
73230,
73236
],
[
73901,
73907
],
[
82530,
82536
],
[
83964,
83970
],
[
85340,
85346
],
[
88851,
88857
],
[
90243,
90249
],
[
91547,
91553
],
[
95199,
95205
],
[
96654,
96660
],
[
100356,
100362
],
[
101830,
101836
]
],
[
[
215,
223
],
[
3673,
3681
],
[
3922,
3930
],
[
15203,
15211
],
[
15341,
15349
],
[
28820,
28828
],
[
29466,
29474
],
[
33725,
33733
],
[
34373,
34381
],
[
37598,
37606
],
[
37985,
37993
],
[
38113,
38121
],
[
56736,
56744
],
[
57436,
57444
],
[
58783,
58791
],
[
61571,
61579
],
[
62166,
62174
],
[
63403,
63411
],
[
65509,
65517
],
[
69064,
69072
],
[
69361,
69369
],
[
69453,
69461
],
[
69533,
69541
],
[
74497,
74505
],
[
75186,
75194
],
[
83248,
83256
],
[
83795,
83803
],
[
85178,
85186
],
[
89555,
89563
],
[
90078,
90086
],
[
91389,
91397
],
[
95924,
95932
],
[
96483,
96491
],
[
101087,
101095
],
[
101657,
101665
],
[
3264,
3272
],
[
3495,
3503
]
],
[
[
249,
253
],
[
25177,
25181
]
],
[
[
287,
297
],
[
14115,
14125
],
[
15404,
15414
],
[
23585,
23595
],
[
70471,
70481
]
],
[
[
299,
307
],
[
15778,
15786
],
[
23019,
23027
],
[
40147,
40155
],
[
47935,
47943
],
[
78530,
78538
],
[
80012,
80020
],
[
86704,
86712
],
[
93039,
93047
],
[
98175,
98183
]
],
[
[
343,
348
],
[
5072,
5077
],
[
5449,
5454
],
[
5851,
5856
],
[
6239,
6244
],
[
6832,
6837
],
[
7664,
7669
],
[
9547,
9552
],
[
10214,
10219
],
[
10263,
10268
],
[
16030,
16035
],
[
16510,
16515
],
[
19840,
19845
],
[
20537,
20542
],
[
20687,
20692
],
[
20809,
20814
],
[
20929,
20934
],
[
21917,
21922
],
[
21964,
21969
],
[
22042,
22047
],
[
22118,
22123
],
[
24529,
24534
],
[
24586,
24591
],
[
25587,
25592
],
[
25644,
25649
],
[
46932,
46937
],
[
46997,
47002
],
[
48173,
48178
],
[
48314,
48319
],
[
48834,
48839
],
[
51656,
51661
],
[
51741,
51746
],
[
80545,
80550
],
[
87257,
87262
],
[
93569,
93574
],
[
98713,
98718
]
],
[
[
354,
357
],
[
9611,
9614
],
[
9671,
9674
],
[
12497,
12500
],
[
12796,
12799
],
[
16454,
16457
],
[
20642,
20645
],
[
21882,
21885
],
[
26826,
26829
]
],
[
[
363,
366
],
[
20596,
20599
],
[
21849,
21852
],
[
26840,
26843
]
],
[
[
372,
373
],
[
4814,
4815
],
[
4848,
4849
],
[
6434,
6435
],
[
6467,
6468
],
[
8034,
8035
],
[
8068,
8069
],
[
20728,
20729
],
[
20850,
20851
],
[
20970,
20971
],
[
21983,
21984
],
[
22061,
22062
],
[
22137,
22138
],
[
27123,
27124
],
[
27200,
27201
],
[
27257,
27258
],
[
42904,
42905
],
[
63971,
63972
],
[
64016,
64017
]
],
[
[
379,
380
],
[
21049,
21050
],
[
21078,
21079
],
[
21104,
21105
],
[
21133,
21134
],
[
21191,
21192
],
[
21229,
21230
],
[
21331,
21332
],
[
21384,
21385
],
[
21484,
21485
],
[
21535,
21536
],
[
21635,
21636
],
[
21686,
21687
],
[
42651,
42652
]
],
[
[
386,
394
],
[
10610,
10618
],
[
10746,
10754
],
[
10866,
10874
],
[
11067,
11075
],
[
11187,
11195
],
[
11702,
11710
],
[
12277,
12285
],
[
12601,
12609
],
[
12913,
12921
],
[
12979,
12987
],
[
13343,
13351
],
[
13597,
13605
],
[
45369,
45377
],
[
45515,
45523
],
[
45922,
45930
],
[
48431,
48439
]
],
[
[
400,
408
],
[
18650,
18658
],
[
18801,
18809
],
[
18964,
18972
],
[
19128,
19136
],
[
19357,
19365
],
[
19569,
19577
],
[
19712,
19720
],
[
24655,
24663
],
[
44166,
44174
],
[
44480,
44488
]
],
[
[
414,
422
],
[
18487,
18495
],
[
24137,
24145
],
[
43407,
43415
]
],
[
[
428,
445
],
[
42856,
42873
]
],
[
[
451,
461
],
[
21208,
21218
],
[
21249,
21259
],
[
21351,
21361
],
[
21404,
21414
],
[
21502,
21512
],
[
21555,
21565
],
[
21653,
21663
],
[
21706,
21716
]
],
[
[
467,
479
],
[
42947,
42959
]
],
[
[
522,
528
]
],
[
[
530,
540
]
],
[
[
542,
546
],
[
21186,
21190
],
[
21224,
21228
],
[
21326,
21330
],
[
21379,
21383
],
[
21479,
21483
],
[
21530,
21534
],
[
21630,
21634
],
[
21681,
21685
]
],
[
[
571,
581
]
],
[
[
616,
625
]
],
[
[
627,
643
]
],
[
[
645,
654
]
],
[
[
694,
702
]
],
[
[
746,
756
],
[
42611,
42621
]
],
[
[
758,
769
],
[
42411,
42422
]
],
[
[
817,
826
],
[
8754,
8763
],
[
10323,
10332
],
[
24339,
24348
],
[
25397,
25406
],
[
44885,
44894
],
[
45116,
45125
],
[
47073,
47082
]
],
[
[
851,
858
],
[
3685,
3692
],
[
13779,
13786
],
[
20409,
20416
],
[
30040,
30047
],
[
30161,
30168
],
[
30222,
30229
],
[
38018,
38025
],
[
47843,
47850
],
[
51390,
51397
],
[
54194,
54201
],
[
65518,
65525
],
[
69098,
69105
],
[
75195,
75202
],
[
3276,
3283
]
],
[
[
860,
872
],
[
39916,
39928
],
[
54680,
54692
],
[
55314,
55326
],
[
59415,
59427
],
[
59819,
59831
],
[
70205,
70217
],
[
75631,
75643
],
[
76042,
76054
],
[
76407,
76419
],
[
77040,
77052
],
[
77698,
77710
],
[
78265,
78277
],
[
78971,
78983
],
[
79300,
79312
],
[
79938,
79950
],
[
85697,
85709
],
[
86014,
86026
],
[
86634,
86646
],
[
91950,
91962
],
[
92308,
92320
],
[
92963,
92975
],
[
97063,
97075
],
[
97429,
97441
],
[
98098,
98110
]
],
[
[
900,
908
],
[
3848,
3856
],
[
15029,
15037
],
[
15272,
15280
],
[
19956,
19964
],
[
28632,
28640
],
[
29108,
29116
],
[
29321,
29329
],
[
29592,
29600
],
[
33656,
33664
],
[
33850,
33858
],
[
34288,
34296
],
[
34494,
34502
],
[
37437,
37445
],
[
37809,
37817
],
[
38235,
38243
],
[
39999,
40007
],
[
46347,
46355
],
[
50923,
50931
],
[
53814,
53822
],
[
56542,
56550
],
[
57066,
57074
],
[
57290,
57298
],
[
57563,
57571
],
[
58412,
58420
],
[
58641,
58649
],
[
58910,
58918
],
[
61377,
61385
],
[
61791,
61799
],
[
62020,
62028
],
[
62293,
62301
],
[
63032,
63040
],
[
63261,
63269
],
[
63530,
63538
],
[
64410,
64418
],
[
65649,
65657
],
[
68921,
68929
],
[
69674,
69682
],
[
69786,
69794
],
[
70294,
70302
],
[
71272,
71280
],
[
72127,
72135
],
[
72363,
72371
],
[
72551,
72559
],
[
72664,
72672
],
[
73174,
73182
],
[
74327,
74335
],
[
74822,
74830
],
[
75115,
75123
],
[
76496,
76504
],
[
78358,
78366
],
[
79391,
79399
],
[
81394,
81402
],
[
83063,
83071
],
[
83440,
83448
],
[
83653,
83661
],
[
83914,
83922
],
[
84827,
84835
],
[
85040,
85048
],
[
85297,
85305
],
[
86103,
86111
],
[
89374,
89382
],
[
89733,
89741
],
[
89938,
89946
],
[
90193,
90201
],
[
91048,
91056
],
[
91253,
91261
],
[
91504,
91512
],
[
92400,
92408
],
[
95737,
95745
],
[
96123,
96131
],
[
96340,
96348
],
[
96604,
96612
],
[
97522,
97530
],
[
100898,
100906
],
[
101293,
101301
],
[
101513,
101521
],
[
101780,
101788
],
[
3426,
3434
]
],
[
[
951,
970
],
[
39759,
39778
],
[
54296,
54315
],
[
54971,
54990
],
[
59054,
59073
],
[
59499,
59518
],
[
64728,
64747
],
[
69929,
69948
],
[
75291,
75310
],
[
75702,
75721
],
[
76115,
76134
],
[
76667,
76686
],
[
77325,
77344
],
[
77983,
78002
],
[
78695,
78714
],
[
79047,
79066
],
[
79559,
79578
],
[
85429,
85448
],
[
85769,
85788
],
[
86267,
86286
],
[
91639,
91658
],
[
92028,
92047
],
[
92570,
92589
],
[
96740,
96759
],
[
97142,
97161
],
[
97692,
97711
]
],
[
[
1010,
1028
],
[
15416,
15434
],
[
39739,
39757
],
[
47915,
47933
],
[
54276,
54294
],
[
54951,
54969
],
[
59034,
59052
],
[
59479,
59497
],
[
78510,
78528
],
[
78675,
78693
],
[
79992,
80010
],
[
85409,
85427
],
[
86247,
86265
],
[
86684,
86702
],
[
91619,
91637
],
[
93019,
93037
],
[
98155,
98173
]
],
[
[
1030,
1049
]
],
[
[
1093,
1107
],
[
32867,
32881
],
[
33101,
33115
],
[
33978,
33992
],
[
34712,
34726
],
[
38476,
38490
],
[
39204,
39218
],
[
60093,
60107
],
[
73406,
73420
]
],
[
[
1109,
1125
]
],
[
[
1155,
1174
],
[
3974,
3993
]
],
[
[
1213,
1223
],
[
54992,
55002
],
[
59520,
59530
],
[
64749,
64759
],
[
75312,
75322
],
[
76688,
76698
],
[
78716,
78726
],
[
85450,
85460
],
[
91660,
91670
],
[
96761,
96771
]
],
[
[
1225,
1235
],
[
39780,
39790
],
[
69950,
69960
],
[
76136,
76146
],
[
78004,
78014
],
[
79068,
79078
],
[
85790,
85800
],
[
92049,
92059
],
[
97163,
97173
]
],
[
[
1237,
1247
],
[
54317,
54327
],
[
59075,
59085
],
[
75723,
75733
],
[
77346,
77356
],
[
79580,
79590
],
[
86288,
86298
],
[
92591,
92601
],
[
97713,
97723
]
],
[
[
1270,
1281
],
[
14520,
14531
],
[
27725,
27736
],
[
34728,
34739
],
[
55579,
55590
],
[
57682,
57693
],
[
60414,
60425
],
[
62412,
62423
],
[
63648,
63659
],
[
65053,
65064
],
[
65728,
65739
],
[
70616,
70627
],
[
73422,
73433
],
[
81877,
81888
],
[
84025,
84036
],
[
88187,
88198
],
[
90304,
90315
],
[
94502,
94513
],
[
99646,
99657
]
],
[
[
1283,
1297
],
[
37392,
37406
]
],
[
[
1330,
1343
],
[
34556,
34569
],
[
65936,
65949
]
],
[
[
1369,
1384
],
[
34905,
34920
],
[
66107,
66122
]
],
[
[
1386,
1407
]
],
[
[
1409,
1429
]
],
[
[
1454,
1462
],
[
29987,
29995
],
[
30109,
30117
],
[
30324,
30332
],
[
30352,
30360
],
[
31999,
32007
],
[
32362,
32370
],
[
32509,
32517
],
[
32537,
32545
],
[
32711,
32719
],
[
32739,
32747
],
[
32944,
32952
],
[
32972,
32980
],
[
3014,
3022
]
],
[
[
1493,
1504
],
[
9941,
9952
]
],
[
[
1547,
1557
],
[
3003,
3013
]
],
[
[
1565,
1569
]
],
[
[
1590,
1598
],
[
72419,
72427
]
],
[
[
1600,
1608
],
[
30312,
30320
],
[
31987,
31995
]
],
[
[
1630,
1638
],
[
30280,
30288
],
[
31955,
31963
]
],
[
[
1646,
1649
]
],
[
[
1657,
1660
]
],
[
[
1682,
1692
],
[
30456,
30466
]
],
[
[
1714,
1732
],
[
70947,
70965
]
],
[
[
1734,
1752
],
[
70841,
70859
]
],
[
[
1760,
1767
],
[
71214,
71221
],
[
71752,
71759
]
],
[
[
1782,
1788
],
[
30479,
30485
]
],
[
[
1816,
1820
],
[
15449,
15453
],
[
39805,
39809
],
[
4205,
4209
],
[
10518,
10522
],
[
10815,
10819
],
[
11136,
11140
],
[
12303,
12307
],
[
13129,
13133
],
[
26899,
26903
],
[
34054,
34058
],
[
34818,
34822
],
[
40309,
40313
],
[
47718,
47722
],
[
82013,
82017
]
],
[
[
1826,
1834
],
[
96786,
96794
],
[
97188,
97196
],
[
97738,
97746
],
[
98198,
98206
],
[
98622,
98630
],
[
99580,
99588
],
[
99743,
99751
]
],
[
[
1840,
1846
],
[
14160,
14166
],
[
9502,
9508
],
[
12095,
12101
],
[
14764,
14770
],
[
15944,
15950
],
[
20352,
20358
],
[
26940,
26946
],
[
27819,
27825
],
[
36693,
36699
],
[
38598,
38604
],
[
63733,
63739
]
],
[
[
1852,
1862
],
[
64774,
64784
],
[
69975,
69985
],
[
70496,
70506
],
[
8701,
8711
],
[
10169,
10179
],
[
13408,
13418
],
[
23343,
23353
],
[
23768,
23778
],
[
25299,
25309
],
[
26740,
26750
],
[
26786,
26796
],
[
33251,
33261
],
[
45450,
45460
],
[
46821,
46831
],
[
65815,
65825
],
[
73525,
73535
]
],
[
[
1868,
1879
],
[
54342,
54353
],
[
55017,
55028
],
[
6778,
6789
],
[
13029,
13040
],
[
27028,
27039
],
[
48260,
48271
],
[
51251,
51262
],
[
51604,
51615
],
[
55682,
55693
],
[
60261,
60272
],
[
63930,
63941
]
],
[
[
1885,
1896
],
[
4724,
4735
],
[
12392,
12403
],
[
36257,
36268
],
[
64166,
64177
]
],
[
[
1902,
1913
],
[
59100,
59111
],
[
59545,
59556
],
[
7610,
7621
],
[
11769,
11780
],
[
12691,
12702
],
[
38641,
38652
],
[
39341,
39352
],
[
48123,
48134
],
[
60159,
60170
],
[
60517,
60528
],
[
63818,
63829
]
],
[
[
1919,
1935
],
[
23610,
23626
],
[
75337,
75353
],
[
75748,
75764
],
[
76161,
76177
],
[
22925,
22941
]
],
[
[
1941,
1957
],
[
24081,
24097
],
[
45620,
45636
],
[
66285,
66301
],
[
68200,
68216
],
[
68655,
68671
]
],
[
[
1963,
1972
],
[
76713,
76722
],
[
77371,
77380
],
[
78029,
78038
],
[
78553,
78562
]
],
[
[
1978,
1985
],
[
6193,
6200
],
[
27087,
27094
]
],
[
[
1991,
1998
],
[
78741,
78748
],
[
79093,
79100
],
[
79605,
79612
],
[
80035,
80042
],
[
5039,
5046
],
[
64086,
64093
],
[
80455,
80462
],
[
81812,
81819
],
[
81968,
81975
]
],
[
[
2004,
2009
],
[
85475,
85480
],
[
85815,
85820
],
[
86313,
86318
],
[
86727,
86732
],
[
5820,
5825
],
[
87169,
87174
],
[
88124,
88129
],
[
88272,
88277
]
],
[
[
2015,
2023
],
[
91685,
91693
],
[
92074,
92082
],
[
92616,
92624
],
[
93062,
93070
],
[
5415,
5423
],
[
93478,
93486
],
[
94436,
94444
],
[
94596,
94604
]
],
[
[
2029,
2041
]
],
[
[
2047,
2057
],
[
10948,
10958
],
[
11269,
11279
],
[
11505,
11515
],
[
12153,
12163
],
[
13663,
13673
],
[
14316,
14326
],
[
14800,
14810
],
[
18437,
18447
],
[
35837,
35847
],
[
36839,
36849
],
[
43359,
43369
],
[
46004,
46014
]
],
[
[
2063,
2070
],
[
67790,
67797
]
],
[
[
2100,
2110
],
[
40354,
40364
],
[
46543,
46553
],
[
47641,
47651
]
],
[
[
2116,
2128
],
[
16108,
16120
],
[
20271,
20283
]
],
[
[
2134,
2149
],
[
28069,
28084
],
[
28322,
28337
]
],
[
[
2155,
2163
],
[
33322,
33330
],
[
33402,
33410
],
[
33466,
33474
],
[
34126,
34134
],
[
34604,
34612
]
],
[
[
2169,
2187
],
[
34930,
34948
]
],
[
[
2193,
2212
],
[
48609,
48628
],
[
51133,
51152
],
[
51958,
51977
]
],
[
[
2218,
2238
],
[
55779,
55799
],
[
56207,
56227
]
],
[
[
2244,
2263
],
[
59169,
59188
],
[
59614,
59633
],
[
38380,
38399
]
],
[
[
2269,
2289
],
[
60614,
60634
],
[
61042,
61062
]
],
[
[
2295,
2311
],
[
82215,
82231
],
[
82748,
82764
]
],
[
[
2317,
2324
],
[
86971,
86978
],
[
93318,
93325
],
[
98461,
98468
],
[
80621,
80628
],
[
81667,
81674
],
[
87300,
87307
],
[
93612,
93619
],
[
98756,
98763
]
],
[
[
2330,
2344
],
[
88499,
88513
],
[
89069,
89083
]
],
[
[
2350,
2375
],
[
62492,
62517
],
[
62572,
62597
]
],
[
[
2381,
2402
],
[
84120,
84141
],
[
84407,
84428
]
],
[
[
2408,
2422
],
[
64847,
64861
],
[
66037,
66051
],
[
66689,
66703
]
],
[
[
2428,
2450
],
[
73755,
73777
],
[
74115,
74137
]
],
[
[
2456,
2478
],
[
66132,
66154
]
],
[
[
2484,
2501
],
[
94835,
94852
],
[
95417,
95434
]
],
[
[
2507,
2524
],
[
99985,
100002
],
[
100574,
100591
]
],
[
[
2530,
2555
],
[
57762,
57787
],
[
57842,
57867
]
],
[
[
2561,
2580
],
[
90397,
90416
],
[
90678,
90697
]
],
[
[
2586,
2609
],
[
70583,
70606
],
[
70700,
70723
],
[
71591,
71614
],
[
73053,
73076
],
[
73361,
73384
]
],
[
[
2615,
2637
],
[
3879,
3901
],
[
3452,
3474
]
],
[
[
2731,
2745
]
],
[
[
3138,
3151
],
[
27710,
27723
],
[
55564,
55577
],
[
57667,
57680
],
[
60399,
60412
],
[
62397,
62410
],
[
63633,
63646
],
[
65713,
65726
],
[
81862,
81875
],
[
84010,
84023
],
[
88172,
88185
],
[
90289,
90302
],
[
94487,
94500
],
[
99631,
99644
]
],
[
[
3542,
3560
],
[
14127,
14145
],
[
64708,
64726
],
[
69909,
69927
],
[
70451,
70469
],
[
75271,
75289
],
[
75682,
75700
],
[
76095,
76113
],
[
76647,
76665
],
[
77305,
77323
],
[
77963,
77981
],
[
79027,
79045
],
[
79539,
79557
],
[
85749,
85767
],
[
92008,
92026
],
[
92550,
92568
],
[
96720,
96738
],
[
97122,
97140
],
[
97672,
97690
]
],
[
[
3952,
3973
],
[
4111,
4132
],
[
4631,
4652
],
[
4946,
4967
],
[
5322,
5343
],
[
5727,
5748
],
[
6100,
6121
],
[
6671,
6692
],
[
7503,
7524
],
[
8608,
8629
],
[
9409,
9430
]
],
[
[
4094,
4110
]
],
[
[
4607,
4630
]
],
[
[
4926,
4945
]
],
[
[
5301,
5321
]
],
[
[
5709,
5726
]
],
[
[
6080,
6099
]
],
[
[
6647,
6670
]
],
[
[
7479,
7502
]
],
[
[
8585,
8607
]
],
[
[
9390,
9408
]
],
[
[
10115,
10124
]
],
[
[
12031,
12042
]
],
[
[
14103,
14114
]
],
[
[
15396,
15403
]
],
[
[
15767,
15777
]
],
[
[
20461,
20485
],
[
22891,
22915
]
],
[
[
21775,
21797
],
[
23434,
23456
],
[
23973,
23995
],
[
26568,
26590
]
],
[
[
22856,
22876
],
[
23175,
23195
],
[
25108,
25128
],
[
26513,
26533
]
],
[
[
22998,
23018
]
],
[
[
23562,
23584
]
],
[
[
25162,
25176
]
],
[
[
26696,
26705
]
],
[
[
27748,
27765
]
],
[
[
29694,
29708
],
[
31128,
31142
]
],
[
[
29880,
29893
]
],
[
[
32460,
32478
]
],
[
[
32654,
32680
]
],
[
[
32886,
32913
]
],
[
[
33120,
33130
]
],
[
[
33997,
34005
]
],
[
[
34751,
34769
]
],
[
[
38495,
38509
]
],
[
[
39223,
39242
]
],
[
[
39728,
39738
]
],
[
[
40134,
40146
]
],
[
[
47895,
47914
],
[
51476,
51495
]
],
[
[
51454,
51475
]
],
[
[
54260,
54275
],
[
54776,
54791
]
],
[
[
54933,
54950
],
[
55410,
55427
]
],
[
[
55602,
55619
]
],
[
[
57705,
57728
]
],
[
[
59018,
59033
]
],
[
[
59461,
59478
]
],
[
[
60112,
60135
]
],
[
[
60437,
60454
]
],
[
[
62435,
62458
]
],
[
[
63671,
63682
]
],
[
[
64691,
64707
]
],
[
[
65751,
65765
]
],
[
[
69892,
69908
],
[
70368,
70384
]
],
[
[
70433,
70450
]
],
[
[
73445,
73464
]
],
[
[
75258,
75270
]
],
[
[
75671,
75681
]
],
[
[
76082,
76094
],
[
76570,
76582
]
],
[
[
76631,
76646
],
[
77134,
77149
]
],
[
[
77291,
77304
],
[
77792,
77805
]
],
[
[
77947,
77962
],
[
78432,
78447
]
],
[
[
78496,
78509
]
],
[
[
78661,
78674
]
],
[
[
79013,
79026
],
[
79465,
79478
]
],
[
[
79527,
79538
]
],
[
[
79980,
79991
]
],
[
[
81900,
81913
]
],
[
[
84048,
84067
]
],
[
[
85397,
85408
]
],
[
[
85737,
85748
],
[
86177,
86188
]
],
[
[
86237,
86246
]
],
[
[
86674,
86683
]
],
[
[
88210,
88221
]
],
[
[
90327,
90344
]
],
[
[
91604,
91618
]
],
[
[
91993,
92007
],
[
92474,
92488
]
],
[
[
92537,
92549
]
],
[
[
93006,
93018
]
],
[
[
94525,
94539
]
],
[
[
96705,
96719
]
],
[
[
97107,
97121
],
[
97596,
97610
]
],
[
[
97659,
97671
]
],
[
[
98142,
98154
]
],
[
[
99669,
99684
]
]
] |
import logging
import urllib.parse
import requests
import base64
"""
HASS module to read Aerogarden Bounty info; later there will be an option to control the light.
Written by @epotex
"""
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'aerogarden'
agent = "BountyWiFi/1.1.13 (iPhone; iOS 10.3.2; Scale/2.00)"
port = "8080"
host = "http://ec2-54-86-39-88.compute-1.amazonaws.com:"
#API Calls
Login_call = "/api/Admin/Login"
SetDictPushCount = "/api/CustomData/SetDictPushCount?userID="
QueryUserDevice = "/api/CustomData/QueryUserDevice"
GetUserSetted = "/api/CustomData/GetUserSetted"
QueryDeviceOnline = "/api/CustomData/QueryDeviceOnline"
QueryDeviceStatus = "/api/CustomData/QueryDeviceStatus"
UpdateDeviceConfig = "/api/CustomData/UpdateDeviceConfig"
auth_data = ""
encoded_mac = ""
AERO_PARAMETERS = {}
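# setup() logs in with the mail/password from the component config, queries the
# garden status for the configured MAC address, and publishes each returned field
# as a Home Assistant state under the 'Aerogarden.*' namespace.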
def base64decode(b):
return base64.b64decode(b).decode('utf-8')
def setup(hass, base_config):
config = base_config.get(DOMAIN)
encoded_email = urllib.parse.quote(config['mail'])
encoded_password = urllib.parse.quote(config['password'])
encoded_mac = urllib.parse.quote(config['aerogarden_mac_address'])
auth_data = "mail=" + encoded_email + "&userPwd=" + encoded_password
apiurl = str(host) + str(port) + str(Login_call)
headers = {
'User-Agent': 'BountyWiFi/1.1.13 (iPhone; iOS 10.3.2; Scale/2.00)',
"Content-Type": "application/x-www-form-urlencoded",
"Connection": "keep-alive",
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate"
}
try:
r = requests.post(apiurl, data=auth_data, headers=headers)
        response = r.json()
        userID = response["code"]
device_url = "airGuid=" + encoded_mac + "&userID=" + str(userID)
apiurl = str(host) + str(port) + str(QueryDeviceStatus)
r = requests.post(apiurl, data=str(device_url), headers=headers)
garden_data = r.json()
status = 'online'
#extracted info
config_id= garden_data['configID']
airGuid = garden_data['airGuid']
lightCycle = garden_data['lightCycle']
pumpCycle = garden_data['pumpCycle']
lightTemp = garden_data['lightTemp']
lightStat = garden_data['lightStat']
clock = garden_data['clock']
pumpStat = garden_data['pumpStat']
pumpHydro = garden_data['pumpHydro']
pumpRemind4Hour = garden_data['pumpRemind4Hour']
plantedType = garden_data['plantedType']
        garden_name = base64decode(garden_data['plantedName'])
totalDay = garden_data['totalDay']
plantedDay = garden_data['plantedDay']
nutriRemindDay = garden_data['nutriRemindDay']
alarmAllow = garden_data['alarmAllow']
plantedDate = garden_data['plantedDate']
nutrientDate = garden_data['nutrientDate']
updateDate = garden_data['updateDate']
createDate = garden_data['createDate']
swVersion = garden_data['swVersion']
hwVersion = garden_data['hwVersion']
bwVersion = garden_data['bwVersion']
oldPlantedDay = garden_data['oldPlantedDay']
deviceID = garden_data['deviceID']
deviceIP = garden_data['deviceIP']
    except requests.exceptions.RequestException:
_LOGGER.exception("Error communicating with AeroGarden")
status = 'offline'
return False
#display extracted info
hass.states.set('Aerogarden.garden_name',garden_name )
hass.states.set('Aerogarden.config_id',config_id )
hass.states.set('Aerogarden.airGuid',airGuid )
hass.states.set('Aerogarden.lightCycle',lightCycle )
hass.states.set('Aerogarden.pumpCycle',pumpCycle )
hass.states.set('Aerogarden.lightTemp',lightTemp )
hass.states.set('Aerogarden.lightStat',lightStat )
hass.states.set('Aerogarden.clock',clock )
hass.states.set('Aerogarden.pumpStat',pumpStat )
hass.states.set('Aerogarden.pumpHydro',pumpHydro )
hass.states.set('Aerogarden.pumpRemind4Hour',pumpRemind4Hour )
hass.states.set('Aerogarden.totalDay',totalDay )
hass.states.set('Aerogarden.plantedDay',plantedDay )
hass.states.set('Aerogarden.nutriRemindDay',nutriRemindDay )
hass.states.set('Aerogarden.alarmAllow',alarmAllow )
hass.states.set('Aerogarden.plantedDate',plantedDate )
hass.states.set('Aerogarden.nutrientDate',nutrientDate )
hass.states.set('Aerogarden.updateDate',updateDate )
hass.states.set('Aerogarden.createDate',createDate )
hass.states.set('Aerogarden.swVersion',swVersion )
hass.states.set('Aerogarden.hwVersion',hwVersion )
hass.states.set('Aerogarden.bwVersion',bwVersion )
hass.states.set('Aerogarden.oldPlantedDay',oldPlantedDay )
hass.states.set('Aerogarden.deviceID',deviceID )
hass.states.set('Aerogarden.deviceIP',deviceIP )
hass.states.set('Aerogarden.Status', status)
return True
| [
[
[
7,
14
],
[
194,
201
]
],
[
[
22,
28
],
[
964,
970
],
[
1022,
1028
],
[
1079,
1085
]
],
[
[
36,
44
],
[
1542,
1550
],
[
1808,
1816
]
],
[
[
52,
58
],
[
840,
846
]
],
[
[
184,
191
],
[
3209,
3216
]
],
[
[
225,
231
],
[
936,
942
]
],
[
[
247,
252
]
],
[
[
308,
312
],
[
1234,
1238
],
[
1765,
1769
]
],
[
[
322,
326
],
[
1222,
1226
],
[
1753,
1757
]
],
[
[
391,
401
],
[
1246,
1256
]
],
[
[
423,
439
]
],
[
[
485,
500
]
],
[
[
537,
550
]
],
[
[
585,
602
]
],
[
[
641,
658
],
[
1777,
1794
]
],
[
[
697,
715
]
],
[
[
754,
763
]
],
[
[
768,
779
]
],
[
[
786,
801
]
],
[
[
812,
824
],
[
2469,
2481
]
],
[
[
881,
886
]
]
] |
import asyncio
from datetime import datetime, timezone
from functools import wraps
import traceback
import inspect
import logging
import ujson as json_module
import hashlib
import yaml
from aiohttp import web
import aiohttp
import mongoengine
import os
from .engine.objects import Operation, Network, Domain, Log, ObservedHost, TechniqueMapping, Job, Rat, Host, \
ObservedRat, Adversary, CodedStep, ActiveConnection, Agent, AttackTechnique, AttackTactic, SiteUser, Setting, \
Opcodes, Artifactlist, ObservedFile, AttackList, JobException, ObservedSchtask, ObservedProcess, AttackGroup
from .engine.objects import ObservedDomain, ObservedOSVersion, ObservedUser, ObservedShare, ObservedCredential, \
ObservedService, ObservedTimeDelta, ObservedRegKey, ObservedPersistence
from . import authentication as auth
from .engine.database import native_types
from . import ddp
from . import attack
from . import util
from . import interface
from . import extern
log = logging.getLogger(__name__)
routes = []
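# (method, uri, handler) tuples collected by the decorators below and registered
# with the aiohttp router in init(app).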
def api(uri, methods, objects=None, get=None, auth_group=None, headers=None):
"""This is a decorator for web api endpoints
Args:
uri: The URI for the API, can contain keywords denoted by '{}' which indicate
objects: a list of tuples
methods: the list of HTTP methods this API accepts
auth_group: the group that the token must be in for access to this API
headers: A list of headers to return with the Response
"""
if objects is None:
objects = {}
if get is None:
get = {}
if auth_group is None:
auth_group = []
if headers is None:
headers = {}
def decorator(f):
@wraps(f)
async def decorated(req, token, url_match):
kwargs = {}
# Map id to object
for name, _class in objects.items():
if name in url_match:
# If this fails and the request type is 'GET',
# then an exception should be returned
try:
kwargs[name] = _class.objects.with_id(url_match[name])
if kwargs[name] is None:
return web.HTTPBadRequest()
except (mongoengine.errors.ValidationError, ):
# The client has sent an invalid id in the URL
return web.HTTPBadRequest()
# Now set the default get parameters
# For cases where we see args like ?arg1=value1&arg2&...
# arg2 is set to ''
# but change it to True instead
trueified = {k: True if v == '' else v for k, v in req.GET.items()}
for k, v in get.items():
kwargs[k] = trueified.get(k, v)
sig = inspect.signature(f)
if 'token' in sig.parameters:
kwargs['token'] = token
# Finally format the output as json (or jsonfm)
results = await f(req, **kwargs)
if isinstance(results, web.StreamResponse):
return results
else:
json = json_module.dumps(native_types(results), sort_keys=True, indent=4)
return web.Response(text=json, content_type='application/json', headers=headers)
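        # entrypoint wraps `decorated` with token authentication, active-connection
        # bookkeeping, and uniform JSON error handling before it is registered as the route handler.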
async def entrypoint(req):
host = None
try:
# ensure this member is authorized
token = auth.Token(req.cookies.get('AUTH'))
l = [g for g in auth_group if token.in_group(g)]
if len(l) == 0:
raise auth.NotAuthorized()
# active connections
peername = req.transport.get_extra_info('peername')
if peername is not None:
host_ip, port = peername
if req.host:
local_ip = req.host.split(":")[0]
if local_ip == "localhost":
local_ip = "127.0.0.1"
else:
local_ip = "127.0.0.1"
token_host = None
if token.in_group('agent'):
agent = Agent.objects.with_id(token.session_info['_id'])
if agent is None:
raise auth.NotAuthorized
agent.modify(**{'alive': True})
token_host = agent.host
host = ActiveConnection.objects(ip=host_ip, host=token_host, local_ip=local_ip).first()
if host is None:
host = ActiveConnection(ip=host_ip, host=token_host, local_ip=local_ip, connections=0).save()
host.update(inc__connections=1)
resp = await decorated(req, token, req.match_info)
return resp
except auth.NotAuthorized:
return web.HTTPForbidden()
except Exception:
traceback.print_exc()
results = {'error': 'exception in ' + f.__name__}
output = json_module.dumps(results, sort_keys=True, indent=4)
return web.HTTPInternalServerError(text=output, content_type='application/json')
finally:
if host:
host.update(dec__connections=1)
for method in methods:
routes.append((method, uri, entrypoint))
return decorated
return decorator
def websocket(uri, auth_group=None):
if auth_group is None:
auth_group = []
def decorator(f):
@wraps(f)
async def entrypoint(req):
try:
# ensure this member is authorized
token = auth.Token(req.cookies.get('AUTH'))
l = [g for g in auth_group if token.in_group(g)]
if len(l) == 0:
raise auth.NotAuthorized()
return await f(req)
except auth.NotAuthorized:
return web.HTTPForbidden()
except Exception:
traceback.print_exc()
results = {'error': 'exception in ' + f.__name__}
output = json_module.dumps(results, sort_keys=True, indent=4)
return web.HTTPInternalServerError(text=output, content_type='application/json')
routes.append(('GET', uri, entrypoint))
return entrypoint
return decorator
# Example usage:
# GET /api/jobs
# POST /api/jobs { 'action': 'install_service', 'host': 'mm198673-pc', ... }
@api('/api/jobs', methods=['GET', 'POST'], get={'status': None, 'wait': False}, auth_group=['human', 'agent'])
async def query_jobs(request, token, status, wait):
if request.method == 'GET':
query = {}
if status:
query['status'] = status
if token.in_group('agent'):
agent = Agent.objects.with_id(token.session_info['_id'])
if not agent:
raise auth.NotAuthorized()
# are there any jobs for this agent?
query.update({'agent': agent.id})
jobs = list(Job.objects(**query))
if not len(jobs) and wait is not False:
# Now wait for jobs to be created
try:
jobs = [(await Job.wait_next(query))]
except asyncio.CancelledError:
return
else:
jobs = list(Job.objects(**query))
if not len(jobs) and wait is not False:
jobs = [(await Job.wait_next(query))]
return jobs
elif request.method == 'POST':
# only humans are allowed to create new jobs
token.require_group('human')
json = await request.json()
return Job(**json).save().id
# Example usage:
# GET /api/jobs/<job>
# POST /api/jobs/<job> { 'action': 'install_service', 'host': 'mm198673-pc', ... }
@api('/api/jobs/{job}', methods=['GET', 'PUT', 'DELETE'], objects={'job': Job}, auth_group=['human', 'agent'])
async def query_job(request, token, job):
if request.method == 'GET':
if token.in_group('agent'):
# can only get jobs that are not completed and are for them
if job['status'] in ("created", "pending") and str(job.agent.id) == token.session_info['_id']:
return job
else:
raise auth.NotAuthorized()
else:
return job
elif request.method == 'PUT':
if token.in_group('agent'):
# can only put jobs that are not completed and are for them
if job.status in ("created", "pending") and str(job.agent.id) == token.session_info['_id']:
json = await request.json()
# whitelist legal fields
if 'result' in json['action']:
job['action']['result'] = json['action']['result']
if 'error' in json['action']:
job['action']['error'] = json['action']['error']
if 'exception' in json['action']:
job['action']['exception'] = json['action']['exception']
job['status'] = json.get('status', job.status)
if job['status'] == "failed" and 'error' in job['action'] and job['action']['error'] == "no client":
# Force update the clients list
interface.get_clients(job.agent.host)
# find the rat
try:
iv_name = job['action']["rats"]["args"][0]
iv = Rat.objects(agent=job.agent, name=iv_name)
iv.modify(**{'active': False})
except KeyError:
log.warning("Could not find rat to remove for failed job")
return job.save()
else:
raise auth.NotAuthorized()
else: # human
# Update the job
json = await request.json()
if json['create_time']:
json['create_time'] = datetime.strptime(json['create_time'], "%Y-%m-%dT%H:%M:%S.%f")
return job.save()
elif request.method == 'DELETE':
token.require_group('human')
return job.delete()
# Example usage:
# POST /api/clients
@api('/api/clients', methods=['POST'], auth_group=['agent'])
async def query_clients(request, token):
json = await request.json()
# pid, elevated, executable_path
agen = Agent.objects.with_id(token.session_info['_id'])
# Get the list of known rats
complete_names = {iv.name: iv for iv in Rat.objects(host=agen.host)}
# Filter list for living rats
known_names = {}
for name, element in complete_names.items():
if element.active:
known_names[name] = element
# All of the currently running rats, as returned by the job
active = {x['pid']: x for x in json}
# Enumerate the active rats, and delete dead ones
for name, iv in known_names.items():
if name not in active:
iv.modify(**{'active': False})
else:
a = active.pop(name)
iv.update(**{'elevated': a['elevated'],
'executable': a['executable_path']})
# Any new rats need to be added
for name in active:
Rat(**{'agent': agen,
'host': agen.host,
'name': name,
'elevated': active[name]['elevated'],
'executable': active[name]['executable_path'],
'username': active[name]['username'].lower(),
'active': True}).save()
return None
# Example usage:
# GET /api/networks
# POST /api/networks { domain: 'mitre.org' }
@api('/api/networks', methods=['GET', 'POST'], auth_group=['human'])
async def query_networks(request):
if request.method == 'GET':
return Network.objects
elif request.method == 'POST':
json = await request.json()
network = Network(**json).save()
return network.id
@api('/api/networks/{network}', methods=['GET', 'DELETE'], objects={'network': Network}, auth_group=['human'])
async def query_network(request, network):
if request.method == 'GET':
return network
elif request.method == 'DELETE':
network.delete()
@api('/api/heartbeat', methods=['GET'], auth_group=['agent'])
async def agent_check_in(request, token):
agen = Agent.objects.with_id(token.session_info['_id'])
agen.modify(**{'check_in': datetime.now(timezone.utc), 'alive': True})
return True
@api('/api/hosts', methods=['GET'], auth_group=['human'])
async def query_hosts(request):
return Host.objects
@api('/api/domains', methods=['GET'], auth_group=['human'])
async def query_domains(request):
return Domain.objects
@api('/api/domains/{domain}', methods=['GET'], objects={'domain': Domain}, auth_group=['human'])
async def query_domain(request, domain):
return domain
@api('/api/domains/{domain}/hosts', methods=['GET'], objects={'domain': Domain}, auth_group=['human'])
async def query_domainhosts(request, domain):
return Host.objects(domain=domain)
@api('/api/networks/{network}/hosts', methods=['GET'], objects={'network': Network}, auth_group=['human'])
async def query_networkhosts(request, network):
return network.hosts
@api('/api/networks/{network}/hosts/{host}', methods=['GET', 'PUT', 'DELETE'],
objects={'network': Network, 'host': Host}, auth_group=['human'])
async def query_networkhosthosts(request, network, host):
if request.method == 'GET':
return host
elif request.method == 'PUT':
network.modify(push__hosts=host)
elif request.method == 'DELETE':
network.modify(pull__hosts=host)
@api('/api/hosts/{host}/commands', methods=['GET', 'POST'], objects={'host': Host}, auth_group=['human'])
async def query_commands(request, host):
if request.method == 'GET':
if 'hostname' in request.GET:
hosts = Host.objects(hostname=request.GET['hostname'])
return [x.host_command_result() for x in Job.objects(host__in=hosts)]
else:
return [x.host_command_result() for x in Job.objects(host=host)]
elif request.method == 'POST':
json = await request.json()
return interface.agent_shell_command(host, json['command_line']).id
@api('/api/hosts/{host}/commands/{job}', methods=['GET'], get={'wait': False},
objects={'host': Host, 'job': Job}, auth_group=['human'])
async def query_command(request, wait, host, job):
# start waiting for the job before reloading to avoid missing the update
if wait is not False:
try:
await job.wait_till_completed()
except JobException as e:
log.warning(e.args)
return job.host_command_result()
@api('/api/rats', methods=['GET'], auth_group=['human'])
async def query_ivs(request):
query = {k: v for k, v in request.GET.items() if k == 'hostname'}
return Rat.objects(**query)
@api('/api/rats/{rat}', methods=['GET'], objects={'rat': Rat}, auth_group=['human'])
async def query_iv(rat):
return rat
@api('/api/rats/{rat}/commands', methods=['GET', 'POST'],
objects={'rat': Rat}, auth_group=['human'])
async def query_ivcommands(request, rat):
if request.method == 'GET':
return [x.rat_command_result() for x in Job.objects(agent=rat.agent)]
elif request.method == 'POST':
json = await request.json()
return Job.create_rat_command(rat, json["function"], **json["parameters"]).id
@api('/api/rats/{rat}/commands/{job}', methods=['GET'],
get={'wait': False}, objects={'rat': Rat, 'job': Job}, auth_group=['human'])
async def query_ivcommand(request, wait, rat, job):
# start waiting for the job before reloading to avoid missing the update
if wait is not False:
try:
await job.wait_till_completed()
except JobException as e:
log.warning(e.args)
return job.rat_result()
@api('/api/operations', methods=['GET'], auth_group=['human'])
async def query_operations(request):
return Operation.objects
@api('/api/opcodes', methods=['GET'], auth_group=['human'])
async def get_opcodes(request):
return Opcodes.arguments
@api('/api/networks/{network}/operations', methods=['GET', 'POST'], objects={'network': Network}, auth_group=['human'])
async def query_network_operations(request, network):
if request.method == 'GET':
return list(Operation.objects(network=network))
elif request.method == 'POST':
json = await request.json()
if json['start_type'] == 'existing' and 'start_rat' not in json:
return None
json['network'] = network
json['status'] = 'start'
json['status_state'] = ''
json['log'] = Log().save()
# Get the adversary
adversary = Adversary.objects.with_id(json['adversary'])
json['steps'] = [x.name for x in adversary.steps]
operation = Operation(**json).save()
return operation.id
@api('/api/networks/{network}/operations/{operation}', methods=['GET', 'PUT', 'DELETE', 'PATCH'], get={'wait': False},
objects={'network': Network, 'operation': Operation}, auth_group=['human'])
async def query_operation(request, network, operation, wait):
if request.method == 'GET':
if wait:
wait = json_module.loads(wait)
wait["id"] = operation.id
log.info("Wait: {}".format(wait))
# TODO fix race condition here
new = list(Operation.objects(**wait))
if len(new) == 0:
del wait["id"]
new = [await operation.wait(wait)]
return new[0]
return operation
elif request.method == 'PUT':
json = await request.json()
json['network_id'] = network.id
json['hosts'] = network.hosts
return operation.update(**json)
elif request.method == 'DELETE':
return operation.delete()
elif request.method == 'PATCH':
json = await request.json()
operation.update(__raw__={'$set': json})
@api('/api/agents', methods=['GET'], auth_group=['human'])
async def query_agents(request):
return Agent.objects
@api('/api/logs', methods=['GET'], auth_group=['human'])
async def query_logs(request):
return Log.objects
@api('/api/logs/{log}', methods=['GET'], objects={'log': Log}, auth_group=['human'])
async def query_log(request, log):
return log
@api('/api/agents/{agent}', methods=['GET'], objects={'agent': Agent}, auth_group=['human'])
async def query_agent(request, agent):
return agent
@api('/api/adversaries', methods=['GET', 'POST'], auth_group=['human'])
async def query_adversaries(request):
if request.method == 'GET':
return Adversary.objects
elif request.method == 'POST':
json = await request.json()
json['artifactlists'] = [Artifactlist.objects.with_id(x) for x in json['artifactlists']]
json['steps'] = [CodedStep.objects.with_id(x) for x in json['steps']]
return Adversary(**json).save().id
@api('/api/adversaries/{adversary}', methods=['GET', 'PUT', 'DELETE'], objects={'adversary': Adversary}, auth_group=['human'])
async def query_adversary(request, adversary):
if request.method == 'GET':
return adversary
elif request.method == 'PUT':
if (adversary.protected):
new_adv = {}
new_adv['name'] = adversary['name']
new_adv['steps'] = adversary['steps']
new_adv['exfil_method'] = adversary['exfil_method']
new_adv['exfil_port'] = adversary['exfil_port']
new_adv['exfil_address'] = adversary['exfil_address']
new_adv['artifactlists'] = adversary['artifactlists']
adversary = Adversary(**new_adv).save()
# Update the adversary
json = await request.json()
json['artifactlists'] = [Artifactlist.objects.with_id(x) for x in json['artifactlists']]
json['steps'] = [CodedStep.objects.with_id(x) for x in json['steps']]
adversary.update(**json)
return adversary.id
elif request.method == 'DELETE':
if not adversary.protected:
return adversary.delete()
@api('/api/step', methods=['GET'], auth_group=['human'])
async def query_step(request):
return CodedStep.objects
@api('/api/site_user', methods=['GET', 'POST'], auth_group=['admin'])
async def query_siteusers(request):
if request.method == 'GET':
return SiteUser.objects.only('username', 'groups', 'email', 'last_login')
elif request.method == 'POST':
json = await request.json()
username = json['username']
email = json.get('email', '')
password = json.get('password', None)
groups = ['human']
if json.get('admin', False):
groups.append('admin')
return auth.register_user(username, groups, password=password, email=email).id
@api('/api/site_user/{user}', methods=['GET', 'DELETE'], objects={'user': SiteUser}, auth_group=['admin'])
async def query_siteuser(request, token, user):
if request.method == 'GET':
return user.only('username', 'groups', 'email', 'last_login')
elif request.method == 'DELETE':
if token.session_info['_id'] != str(user.id):
return user.delete()
@api('/api/site_user/{user}/admin', methods=['PUT', 'DELETE'], objects={'user': SiteUser}, auth_group=['admin'])
async def query_siteuser_admin(request, token, user):
if request.method == 'PUT':
user.modify(push__groups='admin')
elif request.method == 'DELETE':
if SiteUser.objects(groups='admin').count() > 1 and token.session_info['_id'] != str(user.id):
user.modify(pull__groups='admin')
@api('/api/site_user/{user}/password', methods=['POST'], objects={'user': SiteUser}, auth_group=['admin', 'human'])
async def query_siteuser_password(request, token, user):
json = await request.json()
if 'password' in json:
if token.in_group('admin') or token.session_info['_id'] == str(user.id):
auth.user_change_password(user, json['password'])
@api('/api/site_user/{user}/email', methods=['POST'], objects={'user': SiteUser}, auth_group=['admin'])
async def query_siteuser_email(request, user):
json = await request.json()
if 'email' in json:
user.update(email=json['email'])
@api('/api/save_file', methods=['POST'], auth_group=['admin'])
async def save_file(request):
json = await request.json()
if 'edited' in json and 'file' in json:
file_path = util.get_path(json['file'])
if json['file'].startswith("[-d-]") or file_path is None:
return
core = util.encrypt_file(json['edited'])
with open(file_path, 'wb') as handle:
core.tofile(handle)
@api('/api/list_file', methods=['GET'], auth_group=['admin'])
async def list_files(request):
return util.list_files()
@api('/api/load_file', methods=['POST'], auth_group=['admin'])
async def load_file(request):
json = await request.json()
if 'file' in json:
file_path = util.get_path(json['file'])
if json['file'].startswith("[-d-]") or json['file'] == '' or file_path is None:
return
if file_path.startswith('[m]'):
return file_path
with open(file_path, 'rb') as handle:
data = handle.read()
return util.decrypt_file(data)
@api('/api/load_psexec', methods=['GET'], auth_group=['admin'])
async def load_psexec(request):
extern.load_psexec()
Setting.objects.first().update(last_psexec_update=util.tz_utcnow())
@api('/api/load_attack', methods=['GET'], auth_group=['admin'])
async def load_attack(request):
attack.refresh_attack()
Setting.objects.first().update(last_attack_update=util.tz_utcnow())
@api('/api/update_depth', methods=['POST'], auth_group=['admin'])
async def update_recursion_limit(request):
json = await request.json()
if 'new_value' in json:
Setting.objects.first().modify(recursion_limit=json['new_value'])
@api('/api/group_mimic', methods=['GET'], auth_group=['admin', 'human'])
async def group_coverage(request):
temp_list = []
core = {}
for step in CodedStep.objects:
for mapping in step.mapping:
temp_list.append(mapping.technique)
groups = AttackGroup.objects
for entry in groups:
temp = {}
breakdown = {}
decision = []
for tech in entry.techniques:
temp[tech.name] = (tech in temp_list)
decision.append(tech in temp_list)
breakdown['techniques'] = temp
if (False not in decision) and (len(decision) > 2):
breakdown['conclusion'] = 'Can Fully Emulate'
else:
breakdown['conclusion'] = 'Can Not Fully Emulate'
core[entry.name] = breakdown
return core
@api('/api/steps/{step}/mapping', methods=['POST', 'DELETE'], objects={'step': CodedStep}, auth_group=['human'])
async def post_step_mapping(request, step):
if request.method == 'POST':
json = await request.json()
if 'tactics' not in json or 'technique' not in json:
return
tactics = json['tactics']
technique = json['technique']
try:
tech = AttackTechnique.objects.with_id(technique)
for tactic in tactics:
tac = AttackTactic.objects.with_id(tactic)
step.modify(push__mapping=TechniqueMapping(technique=tech, tactic=tac))
except (TypeError, mongoengine.errors.ValidationError):
return
elif request.method == 'DELETE':
json = await request.json()
if 'tactic' not in json or 'technique' not in json:
return
tactic = json['tactic']
technique = json['technique']
try:
tech = AttackTechnique.objects.with_id(technique)
tac = AttackTactic.objects.with_id(tactic)
for mapping in step.mapping:
if mapping.tactic == tac and mapping.technique == tech:
step.modify(pull__mapping=mapping)
except (TypeError, mongoengine.errors.ValidationError):
return
@api('/api/steps/{step}/mapping/load_defaults', methods=['GET'], objects={'step': CodedStep},
auth_group=['human'])
async def get_step_mapping_defaults(request, step):
step.update(mapping=step.default_mapping)
@api('/api/attack_download.json', methods=['GET'], auth_group=['human'], headers={'Content-Disposition': 'attachment'})
async def get_all_attack_stuff(request):
try:
techniques = []
for technique in AttackTechnique.objects:
this_technique = technique.to_dict()
this_technique['tactics'] = [x.name for x in technique.tactics]
del this_technique['_id']
techniques.append(this_technique)
tactics = [x.to_dict() for x in AttackTactic.objects]
for tactic in tactics:
del tactic["_id"]
return {"techniques": techniques, "tactics": tactics}
except (TypeError, mongoengine.errors.ValidationError):
return
@api('/api/generated/{function}', methods=["POST"], auth_group=['admin'])
async def generated_dispatcher(request):
dispatched_function = request.match_info['function']
request_json = await request.json()
job = getattr(interface, dispatched_function)(**request_json)
try:
await job.wait_till_completed()
return job.action['result']
except JobException:
return job.action['error']
@api('/api/artifactlists', methods=['GET', 'POST'], auth_group=['human'])
async def get_artifactlists(request):
if request.method == 'GET':
return Artifactlist.objects
elif request.method == 'POST':
if request.content_type == "application/json":
content = await request.json()
elif request.content_type == "text/x-yaml":
try:
content = format_yaml(await request.text())
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
return web.Response(status=400, text="The yaml was not properly formatted")
else:
return web.Response(status=400)
try:
return Artifactlist(**content).save().id
except (mongoengine.errors.FieldDoesNotExist, mongoengine.errors.ValidationError) as e:
return web.Response(status=400, text=str(e))
@api('/api/artifactlists/{artifactlist}', methods=['GET', 'PUT', 'DELETE'], objects={'artifactlist': Artifactlist}, auth_group=['human'])
async def query_artifactlist(request, artifactlist):
if request.method == 'GET':
return artifactlist
elif request.method == 'PUT':
if request.content_type == "application/json":
content = await request.json()
elif request.content_type == "text/x-yaml":
try:
content = format_yaml(await request.text())
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
return web.Response(status=400, text="The yaml was not properly formatted")
else:
return web.Response(status=400)
try:
artifactlist.update(**content)
return artifactlist.id
except (mongoengine.errors.FieldDoesNotExist, mongoengine.errors.ValidationError) as e:
return web.Response(status=400, text=str(e))
elif request.method == 'DELETE':
return artifactlist.delete()
@api('/api/parse_artifactlist', methods=['POST'], auth_group=['human'])
async def get_parse_artifactlist(request):
try:
parsed = format_yaml(await request.text())
Artifactlist(**parsed)
return parsed
except (yaml.scanner.ScannerError, yaml.parser.ParserError) as e:
return web.Response(status=400, text="The yaml was not properly formatted: \n" + str(e.problem_mark) + '\n ' + str(e.problem))
except (mongoengine.errors.FieldDoesNotExist, mongoengine.errors.ValidationError) as e:
return web.Response(status=400, text=str(e))
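# format_yaml parses a YAML payload and normalizes any field whose value parsed as a
# single null list item (a bare "-" entry) into an empty list.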
def format_yaml(yaml_content):
    parsed = yaml.safe_load(yaml_content)
cleaned = {}
for k, v in parsed.items():
if isinstance(v, list) and len(v) == 1 and v[0] is None:
cleaned[k] = []
else:
cleaned[k] = v
return cleaned
@api('/api/bsf/{log}', methods=['GET'], objects={'log': Log}, auth_group=['human'],
headers={'Content-Disposition': 'attachment; filename=\"bsf.json\"'})
async def query_bsf(request, log):
return log["event_stream"]
@api('/api/observed/credentials', methods=['GET'], auth_group=['human'])
async def query_credentials(request):
return ObservedCredential.objects
@api('/api/observed/credentials/{credential}', methods=['GET'],
objects={'credential': ObservedCredential}, auth_group=['human'])
async def query_credential(request, token, credential):
return credential
@api('/api/observed/users', methods=['GET'], auth_group=['human'])
async def query_users(request):
return ObservedUser.objects
@api('/api/observed/users/{user}', methods=['GET'],
objects={'user': ObservedUser}, auth_group=['human'])
async def query_user(request, token, user):
return user
@api('/api/observed/shares', methods=['GET'], auth_group=['human'])
async def query_shares(request):
return ObservedShare.objects
@api('/api/observed/shares/{share}', methods=['GET'],
objects={'share': ObservedShare}, auth_group=['human'])
async def query_share(request, token, share):
return share
@api('/api/observed/files', methods=['GET'], auth_group=['human'])
async def query_files(request):
return ObservedFile.objects
@api('/api/observed/files/{file}', methods=['GET'],
objects={'file': ObservedFile}, auth_group=['human'])
async def query_file(request, token, file):
return file
@api('/api/observed/domains', methods=['GET'], auth_group=['human'])
async def query_domains(request):
return ObservedDomain.objects
@api('/api/observed/domains/{domain}', methods=['GET'],
objects={'domain': ObservedDomain}, auth_group=['human'])
async def query_domain(request, token, domain):
return domain
@api('/api/observed/os_versions', methods=['GET'], auth_group=['human'])
async def query_os_versions(request):
    return ObservedOSVersion.objects
@api('/api/observed/os_versions/{os_version}', methods=['GET'],
objects={'os_version': ObservedOSVersion}, auth_group=['human'])
async def query_os_version(request, token, os_version):
return os_version
@api('/api/observed/hosts', methods=['GET'], auth_group=['human'])
async def query_hosts(request):
return ObservedHost.objects
@api('/api/observed/hosts/{host}', methods=['GET'],
objects={'host': ObservedHost}, auth_group=['human'])
async def query_host(request, token, host):
return host
@api('/api/observed/schtasks', methods=['GET'], auth_group=['human'])
async def query_schtasks(request):
return ObservedSchtask.objects
@api('/api/observed/schtasks/{schtask}', methods=['GET'],
objects={'schtask': ObservedSchtask}, auth_group=['human'])
async def query_schtask(request, token, schtask):
return schtask
@api('/api/observed/services', methods=['GET'], auth_group=['human'])
async def query_services(request):
return ObservedService.objects
@api('/api/observed/services/{service}', methods=['GET'],
objects={'service': ObservedService}, auth_group=['human'])
async def query_service(request, token, service):
return service
@api('/api/observed/timedeltas', methods=['GET'], auth_group=['human'])
async def query_timedeltas(request):
return ObservedTimeDelta.objects
@api('/api/observed/timedeltas/{timedelta}', methods=['GET'],
objects={'timedelta': ObservedTimeDelta}, auth_group=['human'])
async def query_timedelta(request, token, timedelta):
return timedelta
@api('/api/observed/rats', methods=['GET'], auth_group=['human'])
async def query_rats(request):
return ObservedRat.objects
@api('/api/observed/rats/{rat}', methods=['GET'],
objects={'rat': ObservedRat}, auth_group=['human'])
async def query_rat(request, token, rat):
return rat
@api('/api/observed/registry_keys', methods=['GET'], auth_group=['human'])
async def query_registry_keys(request):
return ObservedRegKey.objects
@api('/api/observed/registry_keys/{registry_key}', methods=['GET'],
objects={'registry_key': ObservedRegKey}, auth_group=['human'])
async def query_regkey(request, token, registry_key):
return registry_key
@api('/api/observed/persistence', methods=['GET'], auth_group=['human'])
async def query_persistence_all(request):
return ObservedPersistence.objects
@api('/api/observed/persistence/{persistence}', methods=['GET'],
objects={'persistence': ObservedPersistence}, auth_group=['human'])
async def query_persistence(request, token, persistence):
return persistence
@api('/api/observed/processes', methods=['GET'], auth_group=['human'])
async def query_processes(request):
return ObservedProcess.objects
@api('/api/observed/processes/{process}', methods=['GET'],
objects={'process': ObservedProcess}, auth_group=['human'])
async def query_process(request, token, process):
return process
@websocket('/websocket', auth_group=["human"])
async def wb_operation(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
def write_websocket(data):
if not ws.closed:
ws.send_bytes(data)
else:
raise RuntimeError
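    # Mirror the collections registered below to the client over this websocket via the DDP protocol server.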
srv = ddp.DDPServer(write_websocket)
srv.register_collection("operation", Operation)
srv.register_collection("domain", Domain)
srv.register_collection("host", Host)
srv.register_collection("network", Network)
srv.register_collection("rat", Rat)
srv.register_collection("observed_rat", ObservedRat)
srv.register_collection("observed_host", ObservedHost)
srv.register_collection("observed_file", ObservedFile)
srv.register_collection("observed_schtask", ObservedSchtask)
srv.register_collection("job", Job)
srv.register_collection("log", Log)
srv.register_collection("adversary", Adversary)
srv.register_collection("step", CodedStep)
srv.register_collection("active_connection", ActiveConnection)
srv.register_collection("agent", Agent)
srv.register_collection("attack_technique", AttackTechnique)
srv.register_collection("attack_tactic", AttackTactic)
srv.register_collection("attack_list", AttackList)
srv.register_collection("attack_group", AttackGroup)
srv.register_collection("setting", Setting)
srv.register_collection("artifactlist", Artifactlist)
request.app['websockets'].append(ws)
try:
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT or msg.type == aiohttp.WSMsgType.BINARY:
srv.parse_message(msg.data)
elif msg.type == aiohttp.WSMsgType.ERROR:
log.debug('ws connection closed with exception {}'.format(ws.exception()))
finally:
request.app['websockets'].remove(ws)
log.debug('websocket connection closed')
return ws
def init(app):
# setup the generated endpoints
for method, uri, func in routes:
app.router.add_route(method, uri, func)
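# --- illustrative usage sketch (assumption, not part of the original module) ---
# init() only wires the decorator-generated `routes` list into an aiohttp app.
# A minimal, hypothetical way to host the generated endpoints could look like
# this; the port number and the 'websockets' key initialisation are assumptions.
if __name__ == '__main__':
    example_app = web.Application()
    example_app['websockets'] = []   # wb_operation() appends live sockets here
    init(example_app)                # register every generated endpoint
    web.run_app(example_app, port=8888)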
| [
[
[
7,
14
],
[
7361,
7368
]
],
[
[
36,
44
],
[
10070,
10078
],
[
12535,
12543
]
],
[
[
46,
54
],
[
12548,
12556
]
],
[
[
77,
82
],
[
1695,
1700
],
[
5615,
5620
]
],
[
[
90,
99
],
[
5001,
5010
],
[
6095,
6104
]
],
[
[
107,
114
],
[
2803,
2810
]
],
[
[
122,
129
],
[
975,
982
]
],
[
[
138,
158
],
[
17392,
17403
],
[
3141,
3152
],
[
5114,
5125
],
[
6208,
6219
]
],
[
[
166,
173
]
],
[
[
181,
185
],
[
28072,
28076
],
[
28099,
28103
],
[
29028,
29032
],
[
29055,
29059
],
[
29791,
29795
],
[
29818,
29822
],
[
30176,
30180
]
],
[
[
206,
209
],
[
28148,
28151
],
[
28250,
28253
],
[
28456,
28459
],
[
29104,
29107
],
[
29206,
29209
],
[
29437,
29440
],
[
29864,
29867
],
[
30092,
30095
],
[
35526,
35529
],
[
2212,
2215
],
[
2402,
2405
],
[
3048,
3051
],
[
3231,
3234
],
[
4935,
4938
],
[
5190,
5193
],
[
6029,
6032
],
[
6284,
6287
]
],
[
[
217,
224
],
[
36964,
36971
],
[
37002,
37009
],
[
37101,
37108
]
],
[
[
232,
243
],
[
25583,
25594
],
[
26188,
26199
],
[
27130,
27141
],
[
28357,
28368
],
[
28395,
28406
],
[
29338,
29349
],
[
29376,
29387
],
[
29997,
30008
],
[
30035,
30046
],
[
2261,
2272
]
],
[
[
251,
253
]
],
[
[
283,
292
],
[
17228,
17237
],
[
16138,
16147
],
[
16497,
16506
],
[
17008,
17017
],
[
17566,
17575
],
[
35798,
35807
]
],
[
[
294,
301
],
[
12146,
12153
],
[
13258,
13265
],
[
13469,
13476
],
[
16368,
16375
],
[
17206,
17213
],
[
11911,
11918
],
[
12016,
12023
],
[
35936,
35943
]
],
[
[
303,
309
],
[
12901,
12907
],
[
13065,
13071
],
[
12818,
12824
],
[
35847,
35853
]
],
[
[
311,
314
],
[
18427,
18430
],
[
30461,
30464
],
[
16824,
16827
],
[
18356,
18359
],
[
36300,
36303
]
],
[
[
316,
328
],
[
32816,
32828
],
[
32719,
32731
],
[
36087,
36099
]
],
[
[
330,
346
],
[
25510,
25526
]
],
[
[
348,
351
],
[
8001,
8004
],
[
14500,
14503
],
[
15691,
15694
],
[
7135,
7138
],
[
7315,
7318
],
[
7451,
7454
],
[
7556,
7559
],
[
7776,
7779
],
[
14117,
14120
],
[
14213,
14216
],
[
15392,
15395
],
[
15508,
15511
],
[
36260,
36263
]
],
[
[
353,
356
],
[
15093,
15096
],
[
15242,
15245
],
[
15679,
15682
],
[
9590,
9593
],
[
10614,
10617
],
[
11319,
11322
],
[
15013,
15016
],
[
35980,
35983
]
],
[
[
358,
362
],
[
13486,
13490
],
[
13857,
13861
],
[
14487,
14491
],
[
12698,
12702
],
[
13153,
13157
],
[
14017,
14021
],
[
35891,
35895
]
],
[
[
370,
381
],
[
34144,
34155
],
[
34051,
34062
],
[
36029,
36040
]
],
[
[
383,
392
],
[
19217,
19226
],
[
16885,
16894
],
[
18815,
18824
],
[
19094,
19103
],
[
19826,
19835
],
[
36346,
36355
]
],
[
[
394,
403
],
[
24998,
25007
],
[
26328,
26337
],
[
19026,
19035
],
[
20043,
20052
],
[
20369,
20378
],
[
24271,
24280
],
[
35418,
35427
],
[
36393,
36402
]
],
[
[
405,
421
],
[
36453,
36469
],
[
4489,
4505
],
[
4638,
4654
]
],
[
[
423,
428
],
[
18570,
18575
],
[
6898,
6903
],
[
10487,
10492
],
[
12455,
12460
],
[
18241,
18246
],
[
36508,
36513
],
[
4214,
4219
]
],
[
[
430,
445
],
[
25331,
25346
],
[
25895,
25910
],
[
26686,
26701
],
[
36563,
36578
]
],
[
[
447,
459
],
[
25431,
25443
],
[
25956,
25968
],
[
26961,
26973
],
[
36625,
36637
]
],
[
[
461,
469
],
[
21064,
21072
],
[
21453,
21461
],
[
21876,
21884
],
[
22250,
22258
],
[
20542,
20550
],
[
21662,
21670
]
],
[
[
471,
478
],
[
23601,
23608
],
[
23799,
23806
],
[
24046,
24053
],
[
36790,
36797
]
],
[
[
486,
493
],
[
16261,
16268
]
],
[
[
495,
507
],
[
28597,
28609
],
[
18937,
18949
],
[
19954,
19966
],
[
27769,
27781
],
[
28307,
28319
],
[
29734,
29746
],
[
36843,
36855
]
],
[
[
509,
521
],
[
31823,
31835
],
[
31726,
31738
],
[
36146,
36158
]
],
[
[
523,
533
],
[
36682,
36692
]
],
[
[
535,
547
],
[
14754,
14766
],
[
15946,
15958
],
[
27559,
27571
]
],
[
[
549,
564
],
[
33139,
33154
],
[
33030,
33045
],
[
36208,
36223
]
],
[
[
566,
581
],
[
35209,
35224
],
[
35099,
35114
]
],
[
[
583,
594
],
[
24388,
24399
],
[
36738,
36749
]
],
[
[
623,
637
],
[
32140,
32154
],
[
32035,
32049
]
],
[
[
639,
656
],
[
32488,
32505
]
],
[
[
658,
670
],
[
31203,
31215
],
[
31106,
31118
]
],
[
[
672,
685
],
[
31514,
31527
],
[
31413,
31426
]
],
[
[
687,
705
],
[
30874,
30892
],
[
30753,
30771
]
],
[
[
713,
728
],
[
33476,
33491
],
[
33367,
33382
]
],
[
[
730,
747
],
[
33825,
33842
],
[
33708,
33725
]
],
[
[
749,
763
],
[
34487,
34501
],
[
34364,
34378
]
],
[
[
765,
784
],
[
34855,
34874
],
[
34731,
34750
]
],
[
[
799,
821
],
[
6995,
6999
],
[
8394,
8398
],
[
9882,
9886
],
[
20916,
20920
],
[
22127,
22131
],
[
3457,
3461
],
[
3616,
3620
],
[
4339,
4343
],
[
4892,
4896
],
[
5751,
5755
],
[
5910,
5914
],
[
5986,
5990
]
],
[
[
851,
863
],
[
3159,
3171
]
],
[
[
878,
881
],
[
35726,
35729
]
],
[
[
896,
902
],
[
23771,
23777
]
],
[
[
917,
921
],
[
22618,
22622
],
[
22746,
22750
],
[
22964,
22968
],
[
23152,
23156
],
[
23450,
23454
],
[
23651,
23655
],
[
23849,
23853
]
],
[
[
936,
945
],
[
9396,
9405
],
[
14323,
14332
],
[
27415,
27424
]
],
[
[
960,
966
],
[
23576,
23582
]
],
[
[
969,
972
],
[
9749,
9752
],
[
14785,
14788
],
[
15977,
15980
],
[
17466,
17469
],
[
37142,
37145
],
[
37280,
37283
]
],
[
[
1004,
1010
],
[
37417,
37423
],
[
5406,
5412
],
[
6367,
6373
]
],
[
[
1022,
1025
],
[
6572,
6575
],
[
7928,
7931
],
[
10306,
10309
],
[
11761,
11764
],
[
12068,
12071
],
[
12341,
12344
],
[
12598,
12601
],
[
12714,
12717
],
[
12836,
12839
],
[
12994,
12997
],
[
13184,
13187
],
[
13366,
13369
],
[
13781,
13784
],
[
14387,
14390
],
[
14846,
14849
],
[
15037,
15040
],
[
15164,
15167
],
[
15582,
15585
],
[
16028,
16031
],
[
16159,
16162
],
[
16281,
16284
],
[
17063,
17066
],
[
18139,
18142
],
[
18258,
18261
],
[
18371,
18374
],
[
18508,
18511
],
[
18659,
18662
],
[
19125,
19128
],
[
20271,
20274
],
[
20390,
20393
],
[
20991,
20994
],
[
21374,
21377
],
[
21803,
21806
],
[
22180,
22183
],
[
22430,
22433
],
[
22861,
22864
],
[
22985,
22988
],
[
23477,
23480
],
[
23672,
23675
],
[
23870,
23873
],
[
24115,
24118
],
[
24920,
24923
],
[
26247,
26250
],
[
26468,
26471
],
[
27185,
27188
],
[
27611,
27614
],
[
28497,
28500
],
[
29552,
29555
],
[
30406,
30409
],
[
30632,
30635
],
[
30782,
30785
],
[
30997,
31000
],
[
31129,
31132
],
[
31302,
31305
],
[
31437,
31440
],
[
31617,
31620
],
[
31749,
31752
],
[
31922,
31925
],
[
32060,
32063
],
[
32247,
32250
],
[
32396,
32399
],
[
32610,
32613
],
[
32742,
32745
],
[
32915,
32918
],
[
33056,
33059
],
[
33250,
33253
],
[
33393,
33396
],
[
33589,
33592
],
[
33736,
33739
],
[
33944,
33947
],
[
34073,
34076
],
[
34239,
34242
],
[
34389,
34392
],
[
34606,
34609
],
[
34761,
34764
],
[
34982,
34985
],
[
35125,
35128
],
[
35320,
35323
]
],
[
[
5499,
5508
],
[
35438,
35447
]
],
[
[
6682,
7797
]
],
[
[
8038,
10265
]
],
[
[
10366,
11670
]
],
[
[
11829,
12064
]
],
[
[
12178,
12337
]
],
[
[
12402,
12594
]
],
[
[
12655,
12710
]
],
[
[
12773,
12832
]
],
[
[
12932,
12990
]
],
[
[
13096,
13180
]
],
[
[
13290,
13362
]
],
[
[
13515,
13777
]
],
[
[
13886,
14383
]
],
[
[
14528,
14842
]
],
[
[
14902,
15033
]
],
[
[
15121,
15160
]
],
[
[
15270,
15578
]
],
[
[
15719,
16024
]
],
[
[
16090,
16155
]
],
[
[
16218,
16278
]
],
[
[
16400,
17060
]
],
[
[
17262,
18135
]
],
[
[
18197,
18254
]
],
[
[
18314,
18367
]
],
[
[
18455,
18504
]
],
[
[
18600,
18655
]
],
[
[
18730,
19121
]
],
[
[
19251,
20267
]
],
[
[
20327,
20386
]
],
[
[
20459,
20987
]
],
[
[
21097,
21370
]
],
[
[
21486,
21799
]
],
[
[
21918,
22176
]
],
[
[
22283,
22426
]
],
[
[
22492,
22857
]
],
[
[
22922,
22981
]
],
[
[
23047,
23473
]
],
[
[
23540,
23668
]
],
[
[
23735,
23866
]
],
[
[
23935,
24111
]
],
[
[
24187,
24916
]
],
[
[
25032,
26243
]
],
[
[
26367,
26464
]
],
[
[
26587,
27181
]
],
[
[
27258,
27607
]
],
[
[
27684,
28493
]
],
[
[
28634,
29548
]
],
[
[
29623,
30129
]
],
[
[
30136,
30147
],
[
28018,
28029
],
[
28974,
28985
],
[
29692,
29703
]
],
[
[
30564,
30629
]
],
[
[
30704,
30779
]
],
[
[
30917,
30994
]
],
[
[
31063,
31126
]
],
[
[
31240,
31299
]
],
[
[
31369,
31434
]
],
[
[
31552,
31614
]
],
[
[
31683,
31746
]
],
[
[
31860,
31919
]
],
[
[
31990,
32057
]
],
[
[
32179,
32244
]
],
[
[
32319,
32393
]
],
[
[
32530,
32607
]
],
[
[
32676,
32739
]
],
[
[
32853,
32912
]
],
[
[
32984,
33053
]
],
[
[
33179,
33247
]
],
[
[
33319,
33390
]
],
[
[
33516,
33586
]
],
[
[
33660,
33733
]
],
[
[
33867,
33941
]
],
[
[
34009,
34070
]
],
[
[
34180,
34236
]
],
[
[
34313,
34386
]
],
[
[
34526,
34603
]
],
[
[
34678,
34758
]
],
[
[
34899,
34979
]
],
[
[
35052,
35122
]
],
[
[
35249,
35317
]
],
[
[
35376,
35435
]
],
[
[
35484,
37334
]
],
[
[
37341,
37345
]
]
] |
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('./saved_model')
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_quant_model = converter.convert()
with open('./mobilenet_v3_large_224_dm07_weight_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Weight Quantization complete! - mobilenet_v3_large_224_dm07_weight_quant.tflite")
| [
[
[
7,
23
],
[
25,
27
],
[
121,
123
],
[
206,
208
]
],
[
[
109,
118
],
[
179,
188
],
[
263,
272
]
],
[
[
242,
260
],
[
370,
388
]
],
[
[
355,
356
],
[
362,
363
]
]
] |
from typing import List
from . import AccessRequest, FileID, PartSpec, PartsGenerator
class NonCorrelatedSchemesGenerator(object):
def __init__(self, number: int, fraction: float) -> None:
self._number: int = number
self._fraction: float = fraction
self._parts_number: int = 2 ** number
@property
def number(self) -> int:
return self._number
@property
def fraction(self) -> float:
return self._fraction
def parts(self, index: int, total_bytes: int) -> List[PartSpec]:
scheme_parts_number = 2 ** (self._number - 1)
parts: List[PartSpec] = []
for i in range(scheme_parts_number):
# Insert 1 bit at index into binary representation of i
part_index = (((i << 1 >> index) | 1) << index) | (i & ((1 << index) - 1))
containing_schemes = bin(part_index).count('1')
part_bytes = round(total_bytes * (
self._fraction ** containing_schemes
*
(1 - self._fraction) ** (self._number - containing_schemes)
))
parts.append((part_index, part_bytes))
return parts
def access_request(self, index: int, file: FileID, total_bytes: int) -> AccessRequest:
return AccessRequest(file, self.parts(index, total_bytes))
class WithIndex(PartsGenerator):
def __init__(self, generator: 'NonCorrelatedSchemesGenerator', index: int) -> None:
self._generator: NonCorrelatedSchemesGenerator = generator
self._index: int = index
def parts(self, total_bytes: int) -> List[PartSpec]:
return self._generator.parts(self._index, total_bytes)
def access_request(self, file: FileID, total_bytes: int) -> AccessRequest:
return self._generator.access_request(self._index, file, total_bytes)
def with_index(self, index: int) -> WithIndex:
return self.WithIndex(self, index)
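# --- worked example (comments only, not part of the original module) ---
# For number=2, fraction=0.5 the file is split into 2**2 = 4 parts, indexed by a
# 2-bit mask recording which schemes contain each part. parts(index=0,
# total_bytes=100) iterates i in {0, 1} and inserts a 1 bit at position 0:
#   i=0 -> part_index=0b01, i=1 -> part_index=0b11
# Each part gets round(100 * 0.5**popcount * 0.5**(2 - popcount)) = 25 bytes,
# so scheme 0 is assigned [(1, 25), (3, 25)], i.e. half of the file as expected
# for fraction=0.5.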
| [
[
[
19,
23
],
[
474,
478
],
[
548,
552
],
[
1417,
1421
]
],
[
[
39,
52
],
[
1090,
1103
],
[
1114,
1127
],
[
1554,
1567
]
],
[
[
54,
60
],
[
1061,
1067
],
[
1525,
1531
]
],
[
[
62,
70
],
[
479,
487
],
[
553,
561
],
[
1422,
1430
]
],
[
[
72,
86
],
[
1184,
1198
]
],
[
[
95,
124
],
[
1307,
1336
]
]
] |
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Monitor
@register(Monitor)
class MonitorSerializer(Serializer):
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.guid),
'status': obj.get_status_display(),
'name': obj.name,
'dateCreated': obj.date_added,
}
| [
[
[
23,
38
]
],
[
[
47,
50
],
[
278,
281
]
],
[
[
87,
97
],
[
187,
197
]
],
[
[
99,
107
],
[
145,
153
]
],
[
[
134,
141
],
[
154,
161
]
],
[
[
169,
186
]
]
] |
import requests
headers = {
'X-Api-Key': '123456789',
}
params = {
'test': '2',
'limit': '100',
'w': '4',
}
response = requests.get('http://localhost:28139/synthetics/api/v3/monitors', params=params, headers=headers)
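# --- illustrative follow-up (assumption, not part of the original snippet) ---
# The endpoint and API key above are placeholders, so the call is only expected
# to succeed against a local test server; checking the status code before
# decoding the body is the usual pattern.
if response.ok:
    print(response.json())
else:
    print('Request failed:', response.status_code)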
| [
[
[
7,
15
],
[
138,
146
]
],
[
[
17,
24
],
[
227,
234
]
],
[
[
62,
68
],
[
211,
217
]
],
[
[
127,
135
]
]
] |
import time
from datetime import datetime
from requests import Response
import json
import os
import TunnelSSH
import Helpers
import SettingsStorage
import sys
def create_tunnel(tun: dict):
timeout_time: datetime = datetime.fromisoformat(tun["timeout_time"])
if timeout_time <= datetime.utcnow():
Helpers.log_that("Tunnel already not valid")
return
if tun["connection_type"] == Helpers.ConnectionTypeEnum.ssh_tunnel:
TunnelSSH.create_ssh_tunnel(tun)
def destroy_tunnel(tun: dict):
if tun["connection_type"] == Helpers.ConnectionTypeEnum.ssh_tunnel:
TunnelSSH.destroy_ssh_tunnel(tun)
def destroy_expired_tunnels():
for tun in SettingsStorage.datajson["tunnels"]:
timeout_time: datetime = datetime.fromisoformat(tun["timeout_time"])
if timeout_time <= datetime.utcnow():
Helpers.log_that("A tunnel has expired, destroy")
destroy_tunnel(tun)
def act_on_tunnel(tun: dict):
Helpers.log_that(tun)
tunnel_id: int = tun["id"]
    conn_type: Helpers.ConnectionTypeEnum = tun["connection_type"]
state: Helpers.ConnectionStateEnum = tun["connection_state"]
port_to_tunnel: int = tun["port_to_tunnel"]
timeout_time: datetime = tun["timeout_time"]
    temporary_pubkey: str = tun["temporary_pubkey_for_agent_ssh"]
remote_ssh_server: str = tun["remote_ssh_server"]
remote_ssh_fingerprint: str = tun["remote_ssh_fingerprint"]
    remote_ssh_username: str = tun["remote_ssh_username"]
reverse_port: int = tun["reverse_port"]
remote_ssh_port: int = tun["remote_ssh_port"]
temporary_tunnel_privkey: str = tun["temporary_tunnel_privkey"]
if state == Helpers.ConnectionStateEnum.connected:
        # first check what we should do:
Helpers.log_that("Requesting connection that should already be connected, ignore")
return
elif state == Helpers.ConnectionStateEnum.requested:
Helpers.log_that("Requesting new connection, act upon that!")
create_tunnel(tun)
elif state == Helpers.ConnectionStateEnum.disconnect_requested:
Helpers.log_that("Requesting to destroy the connection id {}".format(tunnel_id))
destroy_tunnel(tun)
def parse_success_resp(resp: Response):
j: dict = resp.json()
keys = j.keys()
if "message" in keys and len(j["message"]) > 0:
Helpers.log_that(j["message"])
if "tunnels_requesting_action" in keys and len(j["tunnels_requesting_action"]) > 0:
Helpers.log_that("There are {} tunnels requesting action:".format(len(j["tunnels_requesting_action"])))
for tun in j["tunnels_requesting_action"]:
act_on_tunnel(tun)
def main():
# Our small local "db" consisting of Tunnels which are active
while True: # Do this all the time
try:
# First check if this is installed, if not, send the installation data
if not SettingsStorage.is_installed:
resp: Response = Helpers.ReqSession.post(SettingsStorage.server_url + "/agents/agent_install",
json=Helpers.get_install_json())
if resp.status_code == 200:
SettingsStorage.is_installed = True
SettingsStorage.datajson["is_installed"] = True
Helpers.log_that("Successfully Installed!")
else:
msg = ""
if "detail" in resp.json().keys():
msg = resp.json()["detail"]
Helpers.log_that(
"Error when trying to install the agent. Code {}, with message {}".format(str(resp.status_code), msg))
# First check if we have any Tunnel that should be disconnected TBD
destroy_expired_tunnels()
Helpers.remove_expired_ssh_auth_keys()
resp: Response = Helpers.ReqSession.post(SettingsStorage.server_url + "/agents/query", json=Helpers.get_query_json())
if resp.status_code == 200:
parse_success_resp(resp)
else:
msg = ""
if "detail" in resp.json().keys():
msg = resp.json()["detail"]
Helpers.log_that(
"Error when querying the API. Code {}, with message {}".format(str(resp.status_code), msg))
except ValueError as e:
Helpers.log_that("Could not process some value" + str(e.args))
except Exception as e:
Helpers.log_that("Could not connect to server " + str(e.args))
datafile = open(os.path.join(sys.path[0], "data.json"), "w")
json.dump(SettingsStorage.datajson, datafile)
datafile.close()
time.sleep(SettingsStorage.interval_seconds)
if __name__ == "__main__":
# execute only if run as a script
main()
| [
[
[
7,
11
],
[
4716,
4720
]
],
[
[
33,
41
],
[
222,
230
],
[
211,
219
],
[
289,
297
],
[
755,
763
],
[
744,
752
],
[
826,
834
],
[
1221,
1229
]
],
[
[
63,
71
],
[
2233,
2241
],
[
2952,
2960
],
[
3865,
3873
]
],
[
[
79,
83
],
[
4637,
4641
]
],
[
[
91,
93
],
[
4584,
4586
]
],
[
[
101,
110
],
[
457,
466
],
[
603,
612
]
],
[
[
118,
125
],
[
316,
323
],
[
410,
417
],
[
556,
563
],
[
857,
864
],
[
975,
982
],
[
1038,
1045
],
[
1101,
1108
],
[
1677,
1684
],
[
1765,
1772
],
[
1881,
1888
],
[
1928,
1935
],
[
2035,
2042
],
[
2093,
2100
],
[
2350,
2357
],
[
2478,
2485
],
[
2963,
2970
],
[
3093,
3100
],
[
3309,
3316
],
[
3531,
3538
],
[
3808,
3815
],
[
3876,
3883
],
[
3951,
3958
],
[
4216,
4223
],
[
4390,
4397
],
[
4496,
4503
]
],
[
[
133,
148
],
[
685,
700
],
[
2900,
2915
],
[
2987,
3002
],
[
3185,
3200
],
[
3241,
3256
],
[
3900,
3915
],
[
4647,
4662
],
[
4727,
4742
]
],
[
[
156,
159
],
[
4597,
4600
]
],
[
[
167,
180
],
[
1998,
2011
]
],
[
[
496,
510
],
[
919,
933
],
[
2182,
2196
]
],
[
[
643,
666
],
[
3770,
3793
]
],
[
[
945,
958
],
[
2645,
2658
]
],
[
[
2208,
2226
],
[
4033,
4051
]
],
[
[
2670,
2674
],
[
4832,
4836
]
]
] |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from cotede.humanqc.humaneval import HumanQC
| [
[
[
124,
131
]
]
] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin_tempest_plugin.common import constants
from senlin_tempest_plugin.common import utils
from senlin_tempest_plugin.functional import base
class TestScalingPolicy(base.BaseSenlinFunctionalTest):
def setUp(self):
super(TestScalingPolicy, self).setUp()
self.profile_id = utils.create_a_profile(self)
self.addCleanup(utils.delete_a_profile, self, self.profile_id)
self.cluster_id = utils.create_a_cluster(self, self.profile_id,
min_size=0, max_size=5,
desired_capacity=1)
self.addCleanup(utils.delete_a_cluster, self, self.cluster_id)
@decorators.attr(type=['functional'])
@decorators.idempotent_id('6b513a5d-75b6-447a-b95d-e17b84ac9ee8')
def test_scaling_policy(self):
        # Create a scaling policy that targets the CLUSTER_SCALE_OUT action
spec = constants.spec_scaling_policy
spec['properties'] = {
'event': 'CLUSTER_SCALE_OUT',
'adjustment': {
'type': 'CHANGE_IN_CAPACITY',
'number': 2,
'min_step': 1,
'best_effort': True
}
}
policy_id = utils.create_a_policy(self, spec)
scaleout_policy = utils.get_a_policy(self, policy_id)
self.addCleanup(utils.delete_a_policy, self, scaleout_policy['id'])
        # Create a scaling policy that targets the CLUSTER_SCALE_IN action
spec['properties'] = {
'event': 'CLUSTER_SCALE_IN',
'adjustment': {
'type': 'CHANGE_IN_PERCENTAGE',
'number': 50,
'min_step': 2,
'best_effort': False
}
}
policy_id = utils.create_a_policy(self, spec)
scalein_policy = utils.get_a_policy(self, policy_id)
self.addCleanup(utils.delete_a_policy, self, scalein_policy['id'])
# Attach scale in/out policies to cluster
for policy in [scaleout_policy, scalein_policy]:
utils.cluster_attach_policy(self, self.cluster_id, policy['id'])
self.addCleanup(utils.cluster_detach_policy, self,
self.cluster_id, policy['id'])
# Scale out cluster without count specified
utils.cluster_scale_out(self, self.cluster_id)
# Verify scale out result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(3, cluster['desired_capacity'])
self.assertEqual(3, len(cluster['nodes']))
# Scale out cluster with count set to 1
utils.cluster_scale_out(self, self.cluster_id, count=1)
# Verify scale out result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(4, cluster['desired_capacity'])
self.assertEqual(4, len(cluster['nodes']))
# Keep scaling out cluster with count set to 2 to
# verify best_effort parameter
utils.cluster_scale_out(self, self.cluster_id, count=2)
# Verify scale out result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(5, cluster['desired_capacity'])
self.assertEqual(5, len(cluster['nodes']))
# Scale in cluster without count specified
utils.cluster_scale_in(self, self.cluster_id)
# Verify scale in result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(3, cluster['desired_capacity'])
self.assertEqual(3, len(cluster['nodes']))
# Scale in cluster without count specified to
# verify min_step parameter
utils.cluster_scale_in(self, self.cluster_id)
# Verify scale in result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(1, cluster['desired_capacity'])
self.assertEqual(1, len(cluster['nodes']))
# Keep scaling in cluster with count set to 2 to
# verify best_effort parameter
res = utils.cluster_scale_in(self, self.cluster_id, count=2,
expected_status='FAILED')
# Verify action result and action failure reason
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(1, cluster['desired_capacity'])
self.assertEqual(1, len(cluster['nodes']))
reason = ("Policy check failure: Failed policy '%s': The target "
"capacity (-1) is less than the cluster's "
"min_size (0).") % scalein_policy['name']
self.assertEqual(reason, res)
| [
[
[
570,
580
],
[
1274,
1284
],
[
1316,
1326
]
],
[
[
623,
632
],
[
1501,
1510
]
],
[
[
674,
679
],
[
883,
888
],
[
936,
941
],
[
1009,
1014
],
[
1221,
1226
],
[
1818,
1823
],
[
1878,
1883
],
[
1938,
1943
],
[
2350,
2355
],
[
2409,
2414
],
[
2469,
2474
],
[
2640,
2645
],
[
2733,
2738
],
[
2888,
2893
],
[
2988,
2993
],
[
3250,
3255
],
[
3359,
3364
],
[
3670,
3675
],
[
3779,
3784
],
[
4044,
4049
],
[
4142,
4147
],
[
4446,
4451
],
[
4544,
4549
],
[
4860,
4865
],
[
5054,
5059
]
],
[
[
725,
729
],
[
756,
760
]
],
[
[
738,
755
],
[
824,
841
]
]
] |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
import copy
import os
import re
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import html_validation_service
from core.domain import param_domain
from core.domain import state_domain
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
def mock_get_filename_with_dimensions(filename, unused_exp_id):
return html_validation_service.regenerate_image_filename_using_dimensions(
filename, 490, 120)
class ExplorationChangeTests(test_utils.GenericTestBase):
def test_exp_change_object_with_missing_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Missing cmd key in change dict'):
exp_domain.ExplorationChange({'invalid': 'data'})
def test_exp_change_object_with_invalid_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Command invalid is not allowed'):
exp_domain.ExplorationChange({'cmd': 'invalid'})
def test_exp_change_object_with_missing_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following required attributes are missing: '
'new_value')):
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'property_name': 'content',
'old_value': 'old_value'
})
def test_exp_change_object_with_extra_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following extra attributes are present: invalid')):
exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'old_state_name',
'new_state_name': 'new_state_name',
'invalid': 'invalid'
})
def test_exp_change_object_with_invalid_exploration_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd edit_exploration_property: '
'invalid is not allowed')):
exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_exp_change_object_with_invalid_state_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd edit_state_property: '
'invalid is not allowed')):
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'state_name',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_exp_change_object_with_create_new(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'create_new',
'category': 'category',
'title': 'title'
})
self.assertEqual(exp_change_object.cmd, 'create_new')
self.assertEqual(exp_change_object.category, 'category')
self.assertEqual(exp_change_object.title, 'title')
def test_exp_change_object_with_add_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'state_name',
})
self.assertEqual(exp_change_object.cmd, 'add_state')
self.assertEqual(exp_change_object.state_name, 'state_name')
def test_exp_change_object_with_rename_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'old_state_name',
'new_state_name': 'new_state_name'
})
self.assertEqual(exp_change_object.cmd, 'rename_state')
self.assertEqual(exp_change_object.old_state_name, 'old_state_name')
self.assertEqual(exp_change_object.new_state_name, 'new_state_name')
def test_exp_change_object_with_delete_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'state_name',
})
self.assertEqual(exp_change_object.cmd, 'delete_state')
self.assertEqual(exp_change_object.state_name, 'state_name')
def test_exp_change_object_with_edit_state_property(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'state_name',
'property_name': 'content',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(exp_change_object.cmd, 'edit_state_property')
self.assertEqual(exp_change_object.state_name, 'state_name')
self.assertEqual(exp_change_object.property_name, 'content')
self.assertEqual(exp_change_object.new_value, 'new_value')
self.assertEqual(exp_change_object.old_value, 'old_value')
def test_exp_change_object_with_edit_exploration_property(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(exp_change_object.cmd, 'edit_exploration_property')
self.assertEqual(exp_change_object.property_name, 'title')
self.assertEqual(exp_change_object.new_value, 'new_value')
self.assertEqual(exp_change_object.old_value, 'old_value')
def test_exp_change_object_with_migrate_states_schema_to_latest_version(
self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'migrate_states_schema_to_latest_version',
'from_version': 'from_version',
'to_version': 'to_version',
})
self.assertEqual(
exp_change_object.cmd, 'migrate_states_schema_to_latest_version')
self.assertEqual(exp_change_object.from_version, 'from_version')
self.assertEqual(exp_change_object.to_version, 'to_version')
def test_exp_change_object_with_revert_commit(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 'version_number'
})
self.assertEqual(
exp_change_object.cmd,
exp_models.ExplorationModel.CMD_REVERT_COMMIT)
self.assertEqual(exp_change_object.version_number, 'version_number')
def test_to_dict(self):
exp_change_dict = {
'cmd': 'create_new',
'title': 'title',
'category': 'category'
}
exp_change_object = exp_domain.ExplorationChange(exp_change_dict)
self.assertEqual(exp_change_object.to_dict(), exp_change_dict)
class ExplorationVersionsDiffDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration versions difference domain object."""
def setUp(self):
super(ExplorationVersionsDiffDomainUnitTests, self).setUp()
self.exp_id = 'exp_id1'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, self.exp_id,
assets_list)
self.exploration = exp_fetchers.get_exploration_by_id(self.exp_id)
def test_correct_creation_of_version_diffs(self):
# Rename a state.
self.exploration.rename_state('Home', 'Renamed state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(
exp_versions_diff.old_to_new_state_names, {
'Home': 'Renamed state'
})
self.exploration.version += 1
# Add a state.
self.exploration.add_states(['New state'])
self.exploration.states['New state'] = copy.deepcopy(
self.exploration.states['Renamed state'])
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, ['New state'])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Delete state.
self.exploration.delete_state('New state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'New state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, ['New state'])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test addition and multiple renames.
self.exploration.add_states(['New state'])
self.exploration.states['New state'] = copy.deepcopy(
self.exploration.states['Renamed state'])
self.exploration.rename_state('New state', 'New state2')
self.exploration.rename_state('New state2', 'New state3')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state2',
'new_state_name': 'New state3'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, ['New state3'])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test addition, rename and deletion.
self.exploration.add_states(['New state 2'])
self.exploration.rename_state('New state 2', 'Renamed state 2')
self.exploration.delete_state('Renamed state 2')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state 2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state 2',
'new_state_name': 'Renamed state 2'
}), exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'Renamed state 2'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test multiple renames and deletion.
self.exploration.rename_state('New state3', 'Renamed state 3')
self.exploration.rename_state('Renamed state 3', 'Renamed state 4')
self.exploration.delete_state('Renamed state 4')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state3',
'new_state_name': 'Renamed state 3'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Renamed state 3',
'new_state_name': 'Renamed state 4'
}), exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'Renamed state 4'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(
exp_versions_diff.deleted_state_names, ['New state3'])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
def test_cannot_create_exploration_change_with_invalid_change_dict(self):
with self.assertRaisesRegexp(
Exception, 'Missing cmd key in change dict'):
exp_domain.ExplorationChange({
'invalid_cmd': 'invalid'
})
def test_cannot_create_exploration_change_with_invalid_cmd(self):
with self.assertRaisesRegexp(
Exception, 'Command invalid_cmd is not allowed'):
exp_domain.ExplorationChange({
'cmd': 'invalid_cmd'
})
def test_cannot_create_exploration_change_with_invalid_state_property(self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'state_name': '',
'new_value': ''
})
self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange))
with self.assertRaisesRegexp(
Exception,
'Value for property_name in cmd edit_state_property: '
'invalid_property is not allowed'):
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'invalid_property',
'state_name': '',
'new_value': ''
})
def test_cannot_create_exploration_change_with_invalid_exploration_property(
self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': ''
})
self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange))
with self.assertRaisesRegexp(
Exception,
'Value for property_name in cmd edit_exploration_property: '
'invalid_property is not allowed'):
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'invalid_property',
'new_value': ''
})
def test_revert_exploration_commit(self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 1
})
self.assertEqual(exp_change.version_number, 1)
exp_change = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 2
})
self.assertEqual(exp_change.version_number, 2)
class ExpVersionReferenceTests(test_utils.GenericTestBase):
def test_create_exp_version_reference_object(self):
exp_version_reference = exp_domain.ExpVersionReference('exp_id', 1)
self.assertEqual(
exp_version_reference.to_dict(), {
'exp_id': 'exp_id',
'version': 1
})
def test_validate_exp_version(self):
with self.assertRaisesRegexp(
Exception,
'Expected version to be an int, received invalid_version'):
exp_domain.ExpVersionReference('exp_id', 'invalid_version')
def test_validate_exp_id(self):
with self.assertRaisesRegexp(
Exception, 'Expected exp_id to be a str, received 0'):
exp_domain.ExpVersionReference(0, 1)
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
# TODO(bhenning): The validation tests below should be split into separate
# unit tests. Also, all validation errors should be covered in the tests.
def test_validation(self):
"""Test validation of explorations."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.init_state_name = ''
exploration.states = {}
exploration.title = 'Hello #'
self._assert_validation_error(exploration, 'Invalid character #')
exploration.title = 'Title'
exploration.category = 'Category'
# Note: If '/' ever becomes a valid state name, ensure that the rule
        # editor frontend template is fixed -- it currently uses '/' as a
# sentinel for an invalid state name.
bad_state = state_domain.State.create_default_state('/')
exploration.states = {'/': bad_state}
self._assert_validation_error(
exploration, 'Invalid character / in a state name')
new_state = state_domain.State.create_default_state('ABC')
new_state.update_interaction_id('TextInput')
# The 'states' property must be a non-empty dict of states.
exploration.states = {}
self._assert_validation_error(
exploration, 'exploration has no states')
exploration.states = {'A string #': new_state}
self._assert_validation_error(
exploration, 'Invalid character # in a state name')
exploration.states = {'A string _': new_state}
self._assert_validation_error(
exploration, 'Invalid character _ in a state name')
exploration.states = {'ABC': new_state}
self._assert_validation_error(
exploration, 'has no initial state name')
exploration.init_state_name = 'initname'
self._assert_validation_error(
exploration,
r'There is no state in \[\'ABC\'\] corresponding to '
'the exploration\'s initial state name initname.')
# Test whether a default outcome to a non-existing state is invalid.
exploration.states = {exploration.init_state_name: new_state}
self._assert_validation_error(
exploration, 'destination ABC is not a valid')
# Restore a valid exploration.
init_state = exploration.states[exploration.init_state_name]
default_outcome_dict = init_state.interaction.default_outcome.to_dict()
default_outcome_dict['dest'] = exploration.init_state_name
init_state.update_interaction_default_outcome(default_outcome_dict)
exploration.validate()
# Ensure an invalid destination can also be detected for answer groups.
# Note: The state must keep its default_outcome, otherwise it will
# trigger a validation error for non-terminal states needing to have a
# default outcome. To validate the outcome of the answer group, this
# default outcome must point to a valid state.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
old_answer_groups = copy.deepcopy(init_state.interaction.answer_groups)
old_answer_groups.append({
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Feedback</p>'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': None
})
init_state.update_interaction_answer_groups(old_answer_groups)
exploration.validate()
interaction = init_state.interaction
answer_groups = interaction.answer_groups
answer_group = answer_groups[0]
answer_group.outcome.dest = 'DEF'
self._assert_validation_error(
exploration, 'destination DEF is not a valid')
# Restore a valid exploration.
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
answer_group.outcome.dest = exploration.init_state_name
exploration.validate()
# Validate RuleSpec.
rule_spec = answer_group.rule_specs[0]
rule_spec.inputs = {}
self._assert_validation_error(
exploration, 'RuleSpec \'Contains\' is missing inputs')
rule_spec.inputs = 'Inputs string'
self._assert_validation_error(
exploration, 'Expected inputs to be a dict')
rule_spec.inputs = {'x': 'Test'}
rule_spec.rule_type = 'FakeRuleType'
self._assert_validation_error(exploration, 'Unrecognized rule type')
rule_spec.inputs = {'x': 15}
rule_spec.rule_type = 'Contains'
with self.assertRaisesRegexp(
Exception, 'Expected unicode string, received 15'
):
exploration.validate()
rule_spec.inputs = {'x': '{{ExampleParam}}'}
self._assert_validation_error(
exploration,
'RuleSpec \'Contains\' has an input with name \'x\' which refers '
'to an unknown parameter within the exploration: ExampleParam')
# Restore a valid exploration.
exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
'UnicodeString')
exploration.validate()
# Validate Outcome.
outcome = answer_group.outcome
destination = exploration.init_state_name
outcome.dest = None
self._assert_validation_error(
exploration, 'Every outcome should have a destination.')
# Try setting the outcome destination to something other than a string.
outcome.dest = 15
self._assert_validation_error(
exploration, 'Expected outcome dest to be a string')
outcome.dest = destination
outcome.feedback = state_domain.SubtitledHtml('feedback_1', '')
exploration.validate()
outcome.labelled_as_correct = 'hello'
self._assert_validation_error(
exploration, 'The "labelled_as_correct" field should be a boolean')
# Test that labelled_as_correct must be False for self-loops, and that
# this causes a strict validation failure but not a normal validation
# failure.
outcome.labelled_as_correct = True
with self.assertRaisesRegexp(
Exception, 'is labelled correct but is a self-loop.'
):
exploration.validate(strict=True)
exploration.validate()
outcome.labelled_as_correct = False
exploration.validate()
outcome.param_changes = 'Changes'
self._assert_validation_error(
exploration, 'Expected outcome param_changes to be a list')
outcome.param_changes = [param_domain.ParamChange(
0, 'generator_id', {})]
self._assert_validation_error(
exploration,
'Expected param_change name to be a string, received 0')
outcome.param_changes = []
exploration.validate()
outcome.refresher_exploration_id = 12345
self._assert_validation_error(
exploration,
'Expected outcome refresher_exploration_id to be a string')
outcome.refresher_exploration_id = None
exploration.validate()
outcome.refresher_exploration_id = 'valid_string'
exploration.validate()
outcome.missing_prerequisite_skill_id = 12345
self._assert_validation_error(
exploration,
'Expected outcome missing_prerequisite_skill_id to be a string')
outcome.missing_prerequisite_skill_id = None
exploration.validate()
outcome.missing_prerequisite_skill_id = 'valid_string'
exploration.validate()
# Test that refresher_exploration_id must be None for non-self-loops.
new_state_name = 'New state'
exploration.add_states([new_state_name])
outcome.dest = new_state_name
outcome.refresher_exploration_id = 'another_string'
self._assert_validation_error(
exploration,
'has a refresher exploration ID, but is not a self-loop')
outcome.refresher_exploration_id = None
exploration.validate()
exploration.delete_state(new_state_name)
# Validate InteractionInstance.
interaction.id = 15
self._assert_validation_error(
exploration, 'Expected interaction id to be a string')
interaction.id = 'SomeInteractionTypeThatDoesNotExist'
self._assert_validation_error(exploration, 'Invalid interaction id')
interaction.id = 'TextInput'
exploration.validate()
interaction.customization_args = []
self._assert_validation_error(
exploration, 'Expected customization args to be a dict')
interaction.customization_args = {15: ''}
self._assert_validation_error(
exploration, 'Invalid customization arg name')
interaction.customization_args = {'placeholder': ''}
exploration.validate()
interaction.answer_groups = {}
self._assert_validation_error(
exploration, 'Expected answer groups to be a list')
interaction.answer_groups = answer_groups
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have a default outcome.')
interaction.id = 'TextInput'
init_state.update_interaction_default_outcome(None)
self._assert_validation_error(
exploration,
'Non-terminal interactions must have a default outcome.')
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have any answer groups.')
# A terminal interaction without a default outcome or answer group is
# valid. This resets the exploration back to a valid state.
init_state.update_interaction_answer_groups([])
exploration.validate()
# Restore a valid exploration.
interaction.id = 'TextInput'
answer_groups_list = [
answer_group.to_dict() for answer_group in answer_groups]
init_state.update_interaction_answer_groups(answer_groups_list)
init_state.update_interaction_default_outcome(default_outcome.to_dict())
exploration.validate()
init_state.update_interaction_solution({
'answer_is_exclusive': True,
'correct_answer': 'hello_world!',
'explanation': {
'content_id': 'solution',
'html': 'hello_world is a string'
}
})
self._assert_validation_error(
exploration,
re.escape('Hint(s) must be specified if solution is specified'))
init_state.update_interaction_solution(None)
interaction.hints = {}
self._assert_validation_error(
exploration, 'Expected hints to be a list')
interaction.hints = []
# Validate AnswerGroup.
answer_groups_dict = {
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': 1
}
init_state.update_interaction_answer_groups([answer_groups_dict])
self._assert_validation_error(
exploration,
'Expected tagged skill misconception id to be a str, received 1')
answer_groups_dict = {
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id':
'invalid_tagged_skill_misconception_id'
}
init_state.update_interaction_answer_groups([answer_groups_dict])
self._assert_validation_error(
exploration,
'Expected the format of tagged skill misconception id '
'to be <skill_id>-<misconception_id>, received '
'invalid_tagged_skill_misconception_id')
init_state.interaction.answer_groups[0].rule_specs = {}
self._assert_validation_error(
exploration, 'Expected answer group rules to be a list')
first_answer_group = init_state.interaction.answer_groups[0]
first_answer_group.tagged_skill_misconception_id = None
first_answer_group.rule_specs = []
self._assert_validation_error(
exploration,
'There must be at least one rule or training data for each'
' answer group.')
exploration.states = {
exploration.init_state_name: (
state_domain.State.create_default_state(
exploration.init_state_name))
}
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
exploration.validate()
exploration.language_code = 'fake_code'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'English'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'en'
exploration.validate()
exploration.param_specs = 'A string'
self._assert_validation_error(exploration, 'param_specs to be a dict')
exploration.param_specs = {
'@': param_domain.ParamSpec.from_dict({
'obj_type': 'UnicodeString'
})
}
self._assert_validation_error(
exploration, 'Only parameter names with characters')
exploration.param_specs = {
'notAParamSpec': param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
exploration.validate()
def test_tag_validation(self):
"""Test validation of exploration tags."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.update_interaction_default_outcome(None)
exploration.validate()
exploration.tags = 'this should be a list'
self._assert_validation_error(
exploration, 'Expected \'tags\' to be a list')
exploration.tags = [123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['abc', 123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['']
self._assert_validation_error(exploration, 'Tags should be non-empty')
exploration.tags = ['123']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = ['ABC']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = [' a b']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b ']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b']
self._assert_validation_error(
exploration, 'Adjacent whitespace in tags should be collapsed')
exploration.tags = ['abc', 'abc']
self._assert_validation_error(
exploration, 'Some tags duplicate each other')
exploration.tags = ['computer science', 'analysis', 'a b c']
exploration.validate()
def test_title_category_and_objective_validation(self):
"""Test that titles, categories and objectives are validated only in
'strict' mode.
"""
self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration = exp_fetchers.get_exploration_by_id('exp_id')
exploration.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'title must be specified'
):
exploration.validate(strict=True)
exploration.title = 'A title'
with self.assertRaisesRegexp(
utils.ValidationError, 'category must be specified'
):
exploration.validate(strict=True)
exploration.category = 'A category'
with self.assertRaisesRegexp(
utils.ValidationError, 'objective must be specified'
):
exploration.validate(strict=True)
exploration.objective = 'An objective'
exploration.validate(strict=True)
def test_get_trainable_states_dict(self):
"""Test the get_trainable_states_dict() method."""
exp_id = 'exp_id1'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=False)
old_states = exp_fetchers.get_exploration_from_model(
exploration_model).states
exploration = exp_fetchers.get_exploration_by_id(exp_id)
# Rename a state to add it in unchanged answer group.
exploration.rename_state('Home', 'Renamed state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
})]
expected_dict = {
'state_names_with_changed_answer_groups': [],
'state_names_with_unchanged_answer_groups': ['Renamed state']
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Modify answer groups to trigger change in answer groups.
state = exploration.states['Renamed state']
exploration.states['Renamed state'].interaction.answer_groups.insert(
3, state.interaction.answer_groups[3])
answer_groups = []
for answer_group in state.interaction.answer_groups:
answer_groups.append(answer_group.to_dict())
change_list = [exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Renamed state',
'property_name': 'answer_groups',
'new_value': answer_groups
})]
expected_dict = {
'state_names_with_changed_answer_groups': ['Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Add new state to trigger change in answer groups.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
})]
expected_dict = {
'state_names_with_changed_answer_groups': [
'New state', 'Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Delete state.
exploration.delete_state('New state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'New state'
})]
expected_dict = {
'state_names_with_changed_answer_groups': ['Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Test addition and multiple renames.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
exploration.rename_state('New state', 'New state2')
exploration.rename_state('New state2', 'New state3')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state2',
'new_state_name': 'New state3'
})]
expected_dict = {
'state_names_with_changed_answer_groups': [
'Renamed state', 'New state3'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_has_state_name(self):
"""Test for has_state_name."""
demo = exp_domain.Exploration.create_default_exploration('0')
state_names = demo.states.keys()
self.assertEqual(state_names, ['Introduction'])
self.assertEqual(demo.has_state_name('Introduction'), True)
self.assertEqual(demo.has_state_name('Fake state name'), False)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
def test_interaction_with_none_id_is_not_terminal(self):
"""Test that an interaction with an id of None leads to is_terminal
being false.
"""
# Default exploration has a default interaction with an ID of None.
demo = exp_domain.Exploration.create_default_exploration('0')
init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
self.assertFalse(init_state.interaction.is_terminal)
def test_cannot_create_demo_exp_with_invalid_param_changes(self):
demo_exp = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo_exp.to_dict()
new_state = state_domain.State.create_default_state('new_state_name')
new_state.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'myParam',
'generator_id': 'RandomSelector'
})]
demo_dict['states']['new_state_name'] = new_state.to_dict()
demo_dict['param_specs'] = {
'ParamSpec': {'obj_type': 'UnicodeString'}
}
with self.assertRaisesRegexp(
Exception,
'Parameter myParam was used in a state but not '
'declared in the exploration param_specs.'):
exp_domain.Exploration.from_dict(demo_dict)
def test_validate_exploration_category(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.category = 1
with self.assertRaisesRegexp(
Exception, 'Expected category to be a string, received 1'):
exploration.validate()
def test_validate_exploration_objective(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.objective = 1
with self.assertRaisesRegexp(
Exception, 'Expected objective to be a string, received 1'):
exploration.validate()
def test_validate_exploration_blurb(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.blurb = 1
with self.assertRaisesRegexp(
Exception, 'Expected blurb to be a string, received 1'):
exploration.validate()
def test_validate_exploration_language_code(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.language_code = 1
with self.assertRaisesRegexp(
Exception, 'Expected language_code to be a string, received 1'):
exploration.validate()
def test_validate_exploration_author_notes(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.author_notes = 1
with self.assertRaisesRegexp(
Exception, 'Expected author_notes to be a string, received 1'):
exploration.validate()
def test_validate_exploration_states(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.states = 1
with self.assertRaisesRegexp(
Exception, 'Expected states to be a dict, received 1'):
exploration.validate()
def test_validate_exploration_outcome_dest(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.init_state.interaction.default_outcome.dest = None
with self.assertRaisesRegexp(
Exception, 'Every outcome should have a destination.'):
exploration.validate()
def test_validate_exploration_outcome_dest_type(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.init_state.interaction.default_outcome.dest = 1
with self.assertRaisesRegexp(
Exception, 'Expected outcome dest to be a string, received 1'):
exploration.validate()
def test_validate_exploration_states_schema_version(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.states_schema_version = None
with self.assertRaisesRegexp(
Exception, 'This exploration has no states schema version.'):
exploration.validate()
def test_validate_exploration_auto_tts_enabled(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.auto_tts_enabled = 1
with self.assertRaisesRegexp(
Exception, 'Expected auto_tts_enabled to be a bool, received 1'):
exploration.validate()
def test_validate_exploration_correctness_feedback_enabled(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.correctness_feedback_enabled = 1
with self.assertRaisesRegexp(
Exception,
'Expected correctness_feedback_enabled to be a bool, received 1'):
exploration.validate()
def test_validate_exploration_param_specs(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_specs = {
1: param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
with self.assertRaisesRegexp(
Exception, 'Expected parameter name to be a string, received 1'):
exploration.validate()
def test_validate_exploration_param_changes_type(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = 1
with self.assertRaisesRegexp(
Exception, 'Expected param_changes to be a list, received 1'):
exploration.validate()
def test_validate_exploration_param_name(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'invalid',
'generator_id': 'RandomSelector'
})]
with self.assertRaisesRegexp(
Exception,
'No parameter named \'invalid\' exists in this '
'exploration'):
exploration.validate()
def test_validate_exploration_reserved_param_name(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'all',
'generator_id': 'RandomSelector'
})]
with self.assertRaisesRegexp(
Exception,
'The exploration-level parameter with name \'all\' is '
'reserved. Please choose a different name.'):
exploration.validate()
def test_validate_exploration_is_non_self_loop(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.add_states(['DEF'])
default_outcome_dict = {
'dest': 'DEF',
'feedback': {
'content_id': 'default_outcome',
'html': '<p>Default outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': 'refresher_exploration_id',
'missing_prerequisite_skill_id': None
}
exploration.init_state.update_interaction_default_outcome(
default_outcome_dict)
with self.assertRaisesRegexp(
Exception,
'The default outcome for state Introduction has a refresher '
'exploration ID, but is not a self-loop.'):
exploration.validate()
def test_validate_exploration_answer_group_parameter(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
param_changes = [{
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'ParamChange',
'generator_id': 'RandomSelector'
}]
answer_groups = [{
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': param_changes,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': None
}]
exploration.init_state.update_interaction_answer_groups(answer_groups)
with self.assertRaisesRegexp(
Exception,
'The parameter ParamChange was used in an answer group, '
'but it does not exist in this exploration'):
exploration.validate()
def test_verify_all_states_reachable(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'owner_id')
exploration.validate()
exploration.add_states(['End'])
end_state = exploration.states['End']
end_state.update_interaction_id('EndExploration')
end_state.update_interaction_default_outcome(None)
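        # Nothing routes to the newly added 'End' state, so strict validation
        # should report it as unreachable and flag that the exploration cannot
        # be completed from 'Introduction'.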
with self.assertRaisesRegexp(
Exception,
'Please fix the following issues before saving this exploration: '
'1. The following states are not reachable from the initial state: '
'End 2. It is impossible to complete the exploration from the '
'following states: Introduction'):
exploration.validate(strict=True)
def test_update_init_state_name_with_invalid_state(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='title', category='category',
objective='objective', end_state_name='End')
exploration.update_init_state_name('End')
self.assertEqual(exploration.init_state_name, 'End')
with self.assertRaisesRegexp(
Exception,
'Invalid new initial state name: invalid_state;'):
exploration.update_init_state_name('invalid_state')
def test_rename_state_with_invalid_state(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='title', category='category',
objective='objective', end_state_name='End')
self.assertTrue(exploration.states.get('End'))
self.assertFalse(exploration.states.get('new state name'))
exploration.rename_state('End', 'new state name')
self.assertFalse(exploration.states.get('End'))
self.assertTrue(exploration.states.get('new state name'))
with self.assertRaisesRegexp(
Exception, 'State invalid_state does not exist'):
exploration.rename_state('invalid_state', 'new state name')
def test_default_outcome_is_labelled_incorrect_for_self_loop(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='title', category='category',
objective='objective', end_state_name='End')
exploration.validate(strict=True)
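        # A default outcome that loops back to the same state must not be
        # labelled as correct; strict validation should reject this.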
(exploration.init_state.interaction.default_outcome
.labelled_as_correct) = True
(exploration.init_state.interaction.default_outcome
.dest) = exploration.init_state_name
with self.assertRaisesRegexp(
Exception,
'The default outcome for state Introduction is labelled '
'correct but is a self-loop'):
exploration.validate(strict=True)


class ExplorationSummaryTests(test_utils.GenericTestBase):
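    """Tests for validation of ExplorationSummary domain objects."""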
def setUp(self):
super(ExplorationSummaryTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
exploration = exp_domain.Exploration.create_default_exploration('eid')
exp_services.save_new_exploration(owner_id, exploration)
self.exp_summary = exp_fetchers.get_exploration_summary_by_id('eid')
def test_validation_passes_with_valid_properties(self):
self.exp_summary.validate()
def test_validation_fails_with_invalid_title(self):
self.exp_summary.title = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected title to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_category(self):
self.exp_summary.category = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected category to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_objective(self):
self.exp_summary.objective = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected objective to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_language_code(self):
self.exp_summary.language_code = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_unallowed_language_code(self):
self.exp_summary.language_code = 'invalid'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_tags(self):
self.exp_summary.tags = 'tags'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected \'tags\' to be a list, received tags'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_tag_in_tags(self):
self.exp_summary.tags = ['tag', 2]
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each tag in \'tags\' to be a string, received \'2\''):
self.exp_summary.validate()
def test_validation_fails_with_empty_tag_in_tags(self):
self.exp_summary.tags = ['', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, 'Tags should be non-empty'):
self.exp_summary.validate()
def test_validation_fails_with_unallowed_characters_in_tag(self):
self.exp_summary.tags = ['123', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, (
'Tags should only contain lowercase '
'letters and spaces, received \'123\'')):
self.exp_summary.validate()
def test_validation_fails_with_whitespace_in_tag_start(self):
self.exp_summary.tags = [' ab', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace, received \' ab\''):
self.exp_summary.validate()
def test_validation_fails_with_whitespace_in_tag_end(self):
self.exp_summary.tags = ['ab ', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace, received \'ab \''):
self.exp_summary.validate()
def test_validation_fails_with_adjacent_whitespace_in_tag(self):
        self.exp_summary.tags = ['a  b', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, (
'Adjacent whitespace in tags should '
                'be collapsed, received \'a  b\'')):
self.exp_summary.validate()
def test_validation_fails_with_duplicate_tags(self):
self.exp_summary.tags = ['abc', 'abc', 'ab']
with self.assertRaisesRegexp(
utils.ValidationError, 'Some tags duplicate each other'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_rating_type(self):
self.exp_summary.ratings = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected ratings to be a dict, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_rating_keys(self):
self.exp_summary.ratings = {'1': 0, '10': 1}
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected ratings to have keys: 1, 2, 3, 4, 5, received 1, 10'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_value_type_for_ratings(self):
self.exp_summary.ratings = {'1': 0, '2': 'one', '3': 0, '4': 0, '5': 0}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected value to be int, received one'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_value_for_ratings(self):
self.exp_summary.ratings = {'1': 0, '2': -1, '3': 0, '4': 0, '5': 0}
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected value to be non-negative, received -1'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_scaled_average_rating(self):
self.exp_summary.scaled_average_rating = 'one'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected scaled_average_rating to be float, received one'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_status(self):
self.exp_summary.status = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_community_owned(self):
self.exp_summary.community_owned = '1'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected community_owned to be bool, received 1'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_contributors_summary(self):
self.exp_summary.contributors_summary = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected contributors_summary to be dict, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_owner_ids_type(self):
self.exp_summary.owner_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected owner_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_owner_id_in_owner_ids(self):
self.exp_summary.owner_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in owner_ids to be string, received 2'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_editor_ids_type(self):
self.exp_summary.editor_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected editor_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_editor_id_in_editor_ids(self):
self.exp_summary.editor_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in editor_ids to be string, received 2'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_voice_artist_ids_type(self):
self.exp_summary.voice_artist_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected voice_artist_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_voice_artist_id_in_voice_artists_ids(
self):
self.exp_summary.voice_artist_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in voice_artist_ids to be string, received 2'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_viewer_ids_type(self):
self.exp_summary.viewer_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected viewer_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_viewer_id_in_viewer_ids(self):
self.exp_summary.viewer_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in viewer_ids to be string, received 2'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_contributor_ids_type(self):
self.exp_summary.contributor_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected contributor_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_contributor_id_in_contributor_ids(
self):
self.exp_summary.contributor_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in contributor_ids to be string, received 2'):
self.exp_summary.validate()


class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of explorations from YAML files."""
EXP_ID = 'An exploration_id'
def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
exploration = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='Title', category='Category')
exploration.add_states(['New state'])
self.assertEqual(len(exploration.states), 2)
exploration.validate()
yaml_content = exploration.to_yaml()
self.assertEqual(yaml_content, self.SAMPLE_YAML_CONTENT)
exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
self.assertEqual(len(exploration2.states), 2)
yaml_content_2 = exploration2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
# Verify SAMPLE_UNTITLED_YAML_CONTENT can be converted to an exploration
# without error.
exp_domain.Exploration.from_untitled_yaml(
'exp4', 'Title', 'Category', self.SAMPLE_UNTITLED_YAML_CONTENT)
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'Invalid\ninit_state_name:\nMore stuff')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'State1:\n(\nInvalid yaml')
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version >= 10, received: 9'
):
exp_domain.Exploration.from_yaml(
'exp4', self.SAMPLE_UNTITLED_YAML_CONTENT)
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version <= 9'
):
exp_domain.Exploration.from_untitled_yaml(
'exp4', 'Title', 'Category', self.SAMPLE_YAML_CONTENT)


class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
"""Tests the presence of appropriate schema migration methods in the
Exploration domain object class.
"""
def test_correct_states_schema_conversion_methods_exist(self):
"""Test that the right states schema conversion methods exist."""
current_states_schema_version = (
feconf.CURRENT_STATE_SCHEMA_VERSION)
for version_num in range(current_states_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
current_states_schema_version,
current_states_schema_version + 1)))
def test_correct_exploration_schema_conversion_methods_exist(self):
"""Test that the right exploration schema conversion methods exist."""
current_exp_schema_version = (
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION)
for version_num in range(1, current_exp_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
current_exp_schema_version, current_exp_schema_version + 1)))


class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = ("""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = ("""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
YAML_CONTENT_V9 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
language:
value: ''
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: CodeRepl
param_changes: []
states_schema_version: 6
tags: []
""")
YAML_CONTENT_V10 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 7
tags: []
title: Title
""")
YAML_CONTENT_V11 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 11
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 8
tags: []
title: Title
""")
YAML_CONTENT_V12 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 12
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: END
feedback:
- Correct!
id: TextInput
param_changes: []
states_schema_version: 9
tags: []
title: Title
""")
YAML_CONTENT_V13 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 13
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 10
tags: []
title: Title
""")
YAML_CONTENT_V14 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 14
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: []
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 11
tags: []
title: Title
""")
YAML_CONTENT_V15 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 15
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 12
tags: []
title: Title
""")
YAML_CONTENT_V16 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 16
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V17 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 17
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V18 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 18
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints:
- hint_text: ''
id: TextInput
solution:
explanation: ''
answer_is_exclusive: False
correct_answer: Answer
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V19 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 19
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 14
tags: []
title: Title
""")
YAML_CONTENT_V20 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 20
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 15
tags: []
title: Title
""")
YAML_CONTENT_V21 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 21
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: FractionInput
solution: null
param_changes: []
states_schema_version: 16
tags: []
title: Title
""")
YAML_CONTENT_V22 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 22
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 17
tags: []
title: Title
""")
YAML_CONTENT_V23 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 23
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 18
tags: []
title: Title
""")
YAML_CONTENT_V24 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 24
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 19
tags: []
title: Title
""")
YAML_CONTENT_V25 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 25
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 20
tags: []
title: Title
""")
YAML_CONTENT_V26 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 26
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: Correct!
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: Congratulations, you have finished!
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 21
tags: []
title: Title
""")
YAML_CONTENT_V27 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 27
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 22
tags: []
title: Title
""")

    YAML_CONTENT_V28 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 28
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 23
tags: []
title: Title
""")

    YAML_CONTENT_V29 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 29
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
imageAndRegions:
value:
imagePath: s1ImagePath.png
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ImageClickInput
solution: null
param_changes: []
states_schema_version: 24
tags: []
title: Title
""")

    YAML_CONTENT_V30 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 30
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 25
tags: []
title: Title
""")

    YAML_CONTENT_V31 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 31
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
new_content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 26
tags: []
title: Title
""")

    YAML_CONTENT_V32 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 32
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 27
tags: []
title: Title
""")

    YAML_CONTENT_V33 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 33
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 28
tags: []
title: Title
""")

    YAML_CONTENT_V34 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 34
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 29
tags: []
title: Title
""")

    YAML_CONTENT_V35 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
    _LATEST_YAML_CONTENT = YAML_CONTENT_V35

    def test_load_from_v1(self):
        """Test direct loading from a v1 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v2(self):
        """Test direct loading from a v2 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v3(self):
        """Test direct loading from a v3 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v4(self):
        """Test direct loading from a v4 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v5(self):
        """Test direct loading from a v5 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v6(self):
        """Test direct loading from a v6 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_cannot_load_from_v6_with_invalid_handler_name(self):
        invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: invalid_handler_name
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
        with self.assertRaisesRegexp(
            Exception,
            'Error: Can only convert rules with a name '
            '\'submit\' in states v3 to v4 conversion process. '):
            exp_domain.Exploration.from_untitled_yaml(
                'eid', 'Title', 'Category', invalid_yaml_content_v6)

    def test_cannot_load_from_v6_with_invalid_rule(self):
        invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: invalid_rule
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
        with self.assertRaisesRegexp(
            Exception,
            'Error: Can only convert default and atomic '
            'rules in states v3 to v4 conversion process.'):
            exp_domain.Exploration.from_untitled_yaml(
                'eid', 'Title', 'Category', invalid_yaml_content_v6)

    def test_cannot_load_from_v6_with_invalid_subject(self):
        invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
subject: invalid_subject
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
        with self.assertRaisesRegexp(
            Exception,
            'Error: Can only convert rules with an \'answer\' '
            'subject in states v3 to v4 conversion process.'):
            exp_domain.Exploration.from_untitled_yaml(
                'eid', 'Title', 'Category', invalid_yaml_content_v6)

    def test_cannot_load_from_v6_with_invalid_interaction_id(self):
        invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: invalid_id
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
        with self.assertRaisesRegexp(
            Exception,
            'Trying to migrate exploration containing non-existent '
            'interaction ID'):
            exp_domain.Exploration.from_untitled_yaml(
                'eid', 'Title', 'Category', invalid_yaml_content_v6)

    def test_load_from_v7(self):
        """Test direct loading from a v7 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v8(self):
        """Test direct loading from a v8 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v9(self):
        """Test direct loading from a v9 yaml file."""
        latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
language:
value: python
placeholder:
value: ''
postCode:
value: ''
preCode:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: CodeRepl
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
        self.assertEqual(exploration.to_yaml(), latest_yaml_content)

    def test_load_from_v10(self):
        """Test direct loading from a v10 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V10)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v11(self):
        """Test direct loading from a v11 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V11)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v12(self):
        """Test direct loading from a v12 yaml file."""
        latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Correct!</p>
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
hint_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
hint_1: {}
states_schema_version: 30
tags: []
title: Title
""")
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V12)
        self.assertEqual(exploration.to_yaml(), latest_yaml_content)

    def test_load_from_v13(self):
        """Test direct loading from a v13 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V13)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v14(self):
        """Test direct loading from a v14 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V14)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v15(self):
        """Test direct loading from a v15 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V15)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v16(self):
        """Test direct loading from a v16 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V16)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v17(self):
        """Test direct loading from a v17 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V17)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v18(self):
        """Test direct loading from a v18 yaml file."""
        latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: ''
id: TextInput
solution:
answer_is_exclusive: false
correct_answer: Answer
explanation:
content_id: solution
html: ''
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
hint_1: {}
solution: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
hint_1: {}
solution: {}
states_schema_version: 30
tags: []
title: Title
""")
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V18)
        self.assertEqual(exploration.to_yaml(), latest_yaml_content)

    def test_load_from_v19(self):
        """Test direct loading from a v19 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V19)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v20(self):
        """Test direct loading from a v20 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V20)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v21(self):
        """Test direct loading from a v21 yaml file."""
        latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
allowImproperFraction:
value: true
allowNonzeroIntegerPart:
value: true
customPlaceholder:
value: ''
placeholder:
value: ''
requireSimplestForm:
value: false
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: FractionInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V21)
        self.assertEqual(exploration.to_yaml(), latest_yaml_content)

    def test_load_from_v22(self):
        """Test direct loading from a v22 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V22)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v23(self):
        """Test direct loading from a v23 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V23)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v24(self):
        """Test direct loading from a v24 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V24)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v25(self):
        """Test direct loading from a v25 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V25)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v26(self):
        """Test direct loading from a v26 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V26)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v27(self):
        """Test direct loading from a v27 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V27)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v28(self):
        """Test direct loading from a v28 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V28)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v29(self):
        """Test direct loading from a v29 yaml file."""
        latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
highlightRegionsOnHover:
value: false
imageAndRegions:
value:
imagePath: s1ImagePath_height_120_width_120.png
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ImageClickInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V29)
        self.assertEqual(exploration.to_yaml(), latest_yaml_content)

    def test_load_from_v30(self):
        """Test direct loading from a v30 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V30)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v31(self):
        """Test direct loading from a v31 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V31)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v32(self):
        """Test direct loading from a v32 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V32)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v33(self):
        """Test direct loading from a v33 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V33)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_cannot_load_from_yaml_with_no_schema_version(self):
        sample_yaml_content = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
        with self.assertRaisesRegexp(
            Exception, 'Invalid YAML file: no schema version specified.'):
            exp_domain.Exploration.from_untitled_yaml(
                'eid', 'Title', 'Category', sample_yaml_content)

    def test_cannot_load_from_yaml_with_invalid_schema_version(self):
        sample_yaml_content = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 0
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
        with self.assertRaisesRegexp(
            Exception,
            'Sorry, we can only process v1 to v%s exploration YAML files '
            'at present.' % exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION):
            exp_domain.Exploration.from_untitled_yaml(
                'eid', 'Title', 'Category', sample_yaml_content)


class HTMLMigrationUnitTests(test_utils.GenericTestBase):
    """Test HTML migration."""

    YAML_CONTENT_V26_TEXTANGULAR = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: Introduction
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 26
states:
Introduction:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: Introduction
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
state1:
classifier_model_id: null
content:
content_id: content
html: <blockquote><p>Hello, this is state1</p></blockquote>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
solution: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: Default <p>outcome</p> for state1
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution:
answer_is_exclusive: true
correct_answer: Answer1
explanation:
content_id: solution
html: This is <i>solution</i> for state1
param_changes: []
state2:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, </p>this <i>is </i>state2
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <div>Outcome1 for state2</div>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
- inputs:
x: 1
rule_type: Equals
tagged_misconception_id: null
training_data: []
- outcome:
dest: state3
feedback:
content_id: feedback_2
html: <pre>Outcome2 <br>for state2</pre>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is </p>value1 <br>for MultipleChoice
- This is value2<span> for <br>MultipleChoice</span>
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Hello, this is<div> html1<b> for </b></div>state2</p>
- hint_content:
content_id: hint_2
html: Here is link 2 <oppia-noninteractive-link
text-with-value="&quot;discussion forum&quot;"
url-with-value="&quot;https://groups.google.com/
forum/?fromgroups#!forum/oppia&quot;">
</oppia-noninteractive-link>
id: MultipleChoiceInput
solution: null
param_changes: []
state3:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, this is state3</p>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: Here is the image1 <i><oppia-noninteractive-image
caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue.png&quot;"
alt-with-value="&quot;&quot;">
</oppia-noninteractive-image></i>Here is the image2
<div><oppia-noninteractive-image caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue.png&quot;"
alt-with-value="&quot;&quot;">
</oppia-noninteractive-image></div>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- This <span>is value1 for </span>ItemSelectionInput
rule_type: Equals
- inputs:
x:
- This is value3 for ItemSelectionInput
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- This <span>is value1 for </span>ItemSelection
- This <code>is value2</code> for ItemSelection
- This is value3 for ItemSelection
maxAllowableSelectionCount:
value: 1
minAllowableSelectionCount:
value: 1
default_outcome:
dest: state3
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ItemSelectionInput
solution: null
param_changes: []
states_schema_version: 21
tags: []
title: title
""")
    # pylint: disable=line-too-long
    YAML_CONTENT_V35_IMAGE_DIMENSIONS = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: Introduction
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
Introduction:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: Introduction
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
state1:
classifier_model_id: null
content:
content_id: content
html: <blockquote><p>Hello, this is state1</p></blockquote>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: <p>Default </p><p>outcome</p><p> for state1</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution:
answer_is_exclusive: true
correct_answer: Answer1
explanation:
content_id: solution
html: <p>This is <em>solution</em> for state1</p>
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solution: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
solution: {}
state2:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, </p><p>this <em>is </em>state2</p>
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <p>Outcome1 for state2</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
- inputs:
x: 1
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
- outcome:
dest: state3
feedback:
content_id: feedback_2
html: "<pre>Outcome2 \\nfor state2</pre>"
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is </p><p>value1 <br>for MultipleChoice</p>
- <p>This is value2 for <br>MultipleChoice</p>
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Hello, this is</p><p> html1<strong> for </strong></p><p>state2</p>
- hint_content:
content_id: hint_2
html: <p>Here is link 2 <oppia-noninteractive-link text-with-value="&quot;discussion
forum&quot;" url-with-value="&quot;https://groups.google.com/
forum/?fromgroups#!forum/oppia&quot;"> </oppia-noninteractive-link></p>
id: MultipleChoiceInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
state3:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, this is state3</p>
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <p>Here is the image1 </p><oppia-noninteractive-image alt-with-value="&quot;&quot;"
caption-with-value="&quot;&quot;" filepath-with-value="&quot;startBlue_height_490_width_120.png&quot;">
</oppia-noninteractive-image><p>Here is the image2 </p><oppia-noninteractive-image
alt-with-value="&quot;&quot;" caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue_height_490_width_120.png&quot;">
</oppia-noninteractive-image>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- <p>This is value1 for ItemSelectionInput</p>
rule_type: Equals
- inputs:
x:
- <p>This is value3 for ItemSelectionInput</p>
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is value1 for ItemSelection</p>
- <p>This is value2 for ItemSelection</p>
- <p>This is value3 for ItemSelection</p>
maxAllowableSelectionCount:
value: 1
minAllowableSelectionCount:
value: 1
default_outcome:
dest: state3
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ItemSelectionInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
states_schema_version: 30
tags: []
title: title
""")
YAML_CONTENT_V27_WITHOUT_IMAGE_CAPTION = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 27
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: <p><oppia-noninteractive-image filepath-with-value="&quot;random.png&quot;"></oppia-noninteractive-image>Hello this
is test case to check image tag inside p tag</p>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 22
tags: []
title: Title
""")
YAML_CONTENT_V35_WITH_IMAGE_CAPTION = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: <oppia-noninteractive-image caption-with-value="&quot;&quot;"
filepath-with-value="&quot;random_height_490_width_120.png&quot;"></oppia-noninteractive-image><p>Hello
this is test case to check image tag inside p tag</p>
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
# pylint: enable=line-too-long
def test_load_from_v26_textangular(self):
"""Test direct loading from a v26 yaml file."""
mock_get_filename_with_dimensions_context = self.swap(
html_validation_service, 'get_filename_with_dimensions',
mock_get_filename_with_dimensions)
with mock_get_filename_with_dimensions_context:
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V26_TEXTANGULAR)
self.assertEqual(
exploration.to_yaml(), self.YAML_CONTENT_V35_IMAGE_DIMENSIONS)
def test_load_from_v27_without_image_caption(self):
"""Test direct loading from a v27 yaml file."""
mock_get_filename_with_dimensions_context = self.swap(
html_validation_service, 'get_filename_with_dimensions',
mock_get_filename_with_dimensions)
with mock_get_filename_with_dimensions_context:
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V27_WITHOUT_IMAGE_CAPTION)
self.assertEqual(
exploration.to_yaml(), self.YAML_CONTENT_V35_WITH_IMAGE_CAPTION)
class ConversionUnitTests(test_utils.GenericTestBase):
"""Test conversion methods."""
def test_convert_exploration_to_player_dict(self):
exp_title = 'Title'
second_state_name = 'first state'
exploration = exp_domain.Exploration.create_default_exploration(
'eid', title=exp_title, category='Category')
exploration.add_states([second_state_name])
def _get_default_state_dict(content_str, dest_name):
"""Gets the default state dict of the exploration."""
return {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': content_str,
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {},
'default_outcome': {}
}
},
'solicit_answer_details': False,
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {}
}
},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {},
'default_outcome': {
'dest': dest_name,
'feedback': {
'content_id': feconf.DEFAULT_OUTCOME_CONTENT_ID,
'html': ''
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'hints': [],
'id': None,
'solution': None,
},
'param_changes': [],
}
self.assertEqual(exploration.to_player_dict(), {
'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
'title': exp_title,
'objective': feconf.DEFAULT_EXPLORATION_OBJECTIVE,
'states': {
feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
feconf.DEFAULT_INIT_STATE_CONTENT_STR,
feconf.DEFAULT_INIT_STATE_NAME),
second_state_name: _get_default_state_dict(
'', second_state_name),
},
'param_changes': [],
'param_specs': {},
'language_code': 'en',
'correctness_feedback_enabled': False,
})
class StateOperationsUnitTests(test_utils.GenericTestBase):
"""Test methods operating on states."""
def test_delete_state(self):
"""Test deletion of states."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.add_states(['first state'])
with self.assertRaisesRegexp(
ValueError, 'Cannot delete initial state'
):
exploration.delete_state(exploration.init_state_name)
exploration.add_states(['second state'])
exploration.delete_state('second state')
with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
exploration.delete_state('fake state')
class HtmlCollectionTests(test_utils.GenericTestBase):
"""Test method to obtain all html strings."""
def test_all_html_strings_are_collected(self):
exploration = exp_domain.Exploration.create_default_exploration(
'eid', title='title', category='category')
exploration.add_states(['state1', 'state2', 'state3', 'state4'])
state1 = exploration.states['state1']
state2 = exploration.states['state2']
state3 = exploration.states['state3']
state4 = exploration.states['state4']
content1_dict = {
'content_id': 'content',
'html': '<blockquote>Hello, this is state1</blockquote>'
}
content2_dict = {
'content_id': 'content',
'html': '<pre>Hello, this is state2</pre>'
}
content3_dict = {
'content_id': 'content',
'html': '<p>Hello, this is state3</p>'
}
content4_dict = {
'content_id': 'content',
'html': '<p>Hello, this is state4</p>'
}
state1.update_content(
state_domain.SubtitledHtml.from_dict(content1_dict))
state2.update_content(
state_domain.SubtitledHtml.from_dict(content2_dict))
state3.update_content(
state_domain.SubtitledHtml.from_dict(content3_dict))
state4.update_content(
state_domain.SubtitledHtml.from_dict(content4_dict))
state1.update_interaction_id('TextInput')
state2.update_interaction_id('MultipleChoiceInput')
state3.update_interaction_id('ItemSelectionInput')
state4.update_interaction_id('DragAndDropSortInput')
customization_args_dict1 = {
'placeholder': {'value': ''},
'rows': {'value': 1}
}
customization_args_dict2 = {
'choices': {'value': [
'<p>This is value1 for MultipleChoice</p>',
'<p>This is value2 for MultipleChoice</p>'
]}
}
customization_args_dict3 = {
'choices': {'value': [
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
'<p>This is value3 for ItemSelection</p>'
]}
}
customization_args_dict4 = {
'choices': {'value': [
'<p>This is value1 for DragAndDropSortInput</p>',
'<p>This is value2 for DragAndDropSortInput</p>',
]}
}
state1.update_interaction_customization_args(customization_args_dict1)
state2.update_interaction_customization_args(customization_args_dict2)
state3.update_interaction_customization_args(customization_args_dict3)
state4.update_interaction_customization_args(customization_args_dict4)
default_outcome_dict1 = {
'dest': 'state2',
'feedback': {
'content_id': 'default_outcome',
'html': '<p>Default outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
}
state1.update_interaction_default_outcome(default_outcome_dict1)
hint_list2 = [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hello, this is html1 for state2</p>'
}
}, {
'hint_content': {
'content_id': 'hint_2',
'html': '<p>Hello, this is html2 for state2</p>'
}
}]
state2.update_interaction_hints(hint_list2)
solution_dict1 = {
'interaction_id': '',
'answer_is_exclusive': True,
'correct_answer': 'Answer1',
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
state1.update_interaction_solution(solution_dict1)
answer_group_list2 = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': 0}
}, {
'rule_type': 'Equals',
'inputs': {'x': 1}
}],
'outcome': {
'dest': 'state1',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Outcome1 for state2</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}, {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': 0}
}],
'outcome': {
'dest': 'state3',
'feedback': {
'content_id': 'feedback_2',
'html': '<p>Outcome2 for state2</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
answer_group_list3 = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelectionInput</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value3 for ItemSelectionInput</p>'
]}
}],
'outcome': {
'dest': 'state1',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Outcome for state3</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
state2.update_interaction_answer_groups(answer_group_list2)
state3.update_interaction_answer_groups(answer_group_list3)
expected_html_list = [
'',
'',
'<pre>Hello, this is state2</pre>',
'<p>Outcome1 for state2</p>',
'<p>Outcome2 for state2</p>',
'',
'<p>Hello, this is html1 for state2</p>',
'<p>Hello, this is html2 for state2</p>',
'<p>This is value1 for MultipleChoice</p>',
'<p>This is value2 for MultipleChoice</p>',
'<blockquote>Hello, this is state1</blockquote>',
'<p>Default outcome for state1</p>',
'<p>This is solution for state1</p>',
'<p>Hello, this is state3</p>',
'<p>Outcome for state3</p>',
'<p>This is value1 for ItemSelectionInput</p>',
'<p>This is value3 for ItemSelectionInput</p>',
'',
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
'<p>This is value3 for ItemSelection</p>',
'<p>Hello, this is state4</p>',
'',
'<p>This is value1 for DragAndDropSortInput</p>',
'<p>This is value2 for DragAndDropSortInput</p>'
]
actual_outcome_list = exploration.get_all_html_content_strings()
self.assertEqual(actual_outcome_list, expected_html_list)
| [
[
[
703,
707
],
[
9381,
9385
],
[
10611,
10615
],
[
20384,
20388
],
[
38115,
38119
],
[
39495,
39499
]
],
[
[
715,
717
],
[
8141,
8143
],
[
35643,
35645
]
],
[
[
725,
727
],
[
28454,
28456
]
],
[
[
753,
763
],
[
1538,
1548
],
[
1764,
1774
],
[
2066,
2076
],
[
2470,
2480
],
[
2977,
2987
],
[
3472,
3482
],
[
3819,
3829
],
[
4228,
4238
],
[
4558,
4568
],
[
5034,
5044
],
[
5377,
5387
],
[
6058,
6068
],
[
6665,
6675
],
[
7186,
7196
],
[
7732,
7742
],
[
8710,
8720
],
[
8901,
8911
],
[
9473,
9483
],
[
9616,
9626
],
[
10018,
10028
],
[
10163,
10173
],
[
10834,
10844
],
[
10948,
10958
],
[
11112,
11122
],
[
11306,
11316
],
[
11862,
11872
],
[
11977,
11987
],
[
12148,
12158
],
[
12299,
12309
],
[
12865,
12875
],
[
13035,
13045
],
[
13210,
13220
],
[
13361,
13371
],
[
13865,
13875
],
[
14135,
14145
],
[
14321,
14331
],
[
14371,
14381
],
[
14436,
14446
],
[
14594,
14604
],
[
14814,
14824
],
[
14868,
14878
],
[
15160,
15170
],
[
15210,
15220
],
[
15376,
15386
],
[
15602,
15612
],
[
15656,
15666
],
[
15866,
15876
],
[
16084,
16094
],
[
16431,
16441
],
[
16816,
16826
],
[
17030,
17040
],
[
17435,
17445
],
[
32604,
32614
],
[
36370,
36380
],
[
36729,
36739
],
[
37351,
37361
],
[
37761,
37771
],
[
38202,
38212
],
[
38543,
38553
],
[
38842,
38852
],
[
39155,
39165
],
[
39703,
39713
],
[
39817,
39827
],
[
39981,
39991
],
[
40374,
40384
],
[
40673,
40683
],
[
40793,
40803
],
[
40918,
40928
],
[
41116,
41126
],
[
41574,
41584
],
[
41688,
41698
],
[
42055,
42065
],
[
42326,
42336
],
[
43143,
43153
],
[
56226,
56236
],
[
65926,
65936
],
[
66304,
66314
],
[
66631,
66641
],
[
66806,
66816
],
[
66928,
66938
],
[
67083,
67093
],
[
67303,
67313
],
[
67516,
67526
],
[
68168,
68178
],
[
68350,
68360
],
[
68732,
68742
],
[
68902,
68912
],
[
69077,
69087
],
[
142344,
142354
],
[
142635,
142645
],
[
142926,
142936
],
[
143217,
143227
],
[
143508,
143518
],
[
143799,
143809
],
[
146064,
146074
],
[
148240,
148250
],
[
150458,
150468
],
[
152620,
152630
],
[
152843,
152853
],
[
153134,
153144
],
[
156655,
156665
],
[
156942,
156952
],
[
157206,
157216
],
[
160702,
160712
],
[
160960,
160970
],
[
161224,
161234
],
[
161488,
161498
],
[
161752,
161762
],
[
162016,
162026
],
[
165673,
165683
],
[
165931,
165941
],
[
166195,
166205
],
[
169784,
169794
],
[
170042,
170052
],
[
170306,
170316
],
[
170570,
170580
],
[
170834,
170844
],
[
171098,
171108
],
[
171362,
171372
],
[
171626,
171636
],
[
175168,
175178
],
[
175426,
175436
],
[
175690,
175700
],
[
175954,
175964
],
[
176218,
176228
],
[
178551,
178561
],
[
180903,
180913
],
[
180967,
180977
],
[
201372,
201382
],
[
201941,
201951
],
[
202387,
202397
],
[
205113,
205123
],
[
205804,
205814
]
],
[
[
788,
800
],
[
8495,
8507
],
[
34743,
34755
],
[
36082,
36094
],
[
36183,
36195
],
[
56375,
56387
]
],
[
[
825,
837
],
[
8320,
8332
],
[
35822,
35834
],
[
56291,
56303
]
],
[
[
862,
885
],
[
1206,
1229
],
[
201185,
201208
],
[
201754,
201777
]
],
[
[
910,
922
],
[
22881,
22893
],
[
24415,
24427
],
[
32099,
32111
],
[
32373,
32385
],
[
42533,
42545
],
[
48433,
48445
],
[
49412,
49424
],
[
50144,
50156
]
],
[
[
947,
959
],
[
17975,
17987
],
[
18190,
18202
],
[
23494,
23506
],
[
31365,
31377
],
[
42440,
42452
],
[
206728,
206740
],
[
206824,
206836
],
[
206920,
206932
],
[
207016,
207028
]
],
[
[
986,
992
],
[
1071,
1077
],
[
1102,
1108
]
],
[
[
1016,
1026
],
[
1333,
1343
],
[
7896,
7906
],
[
16313,
16323
],
[
17102,
17112
],
[
55976,
55986
],
[
65682,
65692
],
[
67670,
67680
],
[
69260,
69270
],
[
181106,
181116
],
[
202174,
202184
],
[
204945,
204955
],
[
205650,
205660
]
],
[
[
1034,
1040
],
[
8167,
8173
],
[
8388,
8394
],
[
35669,
35675
],
[
35890,
35896
],
[
42143,
42149
],
[
68013,
68019
],
[
204296,
204302
],
[
204385,
204391
],
[
204463,
204469
],
[
204540,
204546
],
[
204599,
204605
],
[
203691,
203697
]
],
[
[
1048,
1053
],
[
1468,
1473
],
[
1694,
1699
],
[
1932,
1937
],
[
2360,
2365
],
[
2819,
2824
],
[
3320,
3325
],
[
8244,
8249
],
[
34870,
34875
],
[
35069,
35074
],
[
35277,
35282
],
[
35746,
35751
],
[
56664,
56669
],
[
56933,
56938
],
[
57207,
57212
],
[
57490,
57495
],
[
57787,
57792
],
[
58030,
58035
],
[
58311,
58316
],
[
58606,
58611
],
[
58866,
58871
],
[
59207,
59212
],
[
59511,
59516
],
[
59822,
59827
],
[
60156,
60161
],
[
60404,
60409
],
[
60679,
60684
],
[
61023,
61028
],
[
61325,
61330
],
[
61629,
61634
],
[
61909,
61914
],
[
62181,
62186
],
[
62480,
62485
],
[
62767,
62772
],
[
63050,
63055
],
[
63341,
63346
],
[
63640,
63645
],
[
63944,
63949
],
[
64281,
64286
],
[
64579,
64584
],
[
64878,
64883
],
[
65180,
65185
],
[
65512,
65517
]
],
[
[
1056,
1066
],
[
7236,
7246
],
[
7415,
7425
],
[
15916,
15926
],
[
16134,
16144
],
[
35994,
36004
]
],
[
[
1135,
1168
],
[
201254,
201287
],
[
201823,
201856
]
],
[
[
1310,
1332
]
],
[
[
7857,
7895
],
[
8027,
8065
]
],
[
[
16288,
16312
]
],
[
[
17075,
17101
]
],
[
[
55952,
55975
],
[
56041,
56064
]
],
[
[
65660,
65681
]
],
[
[
67638,
67669
]
],
[
[
69235,
69259
]
],
[
[
181083,
181105
]
],
[
[
202154,
202173
]
],
[
[
204920,
204944
]
],
[
[
205630,
205649
]
]
] |
import pytest
import os
from collections import defaultdict
from syrupy.extensions.single_file import SingleFileSnapshotExtension
from tempgen.module import Tempgen
from tempgen.parsers import Parsers
from tempgen.transforms import Transforms
from tempgen.tests.helpers import ext_serializer_map
tests_dir = os.path.dirname(os.path.abspath(__file__))
fixture_dir = os.path.join(tests_dir, 'fixtures')
fixture_name = 'test_template'
generated_name = 'generated'
transforms = Transforms().name_transform_map.keys()
extensions = Parsers().ext_parser_map.keys()
serializers = ext_serializer_map
@pytest.fixture(autouse=True)
def tempgen_instance():
return Tempgen()
@pytest.fixture
def tempgen_instances(request):
return [Tempgen() for _ in range(request.param)]
@pytest.mark.parametrize('extension', extensions)
def test_load_template(extension, tempgen_instance, snapshot):
template = os.path.join(fixture_dir, fixture_name + extension)
tempgen_instance.load_template(template)
assert template in tempgen_instance.get_templates()
assert tempgen_instance.get_fields() == snapshot
@pytest.mark.parametrize('extension', extensions)
def test_save_result(extension, tempgen_instance, snapshot):
template = os.path.join(fixture_dir, fixture_name + extension)
tempgen_instance.load_template(template)
replacements = { key: value['value'] for key, value in tempgen_instance.get_fields().items() }
replacements['doer'] = 'Петров П.П.'
replacements['itn'] = '987654321098'
save_path = os.path.join(fixture_dir, generated_name)
tempgen_instance.save_result(template, save_path, replacements)
assert ext_serializer_map[extension](save_path + extension) == snapshot
os.remove(save_path + extension)
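# The [2] below is handed to the tempgen_instances fixture through indirect
# parametrization (request.param), so the test receives two independent
# Tempgen instances to compare.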
@pytest.mark.parametrize('extension', extensions)
@pytest.mark.parametrize('transform', transforms)
@pytest.mark.parametrize('tempgen_instances', [2], indirect=['tempgen_instances'])
def test_independence(extension, transform, tempgen_instances):
instance_0, instance_1 = tempgen_instances
assert instance_0.parsers != instance_1.parsers
assert instance_0.transforms != instance_1.transforms
instance_0.parsers[extension].parse = lambda *args, **kwargs: ({})
instance_0.transforms[transform] = lambda x: x
assert instance_0.parsers != instance_1.parsers
assert instance_0.transforms != instance_1.transforms
| [
[
[
7,
13
],
[
605,
611
],
[
681,
687
],
[
783,
789
],
[
1118,
1124
],
[
1762,
1768
],
[
1812,
1818
],
[
1862,
1868
]
],
[
[
21,
23
]
],
[
[
31,
33
],
[
319,
321
],
[
335,
337
],
[
376,
378
],
[
910,
912
],
[
1243,
1245
],
[
1537,
1539
],
[
1727,
1729
]
],
[
[
58,
69
]
],
[
[
112,
139
]
],
[
[
167,
174
],
[
669,
676
],
[
740,
747
]
],
[
[
203,
210
],
[
538,
545
]
],
[
[
242,
252
],
[
486,
496
]
],
[
[
287,
305
],
[
584,
602
],
[
1658,
1676
]
],
[
[
307,
316
],
[
389,
398
]
],
[
[
362,
373
],
[
923,
934
],
[
1256,
1267
],
[
1550,
1561
]
],
[
[
412,
424
],
[
936,
948
],
[
1269,
1281
]
],
[
[
443,
457
],
[
1563,
1577
]
],
[
[
473,
483
],
[
1849,
1859
]
],
[
[
525,
535
],
[
820,
830
],
[
1155,
1165
],
[
1799,
1809
]
],
[
[
570,
581
]
],
[
[
638,
654
]
],
[
[
700,
717
]
],
[
[
836,
854
]
],
[
[
1171,
1187
]
],
[
[
1948,
1965
]
]
] |
#!/usr/bin/python3
"""Create class"""
class Square:
"""Square class"""
def __init__(self, size=0, position=(0, 0)):
"""Initialize Square"""
        self.size = size
        self.position = position
    def area(self):
        """Return the area of the square."""
        return self.__size * self.__size
@property
def size(self):
""" define size"""
return self.__size
@size.setter
def size(self, value):
"""Define area"""
if type(value) is not int:
raise TypeError("size must be an integer")
if value < 0:
raise ValueError("size must be >= 0")
self.__size = value
    def my_print(self):
        """Print the square with the # character, offset by position."""
        if self.__size:
            for _ in range(self.__position[1]):
                print()
            for _ in range(self.__size):
                print('{}{}'.format(' ' * self.__position[0], '#' * self.__size))
        else:
            print()
@property
def position(self):
""" position"""
return self.__position
@position.setter
    def position(self, value):
        """Set the position after validating it is a tuple of 2 positive integers."""
        if (not isinstance(value, tuple) or
len(value) != 2 or
not isinstance(value[0], int) or
not isinstance(value[1], int) or
value[0] < 0 or
value[1] < 0):
raise TypeError("position must be a tuple of 2 positive integers")
self.__position = value
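# Minimal usage sketch (hypothetical values):
#   sq = Square(3, (0, 1))
#   sq.my_print()   # prints one blank line, then three rows of "###"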
| [
[
[
46,
52
]
]
] |
#This is a mixin class for signal analyzers to be used by f2_system class
#Todo: Analyzers should be an independent class
#Last modification by Marko Kosunen, marko.kosunen@aalto.fi, 30.07.2018 18:09
import numpy as np
import scipy.signal as sig
import matplotlib as mpl
mpl.use('Agg') #To enable plotting without X
import matplotlib.pyplot as plt
class analyzers_mixin:
#Define signal analyzer methods
def oscilloscope(self,argdict):
ymax=argdict['ymax']
ymin=argdict['ymin']
timex=argdict['timex']
sigin=argdict['sigin']
tstr=argdict['tstr']
printstr=argdict['printstr']
msg="Generating %s" %(printstr)
self.print_log(type='I', msg=msg)
figure=plt.figure()
h=plt.subplot();
hfont = {'fontname':'Sans'}
plt.plot(timex, sigin, linewidth=2)
plt.ylim((ymin, ymax));
plt.xlim((np.amin(timex), np.amax(timex)));
#plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);
#plt.plot(self.x,self.b,label='Blowing in the wind',linewidth=2);
tstr=argdict['tstr']
plt.suptitle(tstr,fontsize=20);
plt.ylabel('Out', **hfont,fontsize=18);
plt.xlabel('Sample (n)', **hfont,fontsize=18);
h.tick_params(labelsize=14)
plt.grid(True);
printstr=argdict['printstr']
figure.savefig(printstr, format='eps', dpi=300);
plt.close("all")
def constellation(self,argdict):
ymax=argdict['ymax']
ymin=argdict['ymin']
I=argdict['I']
Q=argdict['Q']
tstr=argdict['tstr']
printstr=argdict['printstr']
msg="Generating %s" %(printstr)
self.print_log(type='I', msg=msg)
figure=plt.figure()
h=plt.subplot();
hfont = {'fontname':'Sans'}
plt.plot(I, Q, linestyle='None', marker='x')
plt.ylim((ymin, ymax));
plt.ylim((1.1*np.amin(Q), 1.1*np.amax(Q)));
plt.xlim((1.1*np.amin(I), 1.1*np.amax(I)));
#plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);
#plt.plot(self.x,self.b,label='Blowing in the wind',linewidth=2);
tstr=argdict['tstr']
plt.suptitle(tstr,fontsize=20);
plt.ylabel('Q', **hfont,fontsize=18);
plt.xlabel('I', **hfont,fontsize=18);
h.tick_params(labelsize=14)
plt.grid(True);
printstr=argdict['printstr']
figure.savefig(printstr, format='eps', dpi=300);
plt.close("all")
def spectrum_analyzer(self, **kwargs):
#Example argdict
#argdict={'sig':self.signal_gen._Z.Data[i,:,0],'ymax':3, 'ymin':spectrumfloorideal,'nperseg':1024,
# 'tstr' : "Tx, User:%i" %(i),'printstr':"%s/F2_system_Tx_antennas_Spectrum_Rs_%i_k:%i.eps" %(self.picpath, self.Rs, i)}
ymax=kwargs.get('ymax',3)
ymin=kwargs.get('ymin',-80)
        nperseg=kwargs.get('nperseg',1024) #Samples for the Welch periodogram segment
fs=kwargs.get('Rs',self.Rs)
freqx=np.arange(nperseg)/nperseg*fs/1e6
freqx.shape=(-1,1)
sigin=kwargs['sigin']
sigin.shape=(-1,1)
tstr=kwargs['tstr']
printstr=kwargs['printstr']
msg="Generating %s" %(printstr)
self.print_log(type='I', msg=msg)
figure=plt.figure()
h=plt.subplot();
hfont = {'fontname':'Sans'}
fs, spe=sig.welch(sigin,fs=self.Rs,nperseg=nperseg,return_onesided=False,scaling='spectrum',axis=0)
spelog=10*np.log10(np.abs(spe)/np.amax(np.abs(spe)))
plt.plot(freqx,spelog, linewidth=2 )
#plt.setp(markerline,'markerfacecolor', 'b','linewidth',2)
#plt.setp(stemlines, 'linestyle','solid','color','b', 'linewidth', 2)
#plt.ylim((np.amin([self.a,self.b]), np.amax([self.a,self.b])));
plt.ylim((ymin, ymax));
plt.xlim((np.amin(freqx), np.amax(freqx)));
#plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);
#plt.plot(self.x,self.b,label='Blowing in the wind',linewidth=2);
plt.suptitle(tstr,fontsize=20);
plt.ylabel('Normalized Spectrum', **hfont,fontsize=18);
plt.xlabel('Frequency (MHz)', **hfont,fontsize=18);
h.tick_params(labelsize=14)
#for axis in ['top','bottom','left','right']:
#h.spines[axis].set_linewidth(2)
#lgd=plt.legend(loc='upper right', fontsize=14);
##lgd.set_fontsize(12);
plt.grid(True);
figure.savefig(printstr, format='eps', dpi=300);
plt.close("all")
def logic_analyzer(self,argdict):
ymax=argdict['ymax']
ymin=argdict['ymin']
timex=argdict['timex']
sigin=argdict['sigin']
tstr = argdict['tstr']
printstr=argdict['printstr']
msg="Generating %s" %(printstr)
self.print_log(type='I', msg=msg)
figure=plt.figure()
h=plt.subplot();
hfont = {'fontname':'Sans'}
markerline, stemlines, baseline = plt.stem(timex, sigin, '-.')
plt.setp(markerline,'markerfacecolor', 'b','linewidth',2)
plt.setp(stemlines, 'linestyle','solid','color','b', 'linewidth', 2)
plt.ylim((ymin, ymax));
plt.xlim((np.amin(timex), np.amax(timex)));
plt.suptitle(tstr,fontsize=20);
plt.ylabel('Out', **hfont,fontsize=18);
plt.xlabel('Sample (n)', **hfont,fontsize=18);
h.tick_params(labelsize=14)
plt.grid(True);
figure.savefig(printstr, format='eps', dpi=300);
def evm_calculator(self,argdict):
reference=argdict['ref']
received=argdict['signal']
        #Shape the vectors: time is row, observation is column
#if received.shape[0]<received.shape[1]:
# received=np.transpose(received)
reference.shape=(-1,1)
received.shape=(-1,1)
#RMS for Scaling
rmsref=np.std(reference)
rmsreceived=np.std(received)
EVM=10*np.log10(np.mean(np.mean(np.abs(received/rmsreceived*rmsref-reference)**2,axis=0)/np.mean(np.abs(reference)**2,axis=0)))
self.print_log(type='I', msg="Estimated EVM is %0.2f dB" %(EVM))
return EVM
def ber_calculator(self,argdict):
reference=argdict['ref']
received=argdict['signal']
        #Shape the vectors: time is row, observation is column
#if received.shape[0]<received.shape[1]:
# received=np.transpose(received)
#reference.shape=received.shape
reference.shape=(-1,1)
received.shape=(-1,1)
#Discard samples rounded away in transmission
#if received.shape[1] < reference.shape[1]:
# reference=reference[:,0:received.shape[1]]
        errors = np.sum(np.sum(np.abs(received - reference), axis=0))
bits=(received.shape[0]*received.shape[1])
self.print_log(type='I', msg="Received %i errors in %i bits" %(int(errors), int(bits)))
BER=errors/bits
self.print_log(type='I', msg="Resulting BER is %0.3g" %(BER))
return BER
#From Kosta.
def plot_generic(x, y_list, title_str, legend_list, xlabel_str, ylabel_str, xscale, yscale, plot_style_str='o-', xlim=[], ylim=[], linewidth=2, fontsize=14):
    # linewidth and fontsize were previously undefined globals; they are now keyword
    # arguments with assumed defaults so the function is self-contained.
    if (xscale, yscale) == ('linear', 'linear'):
        plot_type_str = 'plot'
    elif (xscale, yscale) == ('log', 'linear'):
        plot_type_str = 'semilogx'
    elif (xscale, yscale) == ('linear', 'log'):
        plot_type_str = 'semilogy'
    elif (xscale, yscale) == ('log', 'log'):
        plot_type_str = 'loglog'
    else:
        raise Exception('xscale = %s, yscale = %s, both should be linear or log!!' % (xscale, yscale))
    fig, ax = plt.subplots() # default is 1,1,1
    plot_func = getattr(ax, plot_type_str)  # resolve the Axes method once instead of building exec() strings
    if (isinstance(x[0], list)) and (len(x) == len(y_list)): # several plots with different x values
        for x_vals, y in zip(x, y_list):
            plot_func(x_vals, y, plot_style_str, linewidth=linewidth)
    else:
        if (isinstance(y_list[0], list)): # several plots with the same x values
            for y in y_list:
                plot_func(x, y, plot_style_str, linewidth=linewidth)
        else: # single plot only
            plot_func(x, y_list, plot_style_str, linewidth=linewidth)
    if xlim != []:
        plt.xlim(xlim)
    if ylim != []:
        plt.ylim(ylim)
    ax.set_xlabel(xlabel_str, fontsize=fontsize)
    plt.ylabel(ylabel_str, fontsize=fontsize)
    if title_str == []:
        loc_y = 1.05
    else:
        plt.title(title_str, fontsize=fontsize)
        loc_y = 1
    if legend_list != []:
        plt.legend(legend_list, loc=(0, loc_y))
    plt.grid(True, which='both')
    ax.tick_params(axis='both', which='major', labelsize=fontsize)
    plt.show()
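# Hypothetical usage sketch for the mixin (assumes the host class, e.g. f2_system,
# provides print_log, Rs and the signals referenced in the argdicts):
#   self.oscilloscope({'ymax': 1, 'ymin': -1, 'timex': t, 'sigin': x,
#                      'tstr': 'Oscilloscope', 'printstr': 'osc.eps'})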
| [
[
[
206,
217
],
[
896,
898
],
[
912,
914
],
[
1917,
1919
],
[
1933,
1935
],
[
1969,
1971
],
[
1985,
1987
],
[
3007,
3009
],
[
3487,
3489
],
[
3496,
3498
],
[
3508,
3510
],
[
3516,
3518
],
[
3843,
3845
],
[
3859,
3861
],
[
5183,
5185
],
[
5199,
5201
],
[
5850,
5852
],
[
5888,
5890
],
[
5920,
5922
],
[
5929,
5931
],
[
5937,
5939
],
[
5945,
5947
],
[
6002,
6004
],
[
6010,
6012
],
[
6692,
6694
],
[
6699,
6701
],
[
6706,
6708
],
[
6795,
6797
],
[
6802,
6804
],
[
6809,
6811
]
],
[
[
225,
244
],
[
3377,
3380
]
],
[
[
252,
269
],
[
271,
274
]
],
[
[
322,
346
],
[
728,
731
],
[
751,
754
],
[
810,
813
],
[
854,
857
],
[
886,
889
],
[
1115,
1118
],
[
1155,
1158
],
[
1203,
1206
],
[
1294,
1297
],
[
1412,
1415
],
[
1736,
1739
],
[
1759,
1762
],
[
1818,
1821
],
[
1871,
1874
],
[
1903,
1906
],
[
1955,
1958
],
[
2184,
2187
],
[
2224,
2227
],
[
2270,
2273
],
[
2352,
2355
],
[
2470,
2473
],
[
3287,
3290
],
[
3310,
3313
],
[
3538,
3541
],
[
3801,
3804
],
[
3833,
3836
],
[
4033,
4036
],
[
4073,
4076
],
[
4137,
4140
],
[
4417,
4420
],
[
4498,
4501
],
[
4845,
4848
],
[
4868,
4871
],
[
4961,
4964
],
[
4998,
5001
],
[
5064,
5067
],
[
5141,
5144
],
[
5173,
5176
],
[
5225,
5228
],
[
5265,
5268
],
[
5313,
5316
],
[
5404,
5407
],
[
7704,
7707
],
[
8328,
8331
],
[
8370,
8373
],
[
8438,
8441
],
[
8543,
8546
],
[
8635,
8638
],
[
8679,
8682
],
[
8779,
8782
]
],
[
[
354,
369
]
],
[
[
7124,
7136
]
]
] |
"""
McsCMOS
~~~~~~~
Wrapper and Helper to access MCS CMOS Data within H5 Files
:copyright: (c) 2018 by Multi Channel Systems MCS GmbH
:license: see LICENSE for more details
"""
import h5py
import numpy as np
class CMOSData(h5py.File):
"""
Wrapper for a HDF5 File containing CMOS Data
"""
def __init__(self, path):
"""
Creates a CMOSData file and links it to a H5 File
:param path: Path to a H5 File containing CMOS Data
:type path: string
"""
super(CMOSData, self).__init__(path, mode='r')
# -- map raw data --
self.raw_data= self['/Data/Recording_0/FrameStream/Stream_0/FrameDataEntity_0/FrameData']
self.conv_factors= self['/Data/Recording_0/FrameStream/Stream_0/FrameDataEntity_0/ConversionFactors']
# - map proxy data -
self.conv_data = CMOSConvProxy(self)
# -- map meta --
self.meta={}
# - from InfoFrame
info_frame= self['/Data/Recording_0/FrameStream/Stream_0/InfoFrame']
for key in info_frame.dtype.names:
if hasattr(info_frame[key][0], "decode"):
self.meta[key]=info_frame[key][0].decode('utf-8')
else:
self.meta[key]=info_frame[key][0]
if("Tick" in self.meta):
self.meta["FrameRate"] = 10.0**6/self.meta["Tick"]
# - from File
for key, value in self.attrs.items():
if hasattr(value, "decode"):
self.meta[key]= value.decode('utf-8')
else:
self.meta[key]= value
# - from Data Group
for key, value in self['/Data'].attrs.items():
if hasattr(value, "decode"):
self.meta[key]= value.decode('utf-8')
else:
self.meta[key]= value
# -- map events --
if("EventStream" in self["Data/Recording_0/"].keys()):
event_group = self["Data/Recording_0/EventStream/Stream_0/"]
event_info = self["Data/Recording_0/EventStream/Stream_0/InfoEvent"]
self.events={}
self.event_frames={}
for key in event_group.keys():
if "EventEntity" in key:
info = event_info["Label"][event_info["EventID"]==int(key.split("_")[1])][0]
self.events[info] = event_group[key][0, 0]
self.event_frames[info] = event_group[key][0, 0]/self.meta["Tick"]
class CMOSConvProxy:
"""
Private Class, should be embedded within a CMOSData Object.
A proxy that transparently converts raw data to calibrated data.
"""
def __init__(self, parent):
"""
Creates a new CMOSConvProxy
:param parent: Object that can provide raw_data and conv_factors
:type parent: CMOSData
"""
self._parent = parent
self.dtype = np.int32
def __getitem__(self, slices):
"""
Sliced access to converted data
:param slices: Data-slices to retrieve
:returns: converted data
"""
raw_data = self._parent.raw_data.__getitem__(slices)
        conv_factors = self._parent.conv_factors.__getitem__((slices[0], slices[1]))
        return (raw_data * conv_factors).astype(self.dtype)
@property
def shape(self):
"""
Shape of the data
"""
return self._parent.raw_data.shape
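# Minimal usage sketch (hypothetical file name; indexing mirrors the raw FrameData layout):
#   data = CMOSData('recording.h5')
#   calibrated = data.conv_data[0:10, 0:10]   # converted on the fly by CMOSConvProxy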
class CMOSSpikes(h5py.File):
"""
Wrapper for a HDF5 File containing CMOS Spike Data.
Spike Information is accessible through the .spike Member,
Waveform Information (if available) through the .waveforms Member.
"""
def __init__(self, path):
super(CMOSSpikes, self).__init__(path)
# -- Check for right structure --
if("data" in self.keys() and "spikes" in self['data'].keys()):
# -- Map Spike-Data to RecordArray
self.spikes = np.core.records.fromarrays(self['data/spikes'][:,:],
names='time, col, row',
formats = 'int64, int64, int64')
# -- Map Waveforms to Array
if("waveforms" in self['data'].keys()):
self.waveforms = self['data/waveforms'][:,:].transpose()
else:
raise IOError(path+ " has no valid CMOSSpikeFile Structure") | [
[
[
222,
226
],
[
265,
269
],
[
3586,
3590
]
],
[
[
235,
246
],
[
3023,
3025
],
[
4094,
4096
]
],
[
[
256,
264
],
[
566,
574
]
],
[
[
2597,
2610
],
[
915,
928
]
],
[
[
3575,
3585
],
[
3855,
3865
]
]
] |
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='namelengthsrc',
parent_name='bar.hoverlabel',
**kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| [
[
[
7,
35
],
[
67,
80
]
],
[
[
44,
66
],
[
257,
279
]
]
] |
from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
from rest_framework_extensions.key_constructor.constructors import bits
class UserKeyConstructor(DefaultKeyConstructor):
user = bits.UserKeyBit()
| [
[
[
67,
88
],
[
188,
209
]
],
[
[
156,
160
],
[
223,
227
]
],
[
[
169,
187
]
]
] |
import numpy as np
from PIL import Image
import sys
def numpy_to_png(source, dest):
    """Load a NumPy array from a .npy file at `source` and save it as a PNG at `dest`."""
    image = Image.fromarray(np.load(source))
    image.save(dest, "PNG")
if __name__ == "__main__":
source = sys.argv[1]
dest = source.split('.npy')[0] + '.png'
print(source, " to ", dest)
numpy_to_png(source, dest)
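# Shell usage sketch (hypothetical file names; the script derives the .png path itself):
#   python numpy_to_png.py frame.npy   ->  writes frame.png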
| [
[
[
7,
18
],
[
111,
113
]
],
[
[
35,
40
],
[
95,
100
]
],
[
[
48,
51
],
[
192,
195
]
],
[
[
57,
69
],
[
278,
290
]
],
[
[
183,
189
],
[
213,
219
],
[
254,
260
],
[
291,
297
]
],
[
[
206,
210
],
[
270,
274
],
[
299,
303
]
]
] |
# -*- coding: utf-8 -*-
from codecs import open
import logging
import colander
import deform
from itsdangerous import BadData, SignatureExpired
from h import i18n, models, validators
from h.accounts import util
from h.services.user import UserNotActivated
from h.models.user import (
EMAIL_MAX_LENGTH,
USERNAME_MAX_LENGTH,
USERNAME_MIN_LENGTH,
USERNAME_PATTERN,
)
from h.schemas.base import CSRFSchema, JSONSchema
_ = i18n.TranslationString
log = logging.getLogger(__name__)
PASSWORD_MIN_LENGTH = 2 # FIXME: this is ridiculous
USERNAME_BLACKLIST = None
def get_blacklist():
global USERNAME_BLACKLIST
if USERNAME_BLACKLIST is None:
# Try to load the blacklist file from disk. If, for whatever reason, we
# can't load the file, then don't crash out, just log a warning about
# the problem.
try:
with open('h/accounts/blacklist', encoding='utf-8') as fp:
blacklist = fp.readlines()
except (IOError, ValueError):
log.exception('unable to load blacklist')
blacklist = []
USERNAME_BLACKLIST = set(l.strip().lower() for l in blacklist)
return USERNAME_BLACKLIST
def unique_email(node, value):
'''Colander validator that ensures no user with this email exists.'''
request = node.bindings['request']
user = models.User.get_by_email(request.db, value, request.authority)
if user and user.userid != request.authenticated_userid:
msg = _("Sorry, an account with this email address already exists.")
raise colander.Invalid(node, msg)
def unique_username(node, value):
'''Colander validator that ensures the username does not exist.'''
request = node.bindings['request']
user = models.User.get_by_username(request.db, value, request.authority)
if user:
msg = _("This username is already taken.")
raise colander.Invalid(node, msg)
def email_node(**kwargs):
"""Return a Colander schema node for a new user email."""
return colander.SchemaNode(
colander.String(),
validator=colander.All(
validators.Length(max=EMAIL_MAX_LENGTH),
validators.Email(),
unique_email,
),
widget=deform.widget.TextInputWidget(template='emailinput'),
**kwargs)
def unblacklisted_username(node, value, blacklist=None):
'''Colander validator that ensures the username is not blacklisted.'''
if blacklist is None:
blacklist = get_blacklist()
if value.lower() in blacklist:
# We raise a generic "user with this name already exists" error so as
# not to make explicit the presence of a blacklist.
msg = _("Sorry, an account with this username already exists. "
"Please enter another one.")
raise colander.Invalid(node, msg)
def password_node(**kwargs):
"""Return a Colander schema node for an existing user password."""
kwargs.setdefault('widget', deform.widget.PasswordWidget())
return colander.SchemaNode(
colander.String(),
**kwargs)
def new_password_node(**kwargs):
"""Return a Colander schema node for a new user password."""
kwargs.setdefault('widget', deform.widget.PasswordWidget())
return colander.SchemaNode(
colander.String(),
validator=validators.Length(min=PASSWORD_MIN_LENGTH),
**kwargs)
class LoginSchema(CSRFSchema):
username = colander.SchemaNode(
colander.String(),
title=_('Username / email'),
widget=deform.widget.TextInputWidget(autofocus=True),
)
password = colander.SchemaNode(
colander.String(),
title=_('Password'),
widget=deform.widget.PasswordWidget()
)
def validator(self, node, value):
super(LoginSchema, self).validator(node, value)
request = node.bindings['request']
username = value.get('username')
password = value.get('password')
user_service = request.find_service(name='user')
user_password_service = request.find_service(name='user_password')
try:
user = user_service.fetch_for_login(username_or_email=username)
except UserNotActivated:
err = colander.Invalid(node)
err['username'] = _("Please check your email and open the link "
"to activate your account.")
raise err
if user is None:
err = colander.Invalid(node)
err['username'] = _('User does not exist.')
raise err
if not user_password_service.check_password(user, password):
err = colander.Invalid(node)
err['password'] = _('Wrong password.')
raise err
value['user'] = user
class ForgotPasswordSchema(CSRFSchema):
email = colander.SchemaNode(
colander.String(),
validator=colander.All(validators.Email()),
title=_('Email address'),
widget=deform.widget.TextInputWidget(template='emailinput',
autofocus=True),
)
def validator(self, node, value):
super(ForgotPasswordSchema, self).validator(node, value)
request = node.bindings['request']
email = value.get('email')
user = models.User.get_by_email(request.db, email, request.authority)
if user is None:
err = colander.Invalid(node)
err['email'] = _('Unknown email address.')
raise err
value['user'] = user
class RegisterSchema(CSRFSchema):
username = colander.SchemaNode(
colander.String(),
validator=colander.All(
validators.Length(min=USERNAME_MIN_LENGTH,
max=USERNAME_MAX_LENGTH),
colander.Regex(
USERNAME_PATTERN,
msg=_("Must have only letters, numbers, periods, and "
"underscores.")),
unique_username,
unblacklisted_username,
),
title=_('Username'),
hint=_('Must be between {min} and {max} characters, containing only '
'letters, numbers, periods, and underscores.').format(
min=USERNAME_MIN_LENGTH,
max=USERNAME_MAX_LENGTH
),
widget=deform.widget.TextInputWidget(autofocus=True),
)
email = email_node(title=_('Email address'))
password = new_password_node(title=_('Password'))
class ResetCode(colander.SchemaType):
"""Schema type transforming a reset code to a user and back."""
def serialize(self, node, appstruct):
if appstruct is colander.null:
return colander.null
if not isinstance(appstruct, models.User):
raise colander.Invalid(node, '%r is not a User' % appstruct)
request = node.bindings['request']
serializer = request.registry.password_reset_serializer
return serializer.dumps(appstruct.username)
def deserialize(self, node, cstruct):
if cstruct is colander.null:
return colander.null
request = node.bindings['request']
serializer = request.registry.password_reset_serializer
try:
(username, timestamp) = serializer.loads(cstruct,
max_age=72*3600,
return_timestamp=True)
except SignatureExpired:
raise colander.Invalid(node, _('Reset code has expired. Please reset your password again'))
except BadData:
raise colander.Invalid(node, _('Wrong reset code.'))
user = models.User.get_by_username(request.db, username, request.authority)
if user is None:
raise colander.Invalid(node, _('Your reset code is not valid'))
if user.password_updated is not None and timestamp < user.password_updated:
raise colander.Invalid(node,
_('This reset code has already been used.'))
return user
class ResetPasswordSchema(CSRFSchema):
# N.B. this is the field into which the user puts their reset code, but we
# call it `user` because when validated, it will return a `User` object.
user = colander.SchemaNode(
ResetCode(),
title=_('Reset code'),
hint=_('This will be emailed to you.'),
widget=deform.widget.TextInputWidget(disable_autocomplete=True))
password = new_password_node(
title=_('New password'),
widget=deform.widget.PasswordWidget(disable_autocomplete=True))
class EmailChangeSchema(CSRFSchema):
email = email_node(title=_('Email address'))
# No validators: all validation is done on the email field
password = password_node(title=_('Confirm password'),
hide_until_form_active=True)
def validator(self, node, value):
super(EmailChangeSchema, self).validator(node, value)
exc = colander.Invalid(node)
request = node.bindings['request']
svc = request.find_service(name='user_password')
user = request.user
if not svc.check_password(user, value.get('password')):
exc['password'] = _('Wrong password.')
if exc.children:
raise exc
class PasswordChangeSchema(CSRFSchema):
password = password_node(title=_('Current password'),
inactive_label=_('Password'))
new_password = password_node(title=_('New password'),
hide_until_form_active=True)
# No validators: all validation is done on the new_password field and we
# merely assert that the confirmation field is the same.
new_password_confirm = colander.SchemaNode(
colander.String(),
title=_('Confirm new password'),
widget=deform.widget.PasswordWidget(),
hide_until_form_active=True)
def validator(self, node, value):
super(PasswordChangeSchema, self).validator(node, value)
exc = colander.Invalid(node)
request = node.bindings['request']
svc = request.find_service(name='user_password')
user = request.user
if value.get('new_password') != value.get('new_password_confirm'):
exc['new_password_confirm'] = _('The passwords must match')
if not svc.check_password(user, value.get('password')):
exc['password'] = _('Wrong password.')
if exc.children:
raise exc
def validate_url(node, cstruct):
try:
util.validate_url(cstruct)
except ValueError as exc:
raise colander.Invalid(node, str(exc))
def validate_orcid(node, cstruct):
try:
util.validate_orcid(cstruct)
except ValueError as exc:
raise colander.Invalid(node, str(exc))
class EditProfileSchema(CSRFSchema):
display_name = colander.SchemaNode(
colander.String(),
missing=None,
validator=validators.Length(max=30),
title=_('Display name'))
description = colander.SchemaNode(
colander.String(),
missing=None,
validator=validators.Length(max=250),
widget=deform.widget.TextAreaWidget(
max_length=250,
rows=4,
),
title=_('Description'))
location = colander.SchemaNode(
colander.String(),
missing=None,
validator=validators.Length(max=100),
title=_('Location'))
link = colander.SchemaNode(
colander.String(),
missing=None,
validator=colander.All(
validators.Length(max=250),
validate_url),
title=_('Link'))
orcid = colander.SchemaNode(
colander.String(),
missing=None,
validator=validate_orcid,
title=_('ORCID'),
hint=_('ORCID provides a persistent identifier for researchers (see orcid.org).'))
class NotificationsSchema(CSRFSchema):
types = (('reply', _('Email me when someone replies to one of my annotations.'),),)
notifications = colander.SchemaNode(
colander.Set(),
widget=deform.widget.CheckboxChoiceWidget(
omit_label=True,
values=types),
)
class CreateUserAPISchema(JSONSchema):
"""Validate a user JSON object."""
schema = {
'type': 'object',
'properties': {
'authority': {
'type': 'string',
'format': 'hostname',
},
'username': {
'type': 'string',
'minLength': 3,
'maxLength': 30,
'pattern': '^[A-Za-z0-9._]+$',
},
'email': {
'type': 'string',
'format': 'email',
},
},
'required': [
'authority',
'username',
'email',
],
}
def includeme(config):
pass
| [
[
[
43,
47
],
[
873,
877
]
],
[
[
55,
62
],
[
466,
473
]
],
[
[
71,
79
],
[
3431,
3439
],
[
3460,
3468
],
[
3599,
3607
],
[
3628,
3636
],
[
4819,
4827
],
[
4848,
4856
],
[
4885,
4893
],
[
5575,
5583
],
[
5604,
5612
],
[
5641,
5649
],
[
5778,
5786
],
[
6465,
6473
],
[
8242,
8250
],
[
9716,
9724
],
[
9745,
9753
],
[
10844,
10852
],
[
10873,
10881
],
[
11011,
11019
],
[
11040,
11048
],
[
11279,
11287
],
[
11308,
11316
],
[
11436,
11444
],
[
11465,
11473
],
[
11524,
11532
],
[
11643,
11651
],
[
11672,
11680
],
[
12014,
12022
],
[
12043,
12051
],
[
1562,
1570
],
[
1891,
1899
],
[
2020,
2028
],
[
2049,
2057
],
[
2086,
2094
],
[
2809,
2817
],
[
3014,
3022
],
[
3043,
3051
],
[
3255,
3263
],
[
3284,
3292
],
[
4223,
4231
],
[
4450,
4458
],
[
4639,
4647
],
[
5394,
5402
],
[
6623,
6631
],
[
6657,
6665
],
[
6740,
6748
],
[
7019,
7027
],
[
7053,
7061
],
[
7448,
7456
],
[
7576,
7584
],
[
7751,
7759
],
[
7911,
7919
],
[
8957,
8965
],
[
10007,
10015
],
[
10593,
10601
],
[
10753,
10761
]
],
[
[
87,
93
],
[
3531,
3537
],
[
3691,
3697
],
[
4968,
4974
],
[
6291,
6297
],
[
8378,
8384
],
[
8518,
8524
],
[
9820,
9826
],
[
11142,
11148
],
[
12074,
12080
],
[
2237,
2243
],
[
2971,
2977
],
[
3212,
3218
]
],
[
[
119,
126
],
[
7549,
7556
]
],
[
[
128,
144
],
[
7412,
7428
]
],
[
[
160,
164
],
[
437,
441
]
],
[
[
166,
172
],
[
1347,
1353
],
[
1747,
1753
],
[
5287,
5293
],
[
6708,
6714
],
[
7639,
7645
]
],
[
[
174,
184
],
[
4898,
4908
],
[
5667,
5677
],
[
10932,
10942
],
[
11099,
11109
],
[
11367,
11377
],
[
11550,
11560
],
[
2112,
2122
],
[
2165,
2175
],
[
3321,
3331
]
],
[
[
208,
212
],
[
10522,
10526
],
[
10680,
10684
]
],
[
[
241,
257
],
[
4187,
4203
]
],
[
[
290,
306
],
[
2134,
2150
]
],
[
[
312,
331
],
[
5744,
5763
],
[
6245,
6264
]
],
[
[
337,
356
],
[
5689,
5708
],
[
6208,
6227
]
],
[
[
362,
378
],
[
5810,
5826
]
],
[
[
409,
419
],
[
3403,
3413
],
[
4794,
4804
],
[
5547,
5557
],
[
8062,
8072
],
[
8601,
8611
],
[
9301,
9311
],
[
10812,
10822
],
[
11892,
11902
]
],
[
[
421,
431
],
[
12200,
12210
]
],
[
[
433,
434
],
[
3493,
3494
],
[
3661,
3662
],
[
4933,
4934
],
[
5848,
5849
],
[
6029,
6030
],
[
6057,
6058
],
[
6373,
6374
],
[
6432,
6433
],
[
8298,
8299
],
[
8328,
8329
],
[
8484,
8485
],
[
8643,
8644
],
[
8761,
8762
],
[
9349,
9350
],
[
9416,
9417
],
[
9470,
9471
],
[
9778,
9779
],
[
10973,
10974
],
[
11245,
11246
],
[
11409,
11410
],
[
11619,
11620
],
[
11761,
11762
],
[
11786,
11787
],
[
11928,
11929
],
[
1485,
1486
],
[
1840,
1841
],
[
2692,
2693
],
[
4276,
4277
],
[
4503,
4504
],
[
4692,
4693
],
[
5444,
5445
],
[
7471,
7472
],
[
7599,
7600
],
[
7774,
7775
],
[
7969,
7970
],
[
9203,
9204
],
[
10276,
10277
],
[
10401,
10402
]
],
[
[
460,
463
],
[
1020,
1023
]
],
[
[
495,
514
],
[
3343,
3362
]
],
[
[
548,
566
],
[
634,
652
]
],
[
[
580,
593
],
[
2489,
2502
]
],
[
[
1196,
1208
],
[
2197,
2209
]
],
[
[
1596,
1611
],
[
5951,
5966
]
],
[
[
1925,
1935
],
[
6356,
6366
],
[
8626,
8636
]
],
[
[
2315,
2337
],
[
5980,
6002
]
],
[
[
2843,
2856
],
[
8741,
8754
],
[
9329,
9342
],
[
9450,
9463
]
],
[
[
3086,
3103
],
[
6408,
6425
],
[
8451,
8468
]
],
[
[
3391,
3402
],
[
3781,
3792
]
],
[
[
4773,
4793
],
[
5142,
5162
]
],
[
[
5532,
5546
]
],
[
[
6455,
6464
],
[
8271,
8280
]
],
[
[
8042,
8061
]
],
[
[
8583,
8600
],
[
8895,
8912
]
],
[
[
9280,
9300
],
[
9942,
9962
]
],
[
[
10476,
10488
],
[
11590,
11602
]
],
[
[
10632,
10646
],
[
11731,
11745
]
],
[
[
10794,
10811
]
],
[
[
11872,
11891
]
],
[
[
12180,
12199
]
],
[
[
12852,
12861
]
],
[
[
1097,
1115
],
[
1171,
1189
]
]
] |
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import re
import unittest
from webkitpy.common.host import Host
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
from webkitpy.w3c.test_converter import _W3CTestConverter
DUMMY_FILENAME = 'dummy.html'
DUMMY_PATH = 'dummy/testharness/path'
class W3CTestConverterTest(unittest.TestCase):
# FIXME: When we move to using a MockHost, this method should be removed, since
# then we can just pass in a dummy dir path
def fake_dir_path(self, dirname):
filesystem = Host().filesystem
webkit_root = WebKitFinder(filesystem).webkit_base()
return filesystem.abspath(filesystem.join(webkit_root, "LayoutTests", "css", dirname))
def test_read_prefixed_property_list(self):
""" Tests that the current list of properties requiring the -webkit- prefix load correctly """
# FIXME: We should be passing in a MockHost here ...
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
prop_list = converter.prefixed_properties
self.assertTrue(prop_list, 'No prefixed properties found')
def test_convert_for_webkit_nothing_to_convert(self):
""" Tests convert_for_webkit() using a basic test that has nothing to convert """
test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_no_conversion_happened(converted, test_html)
def test_convert_for_webkit_harness_only(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
fake_dir_path = self.fake_dir_path("harnessonly")
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
converter.feed(test_html)
converter.close()
converted = converter.output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, [])
def test_convert_for_webkit_properties_only(self):
""" Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test1@: propvalue;"></div>
</body>
</html>
"""
fake_dir_path = self.fake_dir_path('harnessandprops')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_for_webkit_harness_and_properties(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
#block2 { @test1@: propvalue; }
#block3 { @test2@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test3@: propvalue;"></div>
</body>
</html>
"""
fake_dir_path = self.fake_dir_path('harnessandprops')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_test_harness_paths(self):
""" Tests convert_testharness_paths() with a test that uses all three testharness files """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
</head>
"""
fake_dir_path = self.fake_dir_path('testharnesspaths')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 2, 1)
def test_convert_vendor_prefix_js_paths(self):
test_html = """<head>
<script src="/common/vendor-prefix.js">
</head>
"""
fake_dir_path = self.fake_dir_path('adapterjspaths')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
new_html = BeautifulSoup(converted[1])
# Verify the original paths are gone, and the new paths are present.
orig_path_pattern = re.compile('\"/common/vendor-prefix.js')
self.assertEquals(len(new_html.findAll(src=orig_path_pattern)), 0, 'vendor-prefix.js path was not converted')
resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
new_relpath = os.path.relpath(resources_dir, fake_dir_path)
relpath_pattern = re.compile(new_relpath)
self.assertEquals(len(new_html.findAll(src=relpath_pattern)), 1, 'vendor-prefix.js relative path not correct')
def test_convert_prefixed_properties(self):
""" Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
10 in one style block + 5 in another style
block + 5 inline styles, including one with multiple prefixed properties.
The properties in the test content are in all sorts of wack formatting.
"""
test_html = """<html>
<style type="text/css"><![CDATA[
.block1 {
width: 300px;
height: 300px
}
.block2 {
@test0@: propvalue;
}
.block3{@test1@: propvalue;}
.block4 { @test2@:propvalue; }
.block5{ @test3@ :propvalue; }
#block6 { @test4@ : propvalue; }
#block7
{
@test5@: propvalue;
}
#block8 { @test6@: propvalue; }
#block9:pseudo
{
@test7@: propvalue;
@test8@: propvalue propvalue propvalue;
}
]]></style>
</head>
<body>
<div id="elem1" style="@test9@: propvalue;"></div>
<div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
<div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@ :propvalue;"></div>
<div id="elem3" style="@test14@:propvalue"></div>
</body>
<style type="text/css"><![CDATA[
.block10{ @test15@: propvalue; }
.block11{ @test16@: propvalue; }
.block12{ @test17@: propvalue; }
#block13:pseudo
{
@test18@: propvalue;
@test19@: propvalue;
}
]]></style>
</html>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_prefixed_properties(converted, test_content[0])
def test_hides_all_instructions_for_manual_testers(self):
test_html = """<body>
<h1 class="instructions">Hello manual tester!</h1>
<p class="instructions some_other_class">This is how you run this test.</p>
<p style="willbeoverwritten" class="instructions">...</p>
<doesntmatterwhichtagitis class="some_other_class instructions">...</p>
<p>Legit content may contain the instructions string</p>
</body>
"""
expected_test_html = """<body>
<h1 class="instructions" style="display:none">Hello manual tester!</h1>
<p class="instructions some_other_class" style="display:none">This is how you run this test.</p>
<p class="instructions" style="display:none">...</p>
<doesntmatterwhichtagitis class="some_other_class instructions" style="display:none">...</p>
<p>Legit content may contain the instructions string</p>
</body>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.assertEqual(converted[1], expected_test_html)
def test_convert_attributes_if_needed(self):
""" Tests convert_attributes_if_needed() using a reference file that has some relative src paths """
test_html = """<html>
<head>
<script src="../../some-script.js"></script>
<style src="../../../some-style.css"></style>
</head>
<body>
<img src="../../../../some-image.jpg">
</body>
</html>
"""
test_reference_support_info = {'reference_relpath': '../', 'files': ['../../some-script.js', '../../../some-style.css', '../../../../some-image.jpg'], 'elements': ['script', 'style', 'img']}
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, test_reference_support_info)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_reference_relative_paths(converted, test_reference_support_info)
def verify_conversion_happened(self, converted):
self.assertTrue(converted, "conversion didn't happen")
def verify_no_conversion_happened(self, converted, original):
self.assertEqual(converted[1], original, 'test should not have been converted')
def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
if isinstance(converted, basestring):
converted = BeautifulSoup(converted)
resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
# Verify the original paths are gone, and the new paths are present.
orig_path_pattern = re.compile('\"/resources/testharness')
self.assertEquals(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
self.assertEquals(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')
new_relpath = os.path.relpath(resources_dir, test_path)
relpath_pattern = re.compile(new_relpath)
self.assertEquals(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
self.assertEquals(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')
def verify_prefixed_properties(self, converted, test_properties):
self.assertEqual(len(set(converted[0])), len(set(test_properties)), 'Incorrect number of properties converted')
for test_prop in test_properties:
self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')
def verify_reference_relative_paths(self, converted, reference_support_info):
idx = 0
for path in reference_support_info['files']:
expected_path = re.sub(reference_support_info['reference_relpath'], '', path, 1)
element = reference_support_info['elements'][idx]
expected_tag = '<' + element + ' src=\"' + expected_path + '\">'
self.assertTrue(expected_tag in converted[1], 'relative path ' + path + ' was not converted correctly')
idx += 1
def generate_test_content(self, full_property_list, num_test_properties, html):
"""Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
test_properties = []
count = 0
while count < num_test_properties:
test_properties.append(full_property_list[count])
count += 1
# Replace the tokens in the testhtml with the test properties. Walk backward
# through the list to replace the double-digit tokens first
index = len(test_properties) - 1
while index >= 0:
# Use the unprefixed version
test_prop = test_properties[index].replace('-webkit-', '')
# Replace the token
html = html.replace('@test' + str(index) + '@', test_prop)
index -= 1
return (test_properties, html)
| [
[
[
1342,
1344
],
[
8612,
8614
],
[
13954,
13956
]
],
[
[
1352,
1354
],
[
8346,
8348
],
[
8684,
8686
],
[
13654,
13656
],
[
14022,
14024
],
[
14838,
14840
]
],
[
[
1362,
1370
],
[
1743,
1751
]
],
[
[
1405,
1409
],
[
1962,
1966
]
],
[
[
1459,
1472
],
[
3295,
3308
],
[
5089,
5102
],
[
6287,
6300
],
[
7337,
7350
],
[
7975,
7988
],
[
10408,
10421
],
[
11679,
11692
],
[
12635,
12648
]
],
[
[
1515,
1527
],
[
2002,
2014
]
],
[
[
1574,
1587
],
[
8212,
8225
],
[
13438,
13451
]
],
[
[
1628,
1645
],
[
2370,
2387
],
[
3229,
3246
],
[
4002,
4019
],
[
4925,
4942
],
[
6218,
6235
],
[
7268,
7285
],
[
7906,
7923
],
[
10246,
10263
],
[
11613,
11630
],
[
12546,
12563
]
],
[
[
1647,
1661
],
[
2400,
2414
],
[
3259,
3273
],
[
4035,
4049
],
[
4958,
4972
],
[
6251,
6265
],
[
7301,
7315
],
[
7939,
7953
],
[
10276,
10290
],
[
11643,
11657
],
[
12576,
12590
]
],
[
[
1677,
1687
],
[
2388,
2398
],
[
3247,
3257
],
[
10264,
10274
],
[
11631,
11641
],
[
12564,
12574
]
],
[
[
1722,
1742
]
]
] |
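The generate_test_content() helper in the test file above swaps numbered @testN@ tokens for real prefixed properties, iterating from the highest index down so that @test1@ is never matched inside @test10@. A minimal standalone sketch of that substitution (the helper name and property list below are illustrative, not part of the suite):

def replace_prefixed_tokens(html, prefixed_properties):
    # Walk backward so double-digit tokens ('@test10@') are replaced before
    # their single-digit prefixes ('@test1@') can match inside them.
    for index in range(len(prefixed_properties) - 1, -1, -1):
        unprefixed = prefixed_properties[index].replace('-webkit-', '')
        html = html.replace('@test' + str(index) + '@', unprefixed)
    return prefixed_properties, html

# Illustrative call with an invented two-property list:
# replace_prefixed_tokens('<div style="@test0@: x; @test1@: y;"></div>',
#                         ['-webkit-flex-wrap', '-webkit-transform'])
# -> (['-webkit-flex-wrap', '-webkit-transform'],
#     '<div style="flex-wrap: x; transform: y;"></div>')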
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations(object):
"""SecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
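# Usage sketch (not part of the generated code): the operations group is reached
# through a management client rather than instantiated directly. Assuming an
# authenticated NetworkManagementClient, begin_delete returns an LROPoller that
# can be blocked on with result(); all resource names below are placeholders.
#
#   poller = client.security_rules.begin_delete(
#       resource_group_name="my-rg",
#       network_security_group_name="my-nsg",
#       security_rule_name="allow-ssh",
#   )
#   poller.result()  # waits for the long-running delete to finish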
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.SecurityRule"
"""Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> "models.SecurityRule"
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.SecurityRule"]
"""Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:param security_rule_parameters: Parameters supplied to the create or update network security
rule operation.
:type security_rule_parameters: ~azure.mgmt.network.v2020_03_01.models.SecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.SecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
security_rule_parameters=security_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
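# Usage sketch (not part of the generated code): begin_create_or_update takes a
# SecurityRule model (field names below follow the v2020_03_01 models package;
# resource names are placeholders) and returns an LROPoller for the new rule.
#
#   from azure.mgmt.network.v2020_03_01.models import SecurityRule
#   rule = SecurityRule(
#       protocol="Tcp", access="Allow", direction="Inbound", priority=100,
#       source_address_prefix="*", source_port_range="*",
#       destination_address_prefix="*", destination_port_range="22",
#   )
#   poller = client.security_rules.begin_create_or_update(
#       "my-rg", "my-nsg", "allow-ssh", rule)
#   created = poller.result()  # deserialized SecurityRule on success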
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.SecurityRuleListResult"]
"""Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
| [
[
[
486,
499
],
[
1015,
1028
]
],
[
[
507,
515
]
],
[
[
551,
576
],
[
2454,
2479
],
[
8643,
8668
],
[
11159,
11184
],
[
18650,
18675
]
],
[
[
578,
595
],
[
4074,
4091
],
[
10238,
10255
],
[
13175,
13192
],
[
20980,
20997
]
],
[
[
597,
616
],
[
2514,
2533
],
[
8703,
8722
],
[
11219,
11238
],
[
18710,
18729
]
],
[
[
618,
639
],
[
2486,
2507
],
[
8675,
8696
],
[
11191,
11212
],
[
18682,
18703
]
],
[
[
641,
650
],
[
3972,
3981
],
[
10136,
10145
],
[
13073,
13082
],
[
20874,
20883
]
],
[
[
681,
690
],
[
21100,
21109
]
],
[
[
723,
739
],
[
1227,
1243
]
],
[
[
782,
793
],
[
1244,
1255
]
],
[
[
795,
807
],
[
1257,
1269
]
],
[
[
839,
848
],
[
7002,
7011
],
[
7280,
7289
],
[
17016,
17025
],
[
17294,
17303
]
],
[
[
850,
859
],
[
6909,
6918
],
[
16923,
16932
]
],
[
[
861,
874
]
],
[
[
914,
928
],
[
4124,
4138
],
[
10288,
10302
],
[
13225,
13239
],
[
21030,
21044
]
],
[
[
977,
987
],
[
6783,
6793
],
[
16784,
16794
]
],
[
[
1004,
1010
],
[
1902,
1908
]
],
[
[
1107,
1110
],
[
1285,
1288
],
[
1292,
1295
]
],
[
[
1112,
1120
],
[
1217,
1225
]
],
[
[
1122,
1126
],
[
1275,
1279
]
],
[
[
1128,
1135
]
],
[
[
1137,
1145
]
],
[
[
1147,
1155
],
[
1208,
1216
]
],
[
[
1157,
1164
],
[
1181,
1188
]
],
[
[
1166,
1171
]
],
[
[
1177,
1178
],
[
1272,
1273
]
],
[
[
1198,
1205
]
],
[
[
1305,
1328
]
]
] |
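The list() operation in the generated client above returns an ItemPaged iterator whose pages are fetched lazily through prepare_request/extract_data/get_next. A minimal usage sketch, assuming an authenticated multi-API NetworkManagementClient (client construction is not part of the generated file; the resource names are placeholders):

from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

# Hedged sketch: iterate every security rule in one network security group.
client = NetworkManagementClient(DefaultAzureCredential(), subscription_id="<subscription-id>")
for rule in client.security_rules.list("my-rg", "my-nsg"):
    # Each item is a deserialized SecurityRule; paging is handled by ItemPaged.
    print(rule.name, rule.direction, rule.priority)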