| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 10 to 805k |
| ext | string | 6 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 176 |
| max_stars_repo_name | string | length 7 to 114 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 176 |
| max_issues_repo_name | string | length 7 to 114 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 48.5k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 176 |
| max_forks_repo_name | string | length 7 to 114 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 10 to 805k |
| avg_line_length | float64 | 5.53 to 11k |
| max_line_length | int64 | 10 to 129k |
| alphanum_fraction | float64 | 0.13 to 0.93 |
| content_no_comment | string | length 0 to 449k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
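The three numeric columns (`avg_line_length`, `max_line_length`, `alphanum_fraction`) are per-file statistics over `content`. The sketch below shows one plausible way to compute them; the exact formulas used when the dataset was built are an assumption here, not taken from the dataset's own pipeline.

```python
# Sketch only: plausible definitions of the per-file statistics columns.
# Treat these formulas as assumptions, not the canonical implementation.
def line_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }

print(line_stats("#!/usr/bin/env python\n# coding: utf-8\n"))
```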
f712c86400d482f402d2dea0381b0ef298481b24 | 80 | py | Python | tadpole/template/app/lib/__init__.py | echoyuanliang/pine | 22175e6aea0ca9b02d6542677b27a690c1501c9c | ["MIT"] | 2 | 2017-12-02T07:02:31.000Z | 2020-10-13T02:20:18.000Z | tadpole/template/app/lib/__init__.py | echoyuanliang/pine | 22175e6aea0ca9b02d6542677b27a690c1501c9c | ["MIT"] | null | null | null | tadpole/template/app/lib/__init__.py | echoyuanliang/pine | 22175e6aea0ca9b02d6542677b27a690c1501c9c | ["MIT"] | 1 | 2018-04-23T04:59:38.000Z | 2018-04-23T04:59:38.000Z |
#!/usr/bin/env python
# coding: utf-8
"""
create at 2017/11/5 by allen
"""
| 11.428571 | 32 | 0.5875 | true | true |
f712c86751b2c032848e2d896cff8be52f3866a2 | 1,979 | py | Python | computorv2/types/function.py | ayoubyt/ComputorV2 | d3aef2802b795327b8b3b190e0744d39508638b3 | ["MIT"] | null | null | null | computorv2/types/function.py | ayoubyt/ComputorV2 | d3aef2802b795327b8b3b190e0744d39508638b3 | ["MIT"] | null | null | null | computorv2/types/function.py | ayoubyt/ComputorV2 | d3aef2802b795327b8b3b190e0744d39508638b3 | ["MIT"] | null | null | null |
from inspect import signature
from typing import Callable, Any, List
import re
import copy
from .type import Type
class Function(Type):
def __init__(self, fn: Callable[..., Any], name: str = "anonymouse") -> None:
self.name = name
self.vars = list(signature(fn).parameters)
self.expr = "[built-in]"
self.fn = fn
self.varnum = len(signature(fn).parameters)
def __call__(self, *args, **kwds):
return self.fn(*args, **kwds)
def __str__(self) -> str:
return f"{self.name}({','.join(self.vars)})={self.expr}"
class ListFunction(Function):
pattern = r"[a-zA-Z]+\(.+\)"
def __init__(self, expr: str, vars: List[str], name: str = "anonymouse") -> None:
self.name = name
self.expr = expr
self.vars = vars
self.varnum = len(vars)
from ..expression import infix_to_rpnlist
rpn_list = infix_to_rpnlist(expr)
for i in range(len(rpn_list)):
if (rpn_list[i] in vars):
rpn_list[i] = str(vars.index(rpn_list[i]))
self.rpn_list = rpn_list
def __call__(self, *args, **kwds):
res = copy.deepcopy(self.rpn_list)
for i in range(len(self.rpn_list)):
if isinstance(res[i], str) and res[i].isdigit():
res[i] = args[int(res[i])]
from ..expression import eval_rpn
return eval_rpn(res)
def subvars(self):
# a function to replace variables with their values
def f(m: re.Match):
from ..ft_global import user_vars
word = m.group().lower()
if word in user_vars and not isinstance(user_vars[word], Function):
return(str(user_vars[word]))
else:
return(m.group())
result = re.sub(r"[a-zA-Z]+", f, self.expr)
return result.strip()
def __str__(self) -> str:
result = self.subvars()
return f"{self.name}({','.join(self.vars)}) = {result}"
| 32.983333 | 86 | 0.573017 |
from inspect import signature
from typing import Callable, Any, List
import re
import copy
from .type import Type
class Function(Type):
def __init__(self, fn: Callable[..., Any], name: str = "anonymouse") -> None:
self.name = name
self.vars = list(signature(fn).parameters)
self.expr = "[built-in]"
self.fn = fn
self.varnum = len(signature(fn).parameters)
def __call__(self, *args, **kwds):
return self.fn(*args, **kwds)
def __str__(self) -> str:
return f"{self.name}({','.join(self.vars)})={self.expr}"
class ListFunction(Function):
pattern = r"[a-zA-Z]+\(.+\)"
def __init__(self, expr: str, vars: List[str], name: str = "anonymouse") -> None:
self.name = name
self.expr = expr
self.vars = vars
self.varnum = len(vars)
from ..expression import infix_to_rpnlist
rpn_list = infix_to_rpnlist(expr)
for i in range(len(rpn_list)):
if (rpn_list[i] in vars):
rpn_list[i] = str(vars.index(rpn_list[i]))
self.rpn_list = rpn_list
def __call__(self, *args, **kwds):
res = copy.deepcopy(self.rpn_list)
for i in range(len(self.rpn_list)):
if isinstance(res[i], str) and res[i].isdigit():
res[i] = args[int(res[i])]
from ..expression import eval_rpn
return eval_rpn(res)
def subvars(self):
def f(m: re.Match):
from ..ft_global import user_vars
word = m.group().lower()
if word in user_vars and not isinstance(user_vars[word], Function):
return(str(user_vars[word]))
else:
return(m.group())
result = re.sub(r"[a-zA-Z]+", f, self.expr)
return result.strip()
def __str__(self) -> str:
result = self.subvars()
return f"{self.name}({','.join(self.vars)}) = {result}"
| true | true |
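As a quick illustration of the `Function` wrapper in the file above, the snippet below wraps a plain Python callable; the import path assumes the ayoubyt/ComputorV2 package is importable, and the lambda is made up for the example.

```python
# Illustrative usage of the Function type defined in computorv2/types/function.py.
# Assumes the ComputorV2 package is on the Python path; the lambda is illustrative.
from computorv2.types.function import Function

add = Function(lambda x, y: x + y, name="add")
print(add(1, 2))  # -> 3
print(add)        # -> add(x,y)=[built-in]
```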
f712c8a3fe4a7f1fe2a76a5437c55a3f6b98cd80 | 12,509 | py | Python | gracebot/gracebot.py | Roald87/GraceDB | 5c7e6cc93a33b00c1c30ce040ef26326c003630d | ["Apache-2.0"] | 7 | 2019-05-16T20:08:11.000Z | 2021-10-07T03:15:00.000Z | gracebot/gracebot.py | Roald87/GraceDB | 5c7e6cc93a33b00c1c30ce040ef26326c003630d | ["Apache-2.0"] | 24 | 2019-07-07T06:14:12.000Z | 2021-09-21T18:50:50.000Z | gracebot/gracebot.py | Roald87/GraceDB | 5c7e6cc93a33b00c1c30ce040ef26326c003630d | ["Apache-2.0"] | null | null | null |
import logging
from collections import Counter, defaultdict
import aiogram
from aiogram import Bot, types
from aiogram.utils.emoji import emojize
from detector import Detector
from gwevents import Events, time_ago
from keyboard import InlineKeyboard
from permanentset import PermanentSet
class GraceBot(Bot):
def __init__(self, token: str):
super().__init__(token=token)
self.events: Events = Events()
self.events.update_all()
self.event_keyboards: dict = defaultdict(InlineKeyboard)
self.new_event_messages_send: PermanentSet = PermanentSet(
"new_event_messages_send.txt", str
)
self.subscribers: PermanentSet = PermanentSet("subscribers.txt", int)
self.event_types: dict = {
# Probability that the source is a binary black hole merger (both
# objects heavier than 5 solar masses)
"BBH": "binary black hole merger",
# Probability that the source is a binary neutron star merger
# (both objects lighter than 3 solar masses)
"BNS": "binary neutron star merger",
# Probability that the source is a neutron star-black hole merger
# (primary heavier than 5 solar masses, secondary lighter than 3
# solar masses)
"NSBH": "neutron star black hole merger",
# Probability that the source is terrestrial(i.e., a background
# noise fluctuation or a glitch)
"Terrestrial": "terrestrial",
# Probability that the source has at least one object between 3 and
# 5 solar masses
"MassGap": "mass gap",
}
async def send_preliminary(self, message):
event_id = event_id_from_message(message)
logging.info(f"Event to update from preliminary message: {event_id}")
if event_id in self.new_event_messages_send.data:
return
else:
self.events.update_events_last_week()
self.new_event_messages_send.add(event_id)
text = f"A new event has been measured!\n\n"
await self._send_event_info_to_all_users(event_id, text)
async def send_update(self, message):
event_id = event_id_from_message(message)
self.events.update_single(event_id)
text = f"Event {event_id} has been updated.\n\n"
await self._send_event_info_to_all_users(event_id, text)
async def send_retraction(self, message):
event_id = event_id_from_message(message)
text = f"Event {event_id} has been retracted. The event details were:\n\n"
await self._send_event_info_to_all_users(event_id, text)
self.events.update_all()
async def _send_event_info_to_all_users(self, event_id: str, pre_text: str) -> None:
for user_id in self.subscribers.data:
try:
await self.send_event_info(user_id, event_id, pre_text)
except aiogram.utils.exceptions.BotBlocked:
logging.info(f"User {user_id} has blocked the bot.")
continue
async def send_event_info(
self, chat_id: str, event_id: str, pre_text: str = ""
) -> None:
"""
Send information of a specific event to the user.
Parameters
----------
chat_id : str
Where to send the message to.
event_id : str
The event to send the information about.
pre_text : str
Will be added to the beginning of the message.
Returns
-------
None
"""
try:
event = self.events.data[event_id]
except KeyError:
logging.error(f"Warning couldn't find event with id {event_id}")
return
link = f"https://gracedb.ligo.org/superevents/{event_id}/view/"
text = (
pre_text + f"*{event_id.upper()}*\n" + f"{time_ago(event['created'])}\n\n"
)
try:
event_type = self.events.get_likely_event_type(event_id)
confidence = self.events.data[event_id]["event_types"][event_type]
text += (
f"Unconfirmed {self.event_types[event_type]} ({confidence:.2%}) event."
)
distance_mean = round(event["distance_mean_Mly"] / 1000, 2)
distance_std = round(event["distance_std_Mly"] / 1000, 2)
text = (
text[:-1] + f" at {distance_mean} ± {distance_std} billion light years."
)
instruments = self.events.data[event_id]["instruments_long"]
text += f" The event was measured by {inline_list(instruments)}."
except KeyError:
pass
text += f"\n\n[Event page]({link})"
await self.send_message(chat_id, text, parse_mode="markdown")
try:
with open(self.events.picture(event_id), "rb") as picture:
await self.send_photo(chat_id, picture)
except FileNotFoundError:
logging.error("Couldn't find the event image")
return None
async def send_welcome_message(self, message: types.Message) -> None:
"""
Send a welcome message to the user.
Parameters
----------
message : aiogram.types.Message
The message sent by the user.
Returns
-------
None.
"""
text = (
"Stay up-to-date on LIGO/Virgo gravitational wave events!\n"
"\n"
"You can /subscribe to automatically receive a message whenever a new event is "
"measured, or an existing event is updated. Use /unsubscribe to stop receiving "
"messages.\n"
"\n"
"Furthermore you can check out the /latest event, or select a past /event. "
"Use /stats to see and overview of all O3 events or view the live detector /status."
)
await self.send_message(message.chat.id, text)
async def send_latest(self, message: types.Message) -> None:
"""
Send some details of the most recent gravitational wave event.
Parameters
----------
message : aiogram.types.Message
The message sent by the user.
Returns
-------
None.
"""
event_id = list(self.events.latest)[0]
await self.send_event_info(message.chat.id, event_id)
@property
def event_keys(self) -> list:
return [f"{id}_{info['most_likely']}" for id, info in self.events.data.items()]
async def send_event_selector(self, message: types.Message) -> None:
"""
User can select any event from the O3 run and get a message with the details.
Parameters
----------
message : types.Message
Returns
-------
None
"""
self.event_keyboards[message.chat.id] = InlineKeyboard(
self.event_keys, rows=4, columns=2
)
await self.send_message(
chat_id=message.chat.id,
text="Select the event you want to see the details of.",
reply_markup=self.event_keyboards[message.chat.id],
)
async def event_selector_callback_handler(self, query: types.CallbackQuery) -> None:
"""
This is called when the user presses a button to select an event.
Parameters
----------
query : types.CallbackQuery
Callback query which contains info on which message the InlineKeyboard is
attached to.
Returns
-------
None
"""
await query.answer() # send answer to close the rounding circle
answer_data = query.data
logging.debug(f"answer_data={answer_data}")
user_id = query.from_user.id
valid_event_ids = self.event_keyboards[user_id].visible_keys
if answer_data in valid_event_ids:
event_id, _ = answer_data.split("_")
await self.send_event_info(user_id, event_id)
else:
await self.event_keyboards[user_id].update(query)
async def send_o3_stats(self, message: types.Message) -> None:
"""
Send some statistics of observational run 3 (O3).
Parameters
----------
message : aiogram.types.Message
The message sent by the user.
Returns
-------
None.
"""
# TODO take confirmed from other source since it will not be updated
# in graceDB if they are confirmed. For that use:
# https://www.gw-openscience.org/catalog/GWTC-1-confident/html/
event_counter = Counter(
[info["most_likely"] for info in self.events.data.values()]
)
unconfirmed_bbh = event_counter["BBH"]
unconfirmed_bns = event_counter["BNS"]
unconfirmed_nsbh = event_counter["NSBH"]
unconfirmed_mg = event_counter["MassGap"]
terrestrial = event_counter["Terrestrial"]
text = (
f"Observational run 3 has detected *{len(self.events.data)}* "
"events since April 1st 2019.\n\n"
""
"*Event types*\n"
f"Binary black hole mergers: *{unconfirmed_bbh}*.\n"
f"Binary neutron star mergers: *{unconfirmed_bns}*.\n"
f"Neutron star black hole mergers: *{unconfirmed_nsbh}*\n"
f"At least one object between 3 and 5 solar masses: *{unconfirmed_mg}*.\n"
f"Likely terrestrial (false alarm): *{terrestrial}*.\n"
)
await self.send_message(message.chat.id, text, parse_mode="markdown")
async def send_detector_status(self, message: types.Message) -> None:
"""
Send status of all three detectors to the user.
Parameters
----------
message : types.Message
The message sent by the user.
Returns
-------
None
"""
detectors = [Detector("Hanford"), Detector("Livingston"), Detector("Virgo")]
detector_status = []
for detector in detectors:
hours = detector.status_duration.days * 24 + (
detector.status_duration.seconds // 3600
)
minutes = (detector.status_duration.seconds % 3600) // 60
detector_status.append(
f"{emojize(detector.status_icon)} {detector.name}: "
f"{detector.status} {hours}h {minutes}m"
)
text = "\n".join(detector_status)
await self.send_message(message.chat.id, text)
async def add_subscriber(self, message: types.Message) -> None:
"""
Add the user from the message to the subscriber list.
Parameters
----------
message : aiogram.types.Message
The message sent by the user.
Returns
-------
None.
"""
user_id = message.chat.id
if self.subscribers.is_in_list(user_id):
await self.send_message(user_id, "You are already subscribed.")
else:
self.subscribers.add(message.chat.id)
await self.send_message(
user_id, "You will now receive the latest event updates."
)
async def remove_subscriber(self, message: types.Message) -> None:
"""
Remove the user from the message from the subscriber list.
Parameters
----------
message : aiogram.types.Message
The message sent by the user.
Returns
-------
None.
"""
user_id = message.chat.id
if not self.subscribers.is_in_list(user_id):
await self.send_message(user_id, "You are not subscribed.")
else:
self.subscribers.remove(message.chat.id)
await self.send_message(
user_id, "You will no longer receive the latest event updates."
)
def event_id_from_message(message: types.Message) -> str:
"""
Return the event id which is assumed to come right after the command.
Parameters
----------
message : aiogram.types.Message
The message sent by the user.
Returns
-------
The event id.
"""
try:
event_id = message.text.split(" ")[-1]
except KeyError:
event_id = None
return event_id
def inline_list(items):
if len(items) == 0:
return ""
elif len(items) == 1:
return items[0]
else:
return ", ".join(items[:-1]) + f" and {items[-1]}"
| 33.268617 | 96 | 0.587257 |
import logging
from collections import Counter, defaultdict
import aiogram
from aiogram import Bot, types
from aiogram.utils.emoji import emojize
from detector import Detector
from gwevents import Events, time_ago
from keyboard import InlineKeyboard
from permanentset import PermanentSet
class GraceBot(Bot):
def __init__(self, token: str):
super().__init__(token=token)
self.events: Events = Events()
self.events.update_all()
self.event_keyboards: dict = defaultdict(InlineKeyboard)
self.new_event_messages_send: PermanentSet = PermanentSet(
"new_event_messages_send.txt", str
)
self.subscribers: PermanentSet = PermanentSet("subscribers.txt", int)
self.event_types: dict = {
"BBH": "binary black hole merger",
"BNS": "binary neutron star merger",
"NSBH": "neutron star black hole merger",
"Terrestrial": "terrestrial",
"MassGap": "mass gap",
}
async def send_preliminary(self, message):
event_id = event_id_from_message(message)
logging.info(f"Event to update from preliminary message: {event_id}")
if event_id in self.new_event_messages_send.data:
return
else:
self.events.update_events_last_week()
self.new_event_messages_send.add(event_id)
text = f"A new event has been measured!\n\n"
await self._send_event_info_to_all_users(event_id, text)
async def send_update(self, message):
event_id = event_id_from_message(message)
self.events.update_single(event_id)
text = f"Event {event_id} has been updated.\n\n"
await self._send_event_info_to_all_users(event_id, text)
async def send_retraction(self, message):
event_id = event_id_from_message(message)
text = f"Event {event_id} has been retracted. The event details were:\n\n"
await self._send_event_info_to_all_users(event_id, text)
self.events.update_all()
async def _send_event_info_to_all_users(self, event_id: str, pre_text: str) -> None:
for user_id in self.subscribers.data:
try:
await self.send_event_info(user_id, event_id, pre_text)
except aiogram.utils.exceptions.BotBlocked:
logging.info(f"User {user_id} has blocked the bot.")
continue
async def send_event_info(
self, chat_id: str, event_id: str, pre_text: str = ""
) -> None:
try:
event = self.events.data[event_id]
except KeyError:
logging.error(f"Warning couldn't find event with id {event_id}")
return
link = f"https://gracedb.ligo.org/superevents/{event_id}/view/"
text = (
pre_text + f"*{event_id.upper()}*\n" + f"{time_ago(event['created'])}\n\n"
)
try:
event_type = self.events.get_likely_event_type(event_id)
confidence = self.events.data[event_id]["event_types"][event_type]
text += (
f"Unconfirmed {self.event_types[event_type]} ({confidence:.2%}) event."
)
distance_mean = round(event["distance_mean_Mly"] / 1000, 2)
distance_std = round(event["distance_std_Mly"] / 1000, 2)
text = (
text[:-1] + f" at {distance_mean} ± {distance_std} billion light years."
)
instruments = self.events.data[event_id]["instruments_long"]
text += f" The event was measured by {inline_list(instruments)}."
except KeyError:
pass
text += f"\n\n[Event page]({link})"
await self.send_message(chat_id, text, parse_mode="markdown")
try:
with open(self.events.picture(event_id), "rb") as picture:
await self.send_photo(chat_id, picture)
except FileNotFoundError:
logging.error("Couldn't find the event image")
return None
async def send_welcome_message(self, message: types.Message) -> None:
text = (
"Stay up-to-date on LIGO/Virgo gravitational wave events!\n"
"\n"
"You can /subscribe to automatically receive a message whenever a new event is "
"measured, or an existing event is updated. Use /unsubscribe to stop receiving "
"messages.\n"
"\n"
"Furthermore you can check out the /latest event, or select a past /event. "
"Use /stats to see and overview of all O3 events or view the live detector /status."
)
await self.send_message(message.chat.id, text)
async def send_latest(self, message: types.Message) -> None:
event_id = list(self.events.latest)[0]
await self.send_event_info(message.chat.id, event_id)
@property
def event_keys(self) -> list:
return [f"{id}_{info['most_likely']}" for id, info in self.events.data.items()]
async def send_event_selector(self, message: types.Message) -> None:
self.event_keyboards[message.chat.id] = InlineKeyboard(
self.event_keys, rows=4, columns=2
)
await self.send_message(
chat_id=message.chat.id,
text="Select the event you want to see the details of.",
reply_markup=self.event_keyboards[message.chat.id],
)
async def event_selector_callback_handler(self, query: types.CallbackQuery) -> None:
await query.answer()
answer_data = query.data
logging.debug(f"answer_data={answer_data}")
user_id = query.from_user.id
valid_event_ids = self.event_keyboards[user_id].visible_keys
if answer_data in valid_event_ids:
event_id, _ = answer_data.split("_")
await self.send_event_info(user_id, event_id)
else:
await self.event_keyboards[user_id].update(query)
async def send_o3_stats(self, message: types.Message) -> None:
event_counter = Counter(
[info["most_likely"] for info in self.events.data.values()]
)
unconfirmed_bbh = event_counter["BBH"]
unconfirmed_bns = event_counter["BNS"]
unconfirmed_nsbh = event_counter["NSBH"]
unconfirmed_mg = event_counter["MassGap"]
terrestrial = event_counter["Terrestrial"]
text = (
f"Observational run 3 has detected *{len(self.events.data)}* "
"events since April 1st 2019.\n\n"
""
"*Event types*\n"
f"Binary black hole mergers: *{unconfirmed_bbh}*.\n"
f"Binary neutron star mergers: *{unconfirmed_bns}*.\n"
f"Neutron star black hole mergers: *{unconfirmed_nsbh}*\n"
f"At least one object between 3 and 5 solar masses: *{unconfirmed_mg}*.\n"
f"Likely terrestrial (false alarm): *{terrestrial}*.\n"
)
await self.send_message(message.chat.id, text, parse_mode="markdown")
async def send_detector_status(self, message: types.Message) -> None:
detectors = [Detector("Hanford"), Detector("Livingston"), Detector("Virgo")]
detector_status = []
for detector in detectors:
hours = detector.status_duration.days * 24 + (
detector.status_duration.seconds // 3600
)
minutes = (detector.status_duration.seconds % 3600) // 60
detector_status.append(
f"{emojize(detector.status_icon)} {detector.name}: "
f"{detector.status} {hours}h {minutes}m"
)
text = "\n".join(detector_status)
await self.send_message(message.chat.id, text)
async def add_subscriber(self, message: types.Message) -> None:
user_id = message.chat.id
if self.subscribers.is_in_list(user_id):
await self.send_message(user_id, "You are already subscribed.")
else:
self.subscribers.add(message.chat.id)
await self.send_message(
user_id, "You will now receive the latest event updates."
)
async def remove_subscriber(self, message: types.Message) -> None:
user_id = message.chat.id
if not self.subscribers.is_in_list(user_id):
await self.send_message(user_id, "You are not subscribed.")
else:
self.subscribers.remove(message.chat.id)
await self.send_message(
user_id, "You will no longer receive the latest event updates."
)
def event_id_from_message(message: types.Message) -> str:
try:
event_id = message.text.split(" ")[-1]
except KeyError:
event_id = None
return event_id
def inline_list(items):
if len(items) == 0:
return ""
elif len(items) == 1:
return items[0]
else:
return ", ".join(items[:-1]) + f" and {items[-1]}"
| true | true |
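For reference, the `inline_list` helper at the bottom of gracebot.py turns the instrument list into readable English; a self-contained copy with a sample call (the instrument names are just the ones used elsewhere in the file):

```python
# Copied from gracebot.py above so the example runs standalone.
def inline_list(items):
    if len(items) == 0:
        return ""
    elif len(items) == 1:
        return items[0]
    else:
        return ", ".join(items[:-1]) + f" and {items[-1]}"

print(inline_list(["Hanford", "Livingston", "Virgo"]))
# -> Hanford, Livingston and Virgo
```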
f712c8de43a8c629aa786fd8a8da9cc2f3bc7f9a | 1,788 | py | Python | optmlstat/test/test_basic_functions.py | sungheeyun/optmlstat | 11d529c915bf27976da9157471a6dbf7df34d205 | ["MIT"] | 4 | 2020-04-08T03:48:59.000Z | 2020-12-30T01:39:52.000Z | optmlstat/test/test_basic_functions.py | sungheeyun/optmlstat | 11d529c915bf27976da9157471a6dbf7df34d205 | ["MIT"] | null | null | null | optmlstat/test/test_basic_functions.py | sungheeyun/optmlstat | 11d529c915bf27976da9157471a6dbf7df34d205 | ["MIT"] | 1 | 2020-04-08T04:04:00.000Z | 2020-04-08T04:04:00.000Z |
import unittest
from logging import Logger, getLogger
from numpy import ndarray, power, allclose
from numpy.random import randn
from freq_used.logging_utils import set_logging_basic_config
from optmlstat.functions.function_base import FunctionBase
from optmlstat.functions.example_functions import get_sum_of_square_function, get_sum_function
logger: Logger = getLogger()
class TestBasicFunctions(unittest.TestCase):
num_inputs: int = 30
num_data_points: int = 100
x_array_2d: ndarray
@classmethod
def setUpClass(cls) -> None:
set_logging_basic_config(__file__)
cls.x_array_2d = randn(cls.num_data_points, cls.num_inputs)
def test_sum_of_squares_function(self):
y_array_1d: ndarray = TestBasicFunctions._get_y_array_1d(
get_sum_of_square_function(TestBasicFunctions.num_inputs)
)
true_y_array_1d: ndarray = power(TestBasicFunctions.x_array_2d, 2.0).sum(axis=1)
logger.info(y_array_1d.shape)
logger.info(true_y_array_1d.shape)
logger.info(allclose(y_array_1d, true_y_array_1d))
self.assertTrue(allclose(y_array_1d, true_y_array_1d))
def test_sum_function(self):
y_array_1d: ndarray = TestBasicFunctions._get_y_array_1d(get_sum_function(TestBasicFunctions.num_inputs))
true_y_array_1d: ndarray = power(TestBasicFunctions.x_array_2d, 1.0).sum(axis=1)
logger.info(y_array_1d.shape)
logger.info(true_y_array_1d.shape)
logger.info(allclose(y_array_1d, true_y_array_1d))
self.assertTrue(allclose(y_array_1d, true_y_array_1d))
@classmethod
def _get_y_array_1d(cls, function: FunctionBase) -> ndarray:
return function.get_y_values_2d(cls.x_array_2d).ravel()
if __name__ == "__main__":
unittest.main()
| 33.111111 | 113 | 0.75 |
import unittest
from logging import Logger, getLogger
from numpy import ndarray, power, allclose
from numpy.random import randn
from freq_used.logging_utils import set_logging_basic_config
from optmlstat.functions.function_base import FunctionBase
from optmlstat.functions.example_functions import get_sum_of_square_function, get_sum_function
logger: Logger = getLogger()
class TestBasicFunctions(unittest.TestCase):
num_inputs: int = 30
num_data_points: int = 100
x_array_2d: ndarray
@classmethod
def setUpClass(cls) -> None:
set_logging_basic_config(__file__)
cls.x_array_2d = randn(cls.num_data_points, cls.num_inputs)
def test_sum_of_squares_function(self):
y_array_1d: ndarray = TestBasicFunctions._get_y_array_1d(
get_sum_of_square_function(TestBasicFunctions.num_inputs)
)
true_y_array_1d: ndarray = power(TestBasicFunctions.x_array_2d, 2.0).sum(axis=1)
logger.info(y_array_1d.shape)
logger.info(true_y_array_1d.shape)
logger.info(allclose(y_array_1d, true_y_array_1d))
self.assertTrue(allclose(y_array_1d, true_y_array_1d))
def test_sum_function(self):
y_array_1d: ndarray = TestBasicFunctions._get_y_array_1d(get_sum_function(TestBasicFunctions.num_inputs))
true_y_array_1d: ndarray = power(TestBasicFunctions.x_array_2d, 1.0).sum(axis=1)
logger.info(y_array_1d.shape)
logger.info(true_y_array_1d.shape)
logger.info(allclose(y_array_1d, true_y_array_1d))
self.assertTrue(allclose(y_array_1d, true_y_array_1d))
@classmethod
def _get_y_array_1d(cls, function: FunctionBase) -> ndarray:
return function.get_y_values_2d(cls.x_array_2d).ravel()
if __name__ == "__main__":
unittest.main()
| true | true |
f712c989452d45e22101d40c8a7502a2c3174e77 | 9,381 | py | Python | client-py/src/utils/IoTDBRpcDataSet.py | SilverNarcissus/incubator-iotdb | ea5529280a57125f4aa48a026a2b24d394e15ee5 | ["Apache-2.0"] | 1 | 2020-12-26T11:28:50.000Z | 2020-12-26T11:28:50.000Z | client-py/src/utils/IoTDBRpcDataSet.py | SilverNarcissus/incubator-iotdb | ea5529280a57125f4aa48a026a2b24d394e15ee5 | ["Apache-2.0"] | null | null | null | client-py/src/utils/IoTDBRpcDataSet.py | SilverNarcissus/incubator-iotdb | ea5529280a57125f4aa48a026a2b24d394e15ee5 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from IoTDBConstants import *
from thrift.transport import TTransport
from iotdb.rpc.TSIService import TSFetchResultsReq, TSCloseOperationReq
class IoTDBRpcDataSet(object):
TIMESTAMP_STR = "Time"
# VALUE_IS_NULL = "The value got by %s (column name) is NULL."
START_INDEX = 2
FLAG = 0x80
def __init__(self, sql, column_name_list, column_type_list, column_name_index, ignore_timestamp, query_id,
client, session_id, query_data_set, fetch_size):
self.__session_id = session_id
self.__ignore_timestamp = ignore_timestamp
self.__sql = sql
self.__query_id = query_id
self.__client = client
self.__fetch_size = fetch_size
self.__column_size = len(column_name_list)
self.__column_name_list = []
self.__column_type_list = []
self.__column_ordinal_dict = {}
if not ignore_timestamp:
self.__column_name_list.append(IoTDBRpcDataSet.TIMESTAMP_STR)
self.__column_type_list.append(TSDataType.INT64)
self.__column_ordinal_dict[IoTDBRpcDataSet.TIMESTAMP_STR] = 1
if column_name_index is not None:
self.__column_type_deduplicated_list = [None for _ in range(len(column_name_index))]
for i in range(len(column_name_list)):
name = column_name_list[i]
self.__column_name_list.append(name)
self.__column_type_list.append(TSDataType[column_type_list[i]])
if name not in self.__column_ordinal_dict:
index = column_name_index[name]
self.__column_ordinal_dict[name] = index + IoTDBRpcDataSet.START_INDEX
self.__column_type_deduplicated_list[index] = TSDataType[column_type_list[i]]
else:
index = IoTDBRpcDataSet.START_INDEX
self.__column_type_deduplicated_list = []
for i in range(len(column_name_list)):
name = column_name_list[i]
self.__column_name_list.append(name)
self.__column_type_list.append(TSDataType[column_type_list[i]])
if name not in self.__column_ordinal_dict:
self.__column_ordinal_dict[name] = index
index += 1
self.__column_type_deduplicated_list.append(TSDataType[column_type_list[i]])
self.__time_bytes = bytes(0)
self.__current_bitmap = [bytes(0) for _ in range(len(self.__column_type_deduplicated_list))]
self.__value = [None for _ in range(len(self.__column_type_deduplicated_list))]
self.__query_data_set = query_data_set
self.__is_closed = False
self.__empty_resultSet = False
self.__has_cached_record = False
self.__rows_index = 0
def close(self):
if self.__is_closed:
return
if self.__client is not None:
try:
status = self.__client.closeOperation(TSCloseOperationReq(self.__session_id, self.__query_id))
print("close session {}, message: {}".format(self.__session_id, status.message))
except TTransport.TException as e:
print("close session {} failed because: ".format(self.__session_id), e)
raise Exception
self.__is_closed = True
self.__client = None
def next(self):
if self.has_cached_result():
self.construct_one_row()
return True
if self.__empty_resultSet:
return False
if self.fetch_results():
self.construct_one_row()
return True
return False
def has_cached_result(self):
return (self.__query_data_set is not None) and (len(self.__query_data_set.time) != 0)
def construct_one_row(self):
# simulating buffer, read 8 bytes from data set and discard first 8 bytes which have been read.
self.__time_bytes = self.__query_data_set.time[:8]
self.__query_data_set.time = self.__query_data_set.time[8:]
for i in range(len(self.__query_data_set.bitmapList)):
bitmap_buffer = self.__query_data_set.bitmapList[i]
# another 8 new rows, should move the bitmap buffer position to next byte
if self.__rows_index % 8 == 0:
self.__current_bitmap[i] = bitmap_buffer[0]
self.__query_data_set.bitmapList[i] = bitmap_buffer[1:]
if not self.is_null(i, self.__rows_index):
value_buffer = self.__query_data_set.valueList[i]
data_type = self.__column_type_deduplicated_list[i]
# simulating buffer
if data_type == TSDataType.BOOLEAN:
self.__value[i] = value_buffer[:1]
self.__query_data_set.valueList[i] = value_buffer[1:]
elif data_type == TSDataType.INT32:
self.__value[i] = value_buffer[:4]
self.__query_data_set.valueList[i] = value_buffer[4:]
elif data_type == TSDataType.INT64:
self.__value[i] = value_buffer[:8]
self.__query_data_set.valueList[i] = value_buffer[8:]
elif data_type == TSDataType.FLOAT:
self.__value[i] = value_buffer[:4]
self.__query_data_set.valueList[i] = value_buffer[4:]
elif data_type == TSDataType.DOUBLE:
self.__value[i] = value_buffer[:8]
self.__query_data_set.valueList[i] = value_buffer[8:]
elif data_type == TSDataType.TEXT:
length = int.from_bytes(value_buffer[:4], byteorder="big", signed=False)
self.__value[i] = value_buffer[4: 4 + length]
self.__query_data_set.valueList[i] = value_buffer[4 + length:]
else:
print("unsupported data type {}.".format(data_type))
# could raise exception here
self.__rows_index += 1
self.__has_cached_record = True
def fetch_results(self):
self.__rows_index = 0
request = TSFetchResultsReq(self.__session_id, self.__sql, self.__fetch_size, self.__query_id, True)
try:
resp = self.__client.fetchResults(request)
if not resp.hasResultSet:
self.__empty_resultSet = True
else:
self.__query_data_set = resp.queryDataSet
return resp.hasResultSet
except TTransport.TException as e:
print("Cannot fetch result from server, because of network connection: ", e)
def is_null(self, index, row_num):
bitmap = self.__current_bitmap[index]
shift = row_num % 8
return ((IoTDBRpcDataSet.FLAG >> shift) & (bitmap & 0xff)) == 0
def is_null_by_index(self, column_index):
index = self.__column_ordinal_dict[self.find_column_name_by_index(column_index)] - IoTDBRpcDataSet.START_INDEX
# time column will never be None
if index < 0:
return True
return self.is_null(index, self.__rows_index - 1)
def is_null_by_name(self, column_name):
index = self.__column_ordinal_dict[column_name] - IoTDBRpcDataSet.START_INDEX
# time column will never be None
if index < 0:
return True
return self.is_null(index, self.__rows_index - 1)
def find_column_name_by_index(self, column_index):
if column_index <= 0:
raise Exception("Column index should start from 1")
if column_index > len(self.__column_name_list):
raise Exception("column index {} out of range {}".format(column_index, self.__column_size))
return self.__column_name_list[column_index - 1]
def get_fetch_size(self):
return self.__fetch_size
def set_fetch_size(self, fetch_size):
self.__fetch_size = fetch_size
def get_column_names(self):
return self.__column_name_list
def get_column_types(self):
return self.__column_type_list
def get_column_size(self):
return self.__column_size
def get_ignore_timestamp(self):
return self.__ignore_timestamp
def get_column_ordinal_dict(self):
return self.__column_ordinal_dict
def get_column_type_deduplicated_list(self):
return self.__column_type_deduplicated_list
def get_values(self):
return self.__value
def get_time_bytes(self):
return self.__time_bytes
def get_has_cached_record(self):
return self.__has_cached_record
| 42.640909 | 118 | 0.644601 |
from IoTDBConstants import *
from thrift.transport import TTransport
from iotdb.rpc.TSIService import TSFetchResultsReq, TSCloseOperationReq
class IoTDBRpcDataSet(object):
TIMESTAMP_STR = "Time"
START_INDEX = 2
FLAG = 0x80
def __init__(self, sql, column_name_list, column_type_list, column_name_index, ignore_timestamp, query_id,
client, session_id, query_data_set, fetch_size):
self.__session_id = session_id
self.__ignore_timestamp = ignore_timestamp
self.__sql = sql
self.__query_id = query_id
self.__client = client
self.__fetch_size = fetch_size
self.__column_size = len(column_name_list)
self.__column_name_list = []
self.__column_type_list = []
self.__column_ordinal_dict = {}
if not ignore_timestamp:
self.__column_name_list.append(IoTDBRpcDataSet.TIMESTAMP_STR)
self.__column_type_list.append(TSDataType.INT64)
self.__column_ordinal_dict[IoTDBRpcDataSet.TIMESTAMP_STR] = 1
if column_name_index is not None:
self.__column_type_deduplicated_list = [None for _ in range(len(column_name_index))]
for i in range(len(column_name_list)):
name = column_name_list[i]
self.__column_name_list.append(name)
self.__column_type_list.append(TSDataType[column_type_list[i]])
if name not in self.__column_ordinal_dict:
index = column_name_index[name]
self.__column_ordinal_dict[name] = index + IoTDBRpcDataSet.START_INDEX
self.__column_type_deduplicated_list[index] = TSDataType[column_type_list[i]]
else:
index = IoTDBRpcDataSet.START_INDEX
self.__column_type_deduplicated_list = []
for i in range(len(column_name_list)):
name = column_name_list[i]
self.__column_name_list.append(name)
self.__column_type_list.append(TSDataType[column_type_list[i]])
if name not in self.__column_ordinal_dict:
self.__column_ordinal_dict[name] = index
index += 1
self.__column_type_deduplicated_list.append(TSDataType[column_type_list[i]])
self.__time_bytes = bytes(0)
self.__current_bitmap = [bytes(0) for _ in range(len(self.__column_type_deduplicated_list))]
self.__value = [None for _ in range(len(self.__column_type_deduplicated_list))]
self.__query_data_set = query_data_set
self.__is_closed = False
self.__empty_resultSet = False
self.__has_cached_record = False
self.__rows_index = 0
def close(self):
if self.__is_closed:
return
if self.__client is not None:
try:
status = self.__client.closeOperation(TSCloseOperationReq(self.__session_id, self.__query_id))
print("close session {}, message: {}".format(self.__session_id, status.message))
except TTransport.TException as e:
print("close session {} failed because: ".format(self.__session_id), e)
raise Exception
self.__is_closed = True
self.__client = None
def next(self):
if self.has_cached_result():
self.construct_one_row()
return True
if self.__empty_resultSet:
return False
if self.fetch_results():
self.construct_one_row()
return True
return False
def has_cached_result(self):
return (self.__query_data_set is not None) and (len(self.__query_data_set.time) != 0)
def construct_one_row(self):
self.__time_bytes = self.__query_data_set.time[:8]
self.__query_data_set.time = self.__query_data_set.time[8:]
for i in range(len(self.__query_data_set.bitmapList)):
bitmap_buffer = self.__query_data_set.bitmapList[i]
if self.__rows_index % 8 == 0:
self.__current_bitmap[i] = bitmap_buffer[0]
self.__query_data_set.bitmapList[i] = bitmap_buffer[1:]
if not self.is_null(i, self.__rows_index):
value_buffer = self.__query_data_set.valueList[i]
data_type = self.__column_type_deduplicated_list[i]
if data_type == TSDataType.BOOLEAN:
self.__value[i] = value_buffer[:1]
self.__query_data_set.valueList[i] = value_buffer[1:]
elif data_type == TSDataType.INT32:
self.__value[i] = value_buffer[:4]
self.__query_data_set.valueList[i] = value_buffer[4:]
elif data_type == TSDataType.INT64:
self.__value[i] = value_buffer[:8]
self.__query_data_set.valueList[i] = value_buffer[8:]
elif data_type == TSDataType.FLOAT:
self.__value[i] = value_buffer[:4]
self.__query_data_set.valueList[i] = value_buffer[4:]
elif data_type == TSDataType.DOUBLE:
self.__value[i] = value_buffer[:8]
self.__query_data_set.valueList[i] = value_buffer[8:]
elif data_type == TSDataType.TEXT:
length = int.from_bytes(value_buffer[:4], byteorder="big", signed=False)
self.__value[i] = value_buffer[4: 4 + length]
self.__query_data_set.valueList[i] = value_buffer[4 + length:]
else:
print("unsupported data type {}.".format(data_type))
self.__rows_index += 1
self.__has_cached_record = True
def fetch_results(self):
self.__rows_index = 0
request = TSFetchResultsReq(self.__session_id, self.__sql, self.__fetch_size, self.__query_id, True)
try:
resp = self.__client.fetchResults(request)
if not resp.hasResultSet:
self.__empty_resultSet = True
else:
self.__query_data_set = resp.queryDataSet
return resp.hasResultSet
except TTransport.TException as e:
print("Cannot fetch result from server, because of network connection: ", e)
def is_null(self, index, row_num):
bitmap = self.__current_bitmap[index]
shift = row_num % 8
return ((IoTDBRpcDataSet.FLAG >> shift) & (bitmap & 0xff)) == 0
def is_null_by_index(self, column_index):
index = self.__column_ordinal_dict[self.find_column_name_by_index(column_index)] - IoTDBRpcDataSet.START_INDEX
if index < 0:
return True
return self.is_null(index, self.__rows_index - 1)
def is_null_by_name(self, column_name):
index = self.__column_ordinal_dict[column_name] - IoTDBRpcDataSet.START_INDEX
if index < 0:
return True
return self.is_null(index, self.__rows_index - 1)
def find_column_name_by_index(self, column_index):
if column_index <= 0:
raise Exception("Column index should start from 1")
if column_index > len(self.__column_name_list):
raise Exception("column index {} out of range {}".format(column_index, self.__column_size))
return self.__column_name_list[column_index - 1]
def get_fetch_size(self):
return self.__fetch_size
def set_fetch_size(self, fetch_size):
self.__fetch_size = fetch_size
def get_column_names(self):
return self.__column_name_list
def get_column_types(self):
return self.__column_type_list
def get_column_size(self):
return self.__column_size
def get_ignore_timestamp(self):
return self.__ignore_timestamp
def get_column_ordinal_dict(self):
return self.__column_ordinal_dict
def get_column_type_deduplicated_list(self):
return self.__column_type_deduplicated_list
def get_values(self):
return self.__value
def get_time_bytes(self):
return self.__time_bytes
def get_has_cached_record(self):
return self.__has_cached_record
| true | true |
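The `is_null` check in IoTDBRpcDataSet reads one bit per row from the current bitmap byte (bit 7 for the first row of each 8-row block). A small worked example follows; the bitmap value is made up for illustration.

```python
# Worked example of the bitmap null test from IoTDBRpcDataSet.is_null;
# the bitmap byte 0b10100000 is an illustrative value, not real server output.
FLAG = 0x80

def is_null(bitmap_byte: int, row_num: int) -> bool:
    shift = row_num % 8
    return ((FLAG >> shift) & (bitmap_byte & 0xFF)) == 0

# Bits 7 and 5 are set, so rows 0 and 2 of this 8-row block carry values.
print([is_null(0b10100000, r) for r in range(8)])
# -> [False, True, False, True, True, True, True, True]
```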
f712c99545e91e441d992d541c1ccf79df97e92d | 5,158 | py | Python | mlfromscratch/unsupervised_learning/gaussian_mixture_model.py | mmderakhshani/ML-From-Scratch | 4593ffbb64399ed9eaf4921d6c0d5430d81ec1cb | ["MIT"] | 6 | 2017-09-14T08:36:04.000Z | 2021-11-06T12:23:30.000Z | mlfromscratch/unsupervised_learning/gaussian_mixture_model.py | ZexinYan/ML-From-Scratch | 86ccc5273e0182b66c5d93c428f75dad61d8ced3 | ["MIT"] | null | null | null | mlfromscratch/unsupervised_learning/gaussian_mixture_model.py | ZexinYan/ML-From-Scratch | 86ccc5273e0182b66c5d93c428f75dad61d8ced3 | ["MIT"] | 1 | 2018-01-02T22:45:05.000Z | 2018-01-02T22:45:05.000Z |
from __future__ import division, print_function
import sys
import os
import math
import random
from sklearn import datasets
import numpy as np
# Import helper functions
from mlfromscratch.utils.data_manipulation import normalize
from mlfromscratch.utils.data_operation import euclidean_distance, calculate_covariance_matrix
from mlfromscratch.unsupervised_learning import PCA
from mlfromscratch.utils import Plot
class GaussianMixtureModel():
"""A probabilistic clustering method for determining groupings among data samples.
Parameters:
-----------
k: int
The number of clusters the algorithm will form.
max_iterations: int
The number of iterations the algorithm will run for if it does
not converge before that.
tolerance: float
If the difference of the results from one iteration to the next is
smaller than this value we will say that the algorithm has converged.
"""
def __init__(self, k=2, max_iterations=2000, tolerance=1e-8):
self.k = k
self.parameters = []
self.max_iterations = max_iterations
self.tolerance = tolerance
self.responsibilities = []
self.sample_assignments = None
self.responsibility = None
# Initialize gaussian randomly
def _init_random_gaussians(self, X):
n_samples = np.shape(X)[0]
self.priors = (1 / self.k) * np.ones(self.k)
for i in range(self.k):
params = {}
params["mean"] = X[np.random.choice(range(n_samples))]
params["cov"] = calculate_covariance_matrix(X)
self.parameters.append(params)
# Likelihood
def multivariate_gaussian(self, X, params):
n_features = np.shape(X)[1]
mean = params["mean"]
covar = params["cov"]
determinant = np.linalg.det(covar)
likelihoods = np.zeros(np.shape(X)[0])
for i, sample in enumerate(X):
d = n_features # dimension
coeff = (1.0 / (math.pow((2.0 * math.pi), d / 2)
* math.sqrt(determinant)))
exponent = math.exp(-0.5 * (sample - mean).T.dot(np.linalg.pinv(covar)).dot((sample - mean)))
likelihoods[i] = coeff * exponent
return likelihoods
# Calculate the likelihood over all samples
def _get_likelihoods(self, X):
n_samples = np.shape(X)[0]
likelihoods = np.zeros((n_samples, self.k))
for i in range(self.k):
likelihoods[
:, i] = self.multivariate_gaussian(
X, self.parameters[i])
return likelihoods
# Calculate the responsibility
def _expectation(self, X):
# Calculate probabilities of X belonging to the different clusters
weighted_likelihoods = self._get_likelihoods(X) * self.priors
sum_likelihoods = np.expand_dims(
np.sum(weighted_likelihoods, axis=1), axis=1)
# Determine responsibility as P(X|y)*P(y)/P(X)
self.responsibility = weighted_likelihoods / sum_likelihoods
# Assign samples to cluster that has largest probability
self.sample_assignments = self.responsibility.argmax(axis=1)
# Save value for convergence check
self.responsibilities.append(np.max(self.responsibility, axis=1))
# Update the parameters and priors
def _maximization(self, X):
# Iterate through clusters and recalculate mean and covariance
for i in range(self.k):
resp = np.expand_dims(self.responsibility[:, i], axis=1)
mean = (resp * X).sum(axis=0) / resp.sum()
covariance = (X - mean).T.dot((X - mean) * resp) / resp.sum()
self.parameters[i]["mean"], self.parameters[
i]["cov"] = mean, covariance
# Update weights
n_samples = np.shape(X)[0]
self.priors = self.responsibility.sum(axis=0) / n_samples
# Convergence if || likelihood - last_likelihood || < tolerance
def _converged(self, X):
if len(self.responsibilities) < 2:
return False
diff = np.linalg.norm(
self.responsibilities[-1] - self.responsibilities[-2])
# print ("Likelihood update: %s (tol: %s)" % (diff, self.tolerance))
return diff <= self.tolerance
# Run GMM and return the cluster indices
def predict(self, X):
# Initialize the gaussians randomly
self._init_random_gaussians(X)
# Run EM until convergence or for max iterations
for _ in range(self.max_iterations):
self._expectation(X) # E-step
self._maximization(X) # M-step
# Check convergence
if self._converged(X):
break
# Make new assignments and return them
self._expectation(X)
return self.sample_assignments
def main():
# Load the dataset
X, y = datasets.make_blobs()
# Cluster the data
clf = GaussianMixtureModel(k=3)
y_pred = clf.predict(X)
p = Plot()
p.plot_in_2d(X, y_pred, title="GMM Clustering")
p.plot_in_2d(X, y, title="Actual Clustering")
if __name__ == "__main__":
main()
| 35.819444 | 105 | 0.630865 |
from __future__ import division, print_function
import sys
import os
import math
import random
from sklearn import datasets
import numpy as np
from mlfromscratch.utils.data_manipulation import normalize
from mlfromscratch.utils.data_operation import euclidean_distance, calculate_covariance_matrix
from mlfromscratch.unsupervised_learning import PCA
from mlfromscratch.utils import Plot
class GaussianMixtureModel():
def __init__(self, k=2, max_iterations=2000, tolerance=1e-8):
self.k = k
self.parameters = []
self.max_iterations = max_iterations
self.tolerance = tolerance
self.responsibilities = []
self.sample_assignments = None
self.responsibility = None
def _init_random_gaussians(self, X):
n_samples = np.shape(X)[0]
self.priors = (1 / self.k) * np.ones(self.k)
for i in range(self.k):
params = {}
params["mean"] = X[np.random.choice(range(n_samples))]
params["cov"] = calculate_covariance_matrix(X)
self.parameters.append(params)
def multivariate_gaussian(self, X, params):
n_features = np.shape(X)[1]
mean = params["mean"]
covar = params["cov"]
determinant = np.linalg.det(covar)
likelihoods = np.zeros(np.shape(X)[0])
for i, sample in enumerate(X):
d = n_features
coeff = (1.0 / (math.pow((2.0 * math.pi), d / 2)
* math.sqrt(determinant)))
exponent = math.exp(-0.5 * (sample - mean).T.dot(np.linalg.pinv(covar)).dot((sample - mean)))
likelihoods[i] = coeff * exponent
return likelihoods
def _get_likelihoods(self, X):
n_samples = np.shape(X)[0]
likelihoods = np.zeros((n_samples, self.k))
for i in range(self.k):
likelihoods[
:, i] = self.multivariate_gaussian(
X, self.parameters[i])
return likelihoods
def _expectation(self, X):
weighted_likelihoods = self._get_likelihoods(X) * self.priors
sum_likelihoods = np.expand_dims(
np.sum(weighted_likelihoods, axis=1), axis=1)
self.responsibility = weighted_likelihoods / sum_likelihoods
self.sample_assignments = self.responsibility.argmax(axis=1)
self.responsibilities.append(np.max(self.responsibility, axis=1))
def _maximization(self, X):
for i in range(self.k):
resp = np.expand_dims(self.responsibility[:, i], axis=1)
mean = (resp * X).sum(axis=0) / resp.sum()
covariance = (X - mean).T.dot((X - mean) * resp) / resp.sum()
self.parameters[i]["mean"], self.parameters[
i]["cov"] = mean, covariance
n_samples = np.shape(X)[0]
self.priors = self.responsibility.sum(axis=0) / n_samples
def _converged(self, X):
if len(self.responsibilities) < 2:
return False
diff = np.linalg.norm(
self.responsibilities[-1] - self.responsibilities[-2])
return diff <= self.tolerance
def predict(self, X):
self._init_random_gaussians(X)
for _ in range(self.max_iterations):
self._expectation(X)
self._maximization(X)
if self._converged(X):
break
self._expectation(X)
return self.sample_assignments
def main():
X, y = datasets.make_blobs()
clf = GaussianMixtureModel(k=3)
y_pred = clf.predict(X)
p = Plot()
p.plot_in_2d(X, y_pred, title="GMM Clustering")
p.plot_in_2d(X, y, title="Actual Clustering")
if __name__ == "__main__":
main()
| true | true |
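The `_expectation` step above determines the responsibility as P(X|y)·P(y)/P(X); written out in standard GMM notation (the symbols are the usual ones, not taken from the repository):

$$\gamma_{ik} = \frac{\pi_k\,\mathcal{N}(x_i \mid \mu_k, \Sigma_k)}{\sum_{j=1}^{K} \pi_j\,\mathcal{N}(x_i \mid \mu_j, \Sigma_j)}$$

and `_maximization` then re-estimates each cluster's mean, covariance, and prior with these responsibilities as weights.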
f712cb48c2676caa70abd3795aeccf02d5574156 | 3,509 | py | Python | lnbits/core/views/generic.py | frankhinek/lnbits | 30f5eef3b547ba0626ee91bcf4bfc0dcdf8265a1 | ["MIT"] | null | null | null | lnbits/core/views/generic.py | frankhinek/lnbits | 30f5eef3b547ba0626ee91bcf4bfc0dcdf8265a1 | ["MIT"] | 1 | 2020-08-08T13:11:34.000Z | 2020-08-08T16:38:09.000Z | lnbits/core/views/generic.py | dennisreimann/lnbits | 8ea72c9934feff6307c116a91d28236f2d9f6e2a | ["MIT"] | null | null | null |
from flask import g, abort, redirect, request, render_template, send_from_directory, url_for
from http import HTTPStatus
from os import getenv, path
from lnbits.core import core_app
from lnbits.decorators import check_user_exists, validate_uuids
from lnbits.settings import SERVICE_FEE
from ..crud import (
create_account,
get_user,
update_user_extension,
create_wallet,
delete_wallet,
)
@core_app.route("/favicon.ico")
def favicon():
return send_from_directory(path.join(core_app.root_path, "static"), "favicon.ico")
@core_app.route("/")
def home():
return render_template("core/index.html", lnurl=request.args.get("lightning", None))
@core_app.route("/extensions")
@validate_uuids(["usr"], required=True)
@check_user_exists()
def extensions():
extension_to_enable = request.args.get("enable", type=str)
extension_to_disable = request.args.get("disable", type=str)
if extension_to_enable and extension_to_disable:
abort(HTTPStatus.BAD_REQUEST, "You can either `enable` or `disable` an extension.")
if extension_to_enable:
update_user_extension(user_id=g.user.id, extension=extension_to_enable, active=1)
elif extension_to_disable:
update_user_extension(user_id=g.user.id, extension=extension_to_disable, active=0)
return render_template("core/extensions.html", user=get_user(g.user.id))
@core_app.route("/wallet")
@validate_uuids(["usr", "wal"])
def wallet():
user_id = request.args.get("usr", type=str)
wallet_id = request.args.get("wal", type=str)
wallet_name = request.args.get("nme", type=str)
service_fee = int(SERVICE_FEE) if int(SERVICE_FEE) == SERVICE_FEE else SERVICE_FEE
# just wallet_name: create a new user, then create a new wallet for user with wallet_name
# just user_id: return the first user wallet or create one if none found (with default wallet_name)
# user_id and wallet_name: create a new wallet for user with wallet_name
# user_id and wallet_id: return that wallet if user is the owner
# nothing: create everything
if not user_id:
user = get_user(create_account().id)
else:
user = get_user(user_id) or abort(HTTPStatus.NOT_FOUND, "User does not exist.")
allowed_users = getenv("LNBITS_ALLOWED_USERS", "all")
if allowed_users != "all" and user_id not in allowed_users.split(","):
abort(HTTPStatus.UNAUTHORIZED, f"User not authorized.")
if not wallet_id:
if user.wallets and not wallet_name:
wallet = user.wallets[0]
else:
wallet = create_wallet(user_id=user.id, wallet_name=wallet_name)
return redirect(url_for("core.wallet", usr=user.id, wal=wallet.id))
if wallet_id not in user.wallet_ids:
abort(HTTPStatus.FORBIDDEN, "Not your wallet.")
return render_template("core/wallet.html", user=user, wallet=user.get_wallet(wallet_id), service_fee=service_fee)
@core_app.route("/deletewallet")
@validate_uuids(["usr", "wal"], required=True)
@check_user_exists()
def deletewallet():
wallet_id = request.args.get("wal", type=str)
user_wallet_ids = g.user.wallet_ids
if wallet_id not in user_wallet_ids:
abort(HTTPStatus.FORBIDDEN, "Not your wallet.")
else:
delete_wallet(user_id=g.user.id, wallet_id=wallet_id)
user_wallet_ids.remove(wallet_id)
if user_wallet_ids:
return redirect(url_for("core.wallet", usr=g.user.id, wal=user_wallet_ids[0]))
return redirect(url_for("core.home"))
| 35.09 | 117 | 0.717013 |
from flask import g, abort, redirect, request, render_template, send_from_directory, url_for
from http import HTTPStatus
from os import getenv, path
from lnbits.core import core_app
from lnbits.decorators import check_user_exists, validate_uuids
from lnbits.settings import SERVICE_FEE
from ..crud import (
create_account,
get_user,
update_user_extension,
create_wallet,
delete_wallet,
)
@core_app.route("/favicon.ico")
def favicon():
return send_from_directory(path.join(core_app.root_path, "static"), "favicon.ico")
@core_app.route("/")
def home():
return render_template("core/index.html", lnurl=request.args.get("lightning", None))
@core_app.route("/extensions")
@validate_uuids(["usr"], required=True)
@check_user_exists()
def extensions():
extension_to_enable = request.args.get("enable", type=str)
extension_to_disable = request.args.get("disable", type=str)
if extension_to_enable and extension_to_disable:
abort(HTTPStatus.BAD_REQUEST, "You can either `enable` or `disable` an extension.")
if extension_to_enable:
update_user_extension(user_id=g.user.id, extension=extension_to_enable, active=1)
elif extension_to_disable:
update_user_extension(user_id=g.user.id, extension=extension_to_disable, active=0)
return render_template("core/extensions.html", user=get_user(g.user.id))
@core_app.route("/wallet")
@validate_uuids(["usr", "wal"])
def wallet():
user_id = request.args.get("usr", type=str)
wallet_id = request.args.get("wal", type=str)
wallet_name = request.args.get("nme", type=str)
service_fee = int(SERVICE_FEE) if int(SERVICE_FEE) == SERVICE_FEE else SERVICE_FEE
if not user_id:
user = get_user(create_account().id)
else:
user = get_user(user_id) or abort(HTTPStatus.NOT_FOUND, "User does not exist.")
allowed_users = getenv("LNBITS_ALLOWED_USERS", "all")
if allowed_users != "all" and user_id not in allowed_users.split(","):
abort(HTTPStatus.UNAUTHORIZED, f"User not authorized.")
if not wallet_id:
if user.wallets and not wallet_name:
wallet = user.wallets[0]
else:
wallet = create_wallet(user_id=user.id, wallet_name=wallet_name)
return redirect(url_for("core.wallet", usr=user.id, wal=wallet.id))
if wallet_id not in user.wallet_ids:
abort(HTTPStatus.FORBIDDEN, "Not your wallet.")
return render_template("core/wallet.html", user=user, wallet=user.get_wallet(wallet_id), service_fee=service_fee)
@core_app.route("/deletewallet")
@validate_uuids(["usr", "wal"], required=True)
@check_user_exists()
def deletewallet():
wallet_id = request.args.get("wal", type=str)
user_wallet_ids = g.user.wallet_ids
if wallet_id not in user_wallet_ids:
abort(HTTPStatus.FORBIDDEN, "Not your wallet.")
else:
delete_wallet(user_id=g.user.id, wallet_id=wallet_id)
user_wallet_ids.remove(wallet_id)
if user_wallet_ids:
return redirect(url_for("core.wallet", usr=g.user.id, wal=user_wallet_ids[0]))
return redirect(url_for("core.home"))
| true | true |
f712ccf8aad1c576c4bb7d20447015501ae5c134 | 961 | py | Python | src/quo/layout/dummy.py | chouette254/quo | 8979afd118e77d3d0f93f9fbe8711efada7158c5 | ["MIT"] | 1 | 2021-02-15T03:56:00.000Z | 2021-02-15T03:56:00.000Z | src/quo/layout/dummy.py | chouette254/quo | 8979afd118e77d3d0f93f9fbe8711efada7158c5 | ["MIT"] | 3 | 2021-02-22T11:49:23.000Z | 2021-02-28T06:47:41.000Z | src/quo/layout/dummy.py | secretuminc/quo | c4f77d52f015c612d32ed0fc2fc79545af598f10 | ["MIT"] | null | null | null |
"""
Dummy layout. Used when somebody creates an `Application` without specifying a
`Layout`.
"""
from quo.text import HTML
from quo.keys import KeyBinder
from quo.keys.key_binding.key_processor import KeyPressEvent
from .containers import Window
from .controls import FormattedTextControl
from .dimension import D
from .layout import Layout
__all__ = [
"create_dummy_layout",
]
E = KeyPressEvent
def create_dummy_layout() -> Layout:
"""
Create a dummy layout for use in an 'Application' that doesn't have a
layout specified. When ENTER is pressed, the application quits.
"""
kb = KeyBinder()
@kb.add("enter")
def enter(event: E) -> None:
event.app.exit()
control = FormattedTextControl(
HTML("No layout specified. Press <reverse>ENTER</reverse> to quit."),
key_bindings=kb,
)
window = Window(content=control, height=D(min=1))
return Layout(container=window, focused_element=window)
| 25.289474
| 78
| 0.707596
|
from quo.text import HTML
from quo.keys import KeyBinder
from quo.keys.key_binding.key_processor import KeyPressEvent
from .containers import Window
from .controls import FormattedTextControl
from .dimension import D
from .layout import Layout
__all__ = [
"create_dummy_layout",
]
E = KeyPressEvent
def create_dummy_layout() -> Layout:
kb = KeyBinder()
@kb.add("enter")
def enter(event: E) -> None:
event.app.exit()
control = FormattedTextControl(
HTML("No layout specified. Press <reverse>ENTER</reverse> to quit."),
key_bindings=kb,
)
window = Window(content=control, height=D(min=1))
return Layout(container=window, focused_element=window)
| true
| true
|
f712cd35d53a7f9e2846be7bc0df488a47a60401
| 581
|
py
|
Python
|
koco/patch.py
|
inmoonlight/koco
|
1ad92c91ffe47f2455190970024c948554923770
|
[
"MIT"
] | 24
|
2020-05-20T14:51:59.000Z
|
2021-12-12T13:08:57.000Z
|
koco/patch.py
|
alisonykim/koco
|
1ad92c91ffe47f2455190970024c948554923770
|
[
"MIT"
] | null | null | null |
koco/patch.py
|
alisonykim/koco
|
1ad92c91ffe47f2455190970024c948554923770
|
[
"MIT"
] | 2
|
2021-01-24T20:55:05.000Z
|
2022-03-17T09:36:18.000Z
|
import logging
import zipfile
import wget
from .utils import DOWNLOAD_DIR, make_dirs
baseurl = 'https://codeload.github.com/kocohub/{}/zip/master'
logger = logging.getLogger(__name__)
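# Download the requested kocohub dataset as a zip archive into DOWNLOAD_DIR and extract it there.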
def download_dataset(dataset, verbose=True):
make_dirs(DOWNLOAD_DIR)
url = baseurl.format(dataset)
wget.download(url, f'{DOWNLOAD_DIR}/{dataset}.zip')
unzip(f'{DOWNLOAD_DIR}/{dataset}.zip')
if verbose:
logger.info(f'Dataset {dataset} downloaded to {DOWNLOAD_DIR}.')
def unzip(zippath):
with zipfile.ZipFile(zippath) as z:
z.extractall(DOWNLOAD_DIR)
| 23.24
| 71
| 0.719449
|
import logging
import zipfile
import wget
from .utils import DOWNLOAD_DIR, make_dirs
baseurl = 'https://codeload.github.com/kocohub/{}/zip/master'
logger = logging.getLogger(__name__)
def download_dataset(dataset, verbose=True):
make_dirs(DOWNLOAD_DIR)
url = baseurl.format(dataset)
wget.download(url, f'{DOWNLOAD_DIR}/{dataset}.zip')
unzip(f'{DOWNLOAD_DIR}/{dataset}.zip')
if verbose:
logger.info(f'Dataset {dataset} downloaded to {DOWNLOAD_DIR}.')
def unzip(zippath):
with zipfile.ZipFile(zippath) as z:
z.extractall(DOWNLOAD_DIR)
| true
| true
|
f712cd5980cb2bd7a24133bce003672fd878a49a
| 5,271
|
py
|
Python
|
ptsemseg/loader/mapillary_vistas_loader.py
|
EEEGUI/Mapillary-vistas-semseg
|
d07a107fd08a7536f09f25e426a6f15033cbb609
|
[
"MIT"
] | 14
|
2019-07-17T06:04:15.000Z
|
2021-09-24T08:00:52.000Z
|
ptsemseg/loader/mapillary_vistas_loader.py
|
EEEGUI/Mapillary-vistas-semseg
|
d07a107fd08a7536f09f25e426a6f15033cbb609
|
[
"MIT"
] | null | null | null |
ptsemseg/loader/mapillary_vistas_loader.py
|
EEEGUI/Mapillary-vistas-semseg
|
d07a107fd08a7536f09f25e426a6f15033cbb609
|
[
"MIT"
] | 5
|
2019-07-17T06:03:58.000Z
|
2022-01-26T08:57:33.000Z
|
import os
import json
import torch
import numpy as np
from torch.utils import data
from PIL import Image
from ptsemseg.utils import recursive_glob
from ptsemseg.augmentations import Compose, RandomHorizontallyFlip, RandomRotate
class mapillaryVistasLoader(data.Dataset):
def __init__(
self,
root,
split="training",
img_size=(1025, 2049),
is_transform=True,
augmentations=None,
test_mode=False,
):
self.root = root
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.n_classes = 9
self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
self.mean = np.array([80.5423, 91.3162, 81.4312])
self.files = {}
if not test_mode:
self.images_base = os.path.join(self.root, self.split, "images")
self.annotations_base = os.path.join(self.root, self.split, "labels")
self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".jpg")
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
self.class_names, self.class_ids, self.class_colors, self.class_major_ids = self.parse_config()
self.ignore_id = 250
def parse_config(self):
with open(os.path.join(self.root, "config.json")) as config_file:
config = json.load(config_file)
labels = config["labels"]
class_names = []
class_ids = []
class_colors = []
class_major_ids = []
for label_id, label in enumerate(labels):
class_names.append(label["readable"])
class_ids.append(label_id)
class_colors.append(label["color"])
class_major_ids.append(label['majorclass'])
print("There are {} labels in the config file".format(len(set(class_major_ids))))
return class_names, class_ids, class_colors, class_major_ids
def __len__(self):
"""__len__"""
return len(self.files[self.split])
def __getitem__(self, index):
"""__getitem__
:param index:
"""
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(
self.annotations_base, os.path.basename(img_path).replace(".jpg", ".png")
)
img = Image.open(img_path)
lbl = Image.open(lbl_path)
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.is_transform:
img, lbl = self.transform(img, lbl)
return img, lbl
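    # Resize to img_size, scale pixel values to [0, 1] and convert the image to a CHW float tensor;
    # the label map is remapped to major-class ids via encode_segmap.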
def transform(self, img, lbl):
if self.img_size == ("same", "same"):
pass
else:
img = img.resize(
(self.img_size[1], self.img_size[0]), resample=Image.LANCZOS
) # uint8 with RGB mode
lbl = lbl.resize((self.img_size[1], self.img_size[0]))
img = np.array(img).astype(np.float64) / 255.0
img = torch.from_numpy(img.transpose(2, 0, 1)).float() # From HWC to CHW
#
# lbl = torch.from_numpy(np.array(lbl)).long()
# lbl[lbl == 65] = self.ignore_id
#
lbl = torch.from_numpy(np.array(lbl)).long()
lbl[lbl == self.ignore_id] = 65
lbl = self.encode_segmap(lbl)
lbl[lbl == 0] = self.ignore_id
return img, lbl
def decode_segmap(self, temp):
class_major_colors = [[0, 0, 0],
[70, 70, 70],
[180, 165, 180],
[128, 64, 64],
[220, 20, 60],
[255, 255, 255],
[70, 130, 180],
[250, 170, 30],
[0, 0, 142]]
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, len(class_major_colors)):
r[temp == l] = class_major_colors[l][0]
g[temp == l] = class_major_colors[l][1]
b[temp == l] = class_major_colors[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
# rgb[:, :, 0] = r / 255.0
# rgb[:, :, 1] = g / 255.0
# rgb[:, :, 2] = b / 255.0
rgb[:, :, 0] = r
rgb[:, :, 1] = g
rgb[:, :, 2] = b
return rgb
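    # Remap every fine-grained label id to its major-class id; ids are shifted by +100 first so
    # freshly remapped values cannot collide with label ids that have not been processed yet.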
def encode_segmap(self, mask):
# Put all void classes to zero
for id in self.class_ids:
mask[mask == id] = self.class_major_ids[id]+100
mask = mask - 100
return mask
if __name__ == "__main__":
augment = Compose([RandomHorizontallyFlip(0.5), RandomRotate(6)])
local_path = "/home/lin/Documents/dataset/mapillary"
dst = mapillaryVistasLoader(
local_path, split='validation', img_size=(512, 1024), is_transform=True, augmentations=None
)
bs = 1
trainloader = data.DataLoader(dst, batch_size=bs, num_workers=4, shuffle=True)
for i, data_samples in enumerate(trainloader):
x = dst.decode_segmap(data_samples[1][0].numpy())
x = Image.fromarray(np.uint8(x))
x.show()
| 33.788462
| 103
| 0.554544
|
import os
import json
import torch
import numpy as np
from torch.utils import data
from PIL import Image
from ptsemseg.utils import recursive_glob
from ptsemseg.augmentations import Compose, RandomHorizontallyFlip, RandomRotate
class mapillaryVistasLoader(data.Dataset):
def __init__(
self,
root,
split="training",
img_size=(1025, 2049),
is_transform=True,
augmentations=None,
test_mode=False,
):
self.root = root
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.n_classes = 9
self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
self.mean = np.array([80.5423, 91.3162, 81.4312])
self.files = {}
if not test_mode:
self.images_base = os.path.join(self.root, self.split, "images")
self.annotations_base = os.path.join(self.root, self.split, "labels")
self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".jpg")
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
self.class_names, self.class_ids, self.class_colors, self.class_major_ids = self.parse_config()
self.ignore_id = 250
def parse_config(self):
with open(os.path.join(self.root, "config.json")) as config_file:
config = json.load(config_file)
labels = config["labels"]
class_names = []
class_ids = []
class_colors = []
class_major_ids = []
for label_id, label in enumerate(labels):
class_names.append(label["readable"])
class_ids.append(label_id)
class_colors.append(label["color"])
class_major_ids.append(label['majorclass'])
print("There are {} labels in the config file".format(len(set(class_major_ids))))
return class_names, class_ids, class_colors, class_major_ids
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(
self.annotations_base, os.path.basename(img_path).replace(".jpg", ".png")
)
img = Image.open(img_path)
lbl = Image.open(lbl_path)
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.is_transform:
img, lbl = self.transform(img, lbl)
return img, lbl
def transform(self, img, lbl):
if self.img_size == ("same", "same"):
pass
else:
img = img.resize(
(self.img_size[1], self.img_size[0]), resample=Image.LANCZOS
)
lbl = lbl.resize((self.img_size[1], self.img_size[0]))
img = np.array(img).astype(np.float64) / 255.0
img = torch.from_numpy(img.transpose(2, 0, 1)).float()
lbl = torch.from_numpy(np.array(lbl)).long()
lbl[lbl == self.ignore_id] = 65
lbl = self.encode_segmap(lbl)
lbl[lbl == 0] = self.ignore_id
return img, lbl
def decode_segmap(self, temp):
class_major_colors = [[0, 0, 0],
[70, 70, 70],
[180, 165, 180],
[128, 64, 64],
[220, 20, 60],
[255, 255, 255],
[70, 130, 180],
[250, 170, 30],
[0, 0, 142]]
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, len(class_major_colors)):
r[temp == l] = class_major_colors[l][0]
g[temp == l] = class_major_colors[l][1]
b[temp == l] = class_major_colors[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r
rgb[:, :, 1] = g
rgb[:, :, 2] = b
return rgb
def encode_segmap(self, mask):
for id in self.class_ids:
mask[mask == id] = self.class_major_ids[id]+100
mask = mask - 100
return mask
if __name__ == "__main__":
augment = Compose([RandomHorizontallyFlip(0.5), RandomRotate(6)])
local_path = "/home/lin/Documents/dataset/mapillary"
dst = mapillaryVistasLoader(
local_path, split='validation', img_size=(512, 1024), is_transform=True, augmentations=None
)
bs = 1
trainloader = data.DataLoader(dst, batch_size=bs, num_workers=4, shuffle=True)
for i, data_samples in enumerate(trainloader):
x = dst.decode_segmap(data_samples[1][0].numpy())
x = Image.fromarray(np.uint8(x))
x.show()
| true
| true
|
f712cde8cb3f42ff94400d66337b26560b041a31
| 1,728
|
py
|
Python
|
02_Arrays/max_non_negative_subArray.py
|
Sheetal0601/InterviewBit
|
72ba1507278dafac6e5fb81da20d372e3d141348
|
[
"MIT"
] | 61
|
2018-02-18T08:16:31.000Z
|
2022-02-17T17:18:57.000Z
|
02_Arrays/max_non_negative_subArray.py
|
Sheetal0601/InterviewBit
|
72ba1507278dafac6e5fb81da20d372e3d141348
|
[
"MIT"
] | 1
|
2018-02-23T20:06:18.000Z
|
2019-12-29T18:52:20.000Z
|
02_Arrays/max_non_negative_subArray.py
|
Sheetal0601/InterviewBit
|
72ba1507278dafac6e5fb81da20d372e3d141348
|
[
"MIT"
] | 30
|
2018-03-28T19:02:23.000Z
|
2021-07-06T20:00:14.000Z
|
# Max Non Negative SubArray
# https://www.interviewbit.com/problems/max-non-negative-subarray/
#
# Find out the maximum sub-array of non negative numbers from an array.
# The sub-array should be continuous. That is, a sub-array created by choosing
# the second and fourth element and skipping the third element is invalid.
#
# Maximum sub-array is defined in terms of the sum of the elements in the sub-array.
# Sub-array A is greater than sub-array B if sum(A) > sum(B).
#
# Example:
#
# A : [1, 2, 5, -7, 2, 3]
# The two sub-arrays are [1, 2, 5] [2, 3].
# The answer is [1, 2, 5] as its sum is larger than [2, 3]
#
# NOTE: If there is a tie, then compare with segment's length and return segment which has maximum length
# NOTE 2: If there is still a tie, then return the segment with minimum starting index
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Solution:
# @param A : list of integers
# @return a list of integers
def maxset(self, A):
max_sum = max_left = max_right = -1
tmp_sum = left = 0
for i, elem in enumerate(A):
if elem >= 0:
tmp_sum += elem
else:
if tmp_sum > max_sum:
max_sum, max_left, max_right = tmp_sum, left, i
tmp_sum = 0
left = i + 1
else:
if tmp_sum > max_sum:
max_left, max_right = left, len(A)
return [] if max_left == max_right == -1 else A[max_left: max_right]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if __name__ == "__main__":
s = Solution()
print(s.maxset([0, 0, -1, 0]))
| 36.765957
| 105
| 0.538194
|
# NOTE 2: If there is still a tie, then return the segment with minimum starting index
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Solution:
# @param A : list of integers
# @return a list of integers
def maxset(self, A):
max_sum = max_left = max_right = -1
tmp_sum = left = 0
for i, elem in enumerate(A):
if elem >= 0:
tmp_sum += elem
else:
if tmp_sum > max_sum:
max_sum, max_left, max_right = tmp_sum, left, i
tmp_sum = 0
left = i + 1
else:
if tmp_sum > max_sum:
max_left, max_right = left, len(A)
return [] if max_left == max_right == -1 else A[max_left: max_right]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if __name__ == "__main__":
s = Solution()
print(s.maxset([0, 0, -1, 0]))
| true
| true
|
f712ce0b1470383ff19a2fba7bee20d030c85cf0
| 454
|
py
|
Python
|
models/dist_phold/experiment.py
|
MISTCARRYYOU/PythonPDEVS
|
53cad29832b3c489ab037bdc487affcbf1e3f408
|
[
"Apache-2.0"
] | 1
|
2018-09-19T14:42:28.000Z
|
2018-09-19T14:42:28.000Z
|
models/dist_phold/experiment.py
|
MISTCARRYYOU/PythonPDEVS
|
53cad29832b3c489ab037bdc487affcbf1e3f408
|
[
"Apache-2.0"
] | null | null | null |
models/dist_phold/experiment.py
|
MISTCARRYYOU/PythonPDEVS
|
53cad29832b3c489ab037bdc487affcbf1e3f408
|
[
"Apache-2.0"
] | 1
|
2021-01-14T12:21:35.000Z
|
2021-01-14T12:21:35.000Z
|
import model
import logging
import sys
sys.path.append('../../src/')
from simulator import Simulator
sys.setrecursionlimit(50000)
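# Build the distributed PHOLD model from the three command-line arguments and simulate it
# until the termination time of 200.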
model = model.AutoDistPHOLD(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
sim = Simulator(model)
#sim.setVerbose(None)
sim.setTerminationTime(200)
sim.setMessageCopy('custom')
sim.setStateSaving("custom")
sim.setMemoization(True)
sim.setGVTInterval(5)
#sim.setGVTInterval(30)
#sim.setShowProgress()
sim.simulate()
| 22.7
| 81
| 0.768722
|
import model
import logging
import sys
sys.path.append('../../src/')
from simulator import Simulator
sys.setrecursionlimit(50000)
model = model.AutoDistPHOLD(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
sim = Simulator(model)
sim.setTerminationTime(200)
sim.setMessageCopy('custom')
sim.setStateSaving("custom")
sim.setMemoization(True)
sim.setGVTInterval(5)
sim.simulate()
| true
| true
|
f712ce2546498b7793961eedc7b6a1340fa62324
| 340
|
py
|
Python
|
persons/api_urls.py
|
mbaragiola/heimdallerp
|
8d32131a20bd0f3609d772ac437f4f24622abfc7
|
[
"0BSD"
] | 8
|
2016-04-07T11:58:42.000Z
|
2019-06-24T01:38:12.000Z
|
persons/api_urls.py
|
mbaragiola/heimdallerp
|
8d32131a20bd0f3609d772ac437f4f24622abfc7
|
[
"0BSD"
] | null | null | null |
persons/api_urls.py
|
mbaragiola/heimdallerp
|
8d32131a20bd0f3609d772ac437f4f24622abfc7
|
[
"0BSD"
] | null | null | null |
from django.conf.urls import include, url
from persons import views
from rest_framework.routers import DefaultRouter
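# DefaultRouter auto-generates the standard list/detail routes for every registered viewset.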
router = DefaultRouter()
router.register(r'physicaladdresses', views.PhysicalAddressViewSet)
router.register(r'companies', views.CompanyViewSet)
app_name = 'persons'
urlpatterns = [
url(r'^', include(router.urls)),
]
| 26.153846
| 67
| 0.788235
|
from django.conf.urls import include, url
from persons import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'physicaladdresses', views.PhysicalAddressViewSet)
router.register(r'companies', views.CompanyViewSet)
app_name = 'persons'
urlpatterns = [
url(r'^', include(router.urls)),
]
| true
| true
|
f712ce60e828ba1f2912f4462a14e369e218292b
| 1,692
|
py
|
Python
|
tests/jenkins/python/install_conda.py
|
E3SM-Project/acme_processflow
|
84110cab08f7897d1489a6dc925258580a5d2bff
|
[
"MIT"
] | 3
|
2019-02-06T23:46:36.000Z
|
2022-02-28T01:39:26.000Z
|
tests/jenkins/python/install_conda.py
|
E3SM-Project/acme_processflow
|
84110cab08f7897d1489a6dc925258580a5d2bff
|
[
"MIT"
] | 62
|
2017-09-15T00:14:53.000Z
|
2018-03-23T22:07:12.000Z
|
tests/jenkins/python/install_conda.py
|
E3SM-Project/acme_processflow
|
84110cab08f7897d1489a6dc925258580a5d2bff
|
[
"MIT"
] | 4
|
2018-07-26T16:07:07.000Z
|
2021-06-02T12:01:48.000Z
|
import argparse
import os
import sys
from Util import SUCCESS, FAILURE
from Util import run_cmd
parser = argparse.ArgumentParser(description="install conda",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-w", "--workdir",
help="working directory -- miniconda will be installed in a subdirectory under this directory")
args = parser.parse_args()
workdir = args.workdir
# create the work directory if it does not already exist
if os.path.isdir(workdir):
print("Work directory {} already exists".format(workdir))
conda_bin = os.path.join(workdir, 'miniconda', 'bin')
if os.path.isdir(conda_bin):
print('Miniconda seems to be already installed')
sys.exit(FAILURE)
else:
os.makedirs(workdir)
# get miniconda
source_url = 'https://repo.continuum.io/miniconda/Miniconda2-4.3.31-Linux-x86_64.sh'
conda_script = os.path.join(workdir, 'miniconda2.sh')
cmd = "wget --no-check {url} -O {the_script}".format(
url=source_url,
the_script=conda_script)
ret_code = run_cmd(cmd, True, False, True)
if ret_code != SUCCESS:
sys.exit(FAILURE)
# install miniconda
conda_path = os.path.join(workdir, 'miniconda2')
cmd = "bash {conda_script} -b -p {conda_path}".format(
conda_script=conda_script,
conda_path=conda_path)
ret_code = run_cmd(cmd, True, False, True)
if ret_code != SUCCESS:
sys.exit(FAILURE)
# check conda command
conda_cmd = os.path.join(conda_path, 'bin', 'conda')
cmd = "ls -l {conda_cmd}".format(conda_cmd=conda_cmd)
ret_code = run_cmd(cmd, True, False, True)
if ret_code == SUCCESS:
    print("\nMiniconda is successfully installed under: {}".format(workdir))
sys.exit(ret_code)
| 29.172414
| 115
| 0.706856
|
import argparse
import os
import sys
from Util import SUCCESS, FAILURE
from Util import run_cmd
parser = argparse.ArgumentParser(description="install conda",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-w", "--workdir",
help="working directory -- miniconda will be installed in a subdirectory under this directory")
args = parser.parse_args()
workdir = args.workdir
if os.path.isdir(workdir):
print("Work directory {} already exists".format(workdir))
conda_bin = os.path.join(workdir, 'miniconda', 'bin')
if os.path.isdir(conda_bin):
print('Miniconda seems to be already installed')
sys.exit(FAILURE)
else:
os.makedirs(workdir)
source_url = 'https://repo.continuum.io/miniconda/Miniconda2-4.3.31-Linux-x86_64.sh'
conda_script = os.path.join(workdir, 'miniconda2.sh')
cmd = "wget --no-check {url} -O {the_script}".format(
url=source_url,
the_script=conda_script)
ret_code = run_cmd(cmd, True, False, True)
if ret_code != SUCCESS:
sys.exit(FAILURE)
conda_path = os.path.join(workdir, 'miniconda2')
cmd = "bash {conda_script} -b -p {conda_path}".format(
conda_script=conda_script,
conda_path=conda_path)
ret_code = run_cmd(cmd, True, False, True)
if ret_code != SUCCESS:
sys.exit(FAILURE)
conda_cmd = os.path.join(conda_path, 'bin', 'conda')
cmd = "ls -l {conda_cmd}".format(conda_cmd=conda_cmd)
ret_code = run_cmd(cmd, True, False, True)
if ret_code == SUCCESS:
    print("\nMiniconda is successfully installed under: {}".format(workdir))
sys.exit(ret_code)
| true
| true
|
f712cf680f5b3fed52d193239019e11f40f5a36d
| 854
|
py
|
Python
|
threads/simple_threads.py
|
bchekuri/python101
|
b025bb52c56ee69647310a6f883c88f80697a2d0
|
[
"MIT"
] | null | null | null |
threads/simple_threads.py
|
bchekuri/python101
|
b025bb52c56ee69647310a6f883c88f80697a2d0
|
[
"MIT"
] | null | null | null |
threads/simple_threads.py
|
bchekuri/python101
|
b025bb52c56ee69647310a6f883c88f80697a2d0
|
[
"MIT"
] | null | null | null |
# Simple Threads Pool
from multiprocessing.dummy import Pool as ThreadPool
from datetime import date
from datetime import datetime
import time
multiply_results = []
def squareNumber(n):
multiply_results.append(n ** 2)
dt_string = datetime.now().strftime("%H:%M:%S")
millis = int(round(time.time() * 1000))
print("Each Thread Time - %d" % millis)
time.sleep(n)
return n ** 2
# Map squareNumber over the inputs using a pool of worker threads
def calculateParallel(numbers, threads=10):
pool = ThreadPool(threads)
results = pool.map(squareNumber, numbers)
pool.close()
#pool.join()
return results
if __name__ == "__main__":
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
squaredNumbers = calculateParallel(numbers, 15)
for n in multiply_results:
print(n)
print("Results Length - %d" % len(multiply_results))
| 28.466667
| 65
| 0.669789
|
from multiprocessing.dummy import Pool as ThreadPool
from datetime import date
from datetime import datetime
import time
multiply_results = []
def squareNumber(n):
multiply_results.append(n ** 2)
dt_string = datetime.now().strftime("%H:%M:%S")
millis = int(round(time.time() * 1000))
print("Each Thread Time - %d" % millis)
time.sleep(n)
return n ** 2
def calculateParallel(numbers, threads=10):
pool = ThreadPool(threads)
results = pool.map(squareNumber, numbers)
pool.close()
return results
if __name__ == "__main__":
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
squaredNumbers = calculateParallel(numbers, 15)
for n in multiply_results:
print(n)
print("Results Length - %d" % len(multiply_results))
| true
| true
|
f712d08b33b9d9be7a030ff73142cdebe8d78a2d
| 176
|
py
|
Python
|
Code Bundle/Chapter03/tests/test_slow.py
|
ghanigreen/pytest_code
|
dbdcc322b3469c62ad328043060518edf2b2d83f
|
[
"MIT"
] | 46
|
2018-06-28T04:40:08.000Z
|
2022-02-14T05:36:48.000Z
|
Code Bundle/Chapter03/tests/test_slow.py
|
ghanigreen/pytest_code
|
dbdcc322b3469c62ad328043060518edf2b2d83f
|
[
"MIT"
] | null | null | null |
Code Bundle/Chapter03/tests/test_slow.py
|
ghanigreen/pytest_code
|
dbdcc322b3469c62ad328043060518edf2b2d83f
|
[
"MIT"
] | 22
|
2018-06-10T23:20:29.000Z
|
2022-02-24T06:47:18.000Z
|
import pytest
@pytest.mark.slow
def test_long_computation():
...
@pytest.mark.timeout(10, method="thread")
def test_topology_sort():
...
def test_foo():
pass
| 11
| 41
| 0.664773
|
import pytest
@pytest.mark.slow
def test_long_computation():
...
@pytest.mark.timeout(10, method="thread")
def test_topology_sort():
...
def test_foo():
pass
| true
| true
|
f712d258066fe75e06fb27363746e555f80b0561
| 13,566
|
py
|
Python
|
trainer/trainer.py
|
vinay-swamy/gMVP
|
62202baa0769dfe0e47c230e78dffa42fb1280f1
|
[
"MIT"
] | 2
|
2021-04-24T03:23:40.000Z
|
2021-06-28T11:51:10.000Z
|
trainer/trainer.py
|
vinay-swamy/gMVP
|
62202baa0769dfe0e47c230e78dffa42fb1280f1
|
[
"MIT"
] | null | null | null |
trainer/trainer.py
|
vinay-swamy/gMVP
|
62202baa0769dfe0e47c230e78dffa42fb1280f1
|
[
"MIT"
] | 2
|
2021-09-14T13:03:38.000Z
|
2022-03-23T02:49:19.000Z
|
import time
import json
import argparse
import os
import sys
import logging
import shutil
from datetime import datetime
import glob
import random
from scipy.stats import mannwhitneyu
from scipy.stats import spearmanr
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
import tensorflow as tf
import tensorflow_addons as tfa
#from optimization import create_optimizer
from model_attention import ModelAttention
from dataset import build_dataset
from loss import compute_loss
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
tf.config.threading.set_intra_op_parallelism_threads(60)
tf.config.threading.set_inter_op_parallelism_threads(60)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging_formatter = logging.Formatter(
'%(asctime)s - %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging_formatter)
logger.addHandler(ch)
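# Learning-rate schedule: linear warmup for the first `warmup_steps` steps, then a polynomial
# (power=1.0, i.e. linear) decay from base_lr down to end_learning_rate over decay_steps.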
class LearningRate(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, base_lr, end_learning_rate, warmup_steps, decay_steps):
super(LearningRate, self).__init__()
self.base_lr = base_lr
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
if decay_steps == 0:
self.poly_decay_fn = lambda x: self.base_lr
else:
self.poly_decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
base_lr,
decay_steps,
end_learning_rate=end_learning_rate,
power=1.0)
def __call__(self, step):
lr = tf.cond(
step < self.warmup_steps, lambda: self.base_lr * tf.cast(
step + 1, tf.float32) / tf.cast(self.warmup_steps, tf.float32),
lambda: self.poly_decay_fn(step - self.warmup_steps))
#if step % 100 == 0:
# tf.print('learning_rate', step, lr)
return lr
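# Accumulates labels and predictions across evaluation batches so that auROC, auPR,
# Mann-Whitney p-values and Spearman correlation can be computed over the whole set.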
class TestMetric(object):
def __init__(self):
self._targets = tf.zeros((0, ), tf.int32)
self._preds = tf.zeros((0, ), tf.float32)
def reset_states(self):
self._targets = tf.zeros((0, ), tf.int32)
self._preds = tf.zeros((0, ), tf.float32)
def update_state(self, targets, preds):
self._targets = tf.concat(
[self._targets, tf.cast(targets, tf.int32)], axis=-1)
self._preds = tf.concat(
[self._preds, tf.cast(preds, tf.float32)], axis=-1)
def result_auROC(self):
try:
auROC = roc_auc_score(self._targets.numpy(), self._preds.numpy())
return auROC
except:
return 0.0
def result_auPR(self):
try:
precision, recall, _ = precision_recall_curve(
self._targets.numpy(), self._preds.numpy())
auPR = auc(recall, precision)
return auPR
except:
return 0.0
def result_pvalue(self):
all_pred = self._preds.numpy()
all_label = self._targets.numpy()
mtest = mannwhitneyu(all_pred[all_label == 1],
all_pred[all_label == 0],
alternative='two-sided')
pvalue = mtest.pvalue
return pvalue
def result_total(self):
res = self._targets.numpy()
return res.shape[0]
def result_neg(self):
res = self._targets.numpy()
return res.shape[0] - np.sum(res)
def result_pos(self):
res = self._targets.numpy()
return np.sum(res)
def result_corr(self):
try:
all_pred = self._preds.numpy()
all_label = self._targets.numpy()
corr, pvalue = spearmanr(all_pred, all_label)
return corr, pvalue
except:
return 0.0
def result_max(self):
try:
all_pred = self._preds.numpy()
return np.max(all_pred)
except:
return 0.0
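# End-to-end training entry point: builds the datasets, model, schedule and optimizer,
# then trains with early stopping on the validation loss.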
def train_single_gpu(config, args):
#setup logger
str_t = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
train_dir = f'./res/{str_t}'
config['train']['train_dir'] = train_dir
os.makedirs(train_dir)
os.makedirs(train_dir + '/result')
os.makedirs(train_dir + '/model')
fh = logging.FileHandler(f'{train_dir}/train.log')
fh.setFormatter(logging_formatter)
logger.addHandler(fh)
logger.info(json.dumps(config, indent=4))
#train and validate files
batch_size = config['train']['batch_size']
input_config = config['input']
input_base_dir = input_config['base_dir']
all_files = glob.glob(input_base_dir + '/' + input_config['train'][:-1] +
args.random + '*tfrec')
#all_files = glob.glob('../dataset/tf/f_v1_w64_2021_v2' + '/' +
# input_config['train'][:-1] + args.random + '*tfrec')
random.seed(2020)
random.shuffle(all_files)
train_files, validate_files = [], []
for i in range(10):
if i == args.cv:
validate_files.append(all_files[i])
else:
train_files.append(all_files[i])
print(train_files)
print(validate_files)
asd = glob.glob(input_base_dir + '/' + 'ASD' + '.tfrec')
ndd = glob.glob(input_base_dir + '/' + 'NDD' + '.tfrec')
control = glob.glob(input_base_dir + '/' + 'Control' + '.tfrec')
brca2 = glob.glob(input_base_dir + '/' + 'BRCA2' + '.tfrec')
pparg = glob.glob(input_base_dir + '/' + 'PPARG' + '.tfrec')
#train_files += pparg
train_dataset = build_dataset(train_files, batch_size)
validate_dataset = build_dataset(validate_files, batch_size)
#model
model_type = config['train']['model_type']
if model_type == 'attention':
model = ModelAttention(config['model'])
else:
raise ValueError(f'model type {model_type} does not exist.')
#learning rate
init_learning_rate = config['train']['learning_rate']
end_learning_rate = config['train']['end_learning_rate']
'''
warmup_epochs = config['train']['warmup_epochs']
decay_epochs = config['train']['decay_epochs']
training_samples = 0
for inputs in train_dataset:
training_samples += inputs[0].shape[0]
logger.info(f'training_samples= {training_samples}')
batches_each_epoch = int(training_samples / batch_size)
warmup_steps = batches_each_epoch * warmup_epochs
decay_steps = batches_each_epoch * decay_epochs
'''
warmup_steps, decay_steps = config['train']['warmup_steps'], config[
'train']['decay_steps']
learning_rate = LearningRate(init_learning_rate,
end_learning_rate=end_learning_rate,
warmup_steps=warmup_steps,
decay_steps=decay_steps)
#training algorithm
opt = config['train'].get('opt', 'adam')
if opt == 'adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
#optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
elif opt == 'adamw':
weight_decay_rate = config['train']['weight_decay_rate']
optimizer = tfa.optimizers.AdamW(
weight_decay=weight_decay_rate,
learning_rate=learning_rate,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
)
'''
optimizer = create_optimizer(init_learning_rate,
decay_steps + warmup_steps,
warmup_steps,
end_lr=end_learning_rate,
optimizer_type='adamw')
'''
else:
        raise NotImplementedError(f"opt {opt} is not implemented")
#metrics
metric_train_loss = tf.keras.metrics.Mean(name='train_loss')
metric_test_loss = tf.keras.metrics.Mean(name='test_loss')
metric_test = TestMetric()
#summary
train_log_dir = f'{train_dir}/summary/train'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
def _update_histogram_summary():
with train_summary_writer.as_default():
for var in model.trainable_variables:
if 'kernel:' in var.name or 'gamma:' in var.name or 'beta:' in var.name:
tf.summary.histogram(var.name,
var,
step=optimizer.iterations)
def _update_gradient_norm_summary(var, grad):
with train_summary_writer.as_default():
for v, g in zip(var, grad):
if 'kernel:' in v.name or 'gamma:' in v.name or 'beta:' in v.name:
tf.summary.scalar(f'gradient_norm/{v.name}',
tf.norm(g, ord='euclidean'),
step=optimizer.iterations)
@tf.function(input_signature=[validate_dataset.element_spec])
def test_step(sample):
var, ref_aa, alt_aa, feature, label, padding_mask = sample
logit = model((ref_aa, alt_aa, feature), False, padding_mask)
loss = compute_loss(label, logit)
pred = model.predict_from_logit(logit)
return var, label, pred, loss
def _save_res(var_id, target, pred, name, epoch):
with open(f'{train_dir}/result/epoch_{epoch}_{name}.score', 'w') as f:
f.write('var\ttarget\tScore\n')
for a, c, d in zip(var_id, target, pred):
f.write('{}\t{:d}\t{:f}\n'.format(a.numpy().decode('utf-8'),
int(c), d))
return True
def test(test_dataset,
data_name,
epoch,
auc=False,
pvalue=False,
corr=False):
metric_test_loss.reset_states()
metric_test.reset_states()
all_pred, all_label, all_var = [], [], []
for step, sample in enumerate(test_dataset):
var, label, pred, loss = test_step(sample)
metric_test.update_state(label, pred)
metric_test_loss.update_state(loss)
all_pred.extend(list(pred))
all_label.extend(list(label))
all_var.extend(list(var))
all_var = np.array(all_var)
all_label = np.array(all_label)
all_pred = np.array(all_pred)
_save_res(all_var, all_label, all_pred, data_name, epoch)
if auc:
logger.info(
f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} auPR= {metric_test.result_auPR()} auROC= {metric_test.result_auROC()} max= {metric_test.result_max()}'
)
if pvalue:
logger.info(
f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} pvalue= {metric_test.result_pvalue()}'
)
if corr:
corr, pvalue = metric_test.result_corr()
logger.info(
f'{data_name} pos= {metric_test.result_total()} corr= {corr} pvalue= {pvalue} max= {metric_test.result_max()}'
)
return metric_test_loss.result()
@tf.function(input_signature=[train_dataset.element_spec])
def train_step(sample):
var, ref_aa, alt_aa, feature, label, padding_mask = sample
with tf.GradientTape() as tape:
logit = model((ref_aa, alt_aa, feature), True, padding_mask)
loss = compute_loss(label, logit)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
metric_train_loss.update_state(loss)
#if optimizer.iterations % 512 == 0:
# _update_gradient_norm_summary(model.trainable_variables, gradients)
return loss
EPOCHS = 512
watch_loss = 10000.0
watch_epoch = -1
patience_epochs = 5
for epoch in range(EPOCHS):
start = time.time()
for step, samples in enumerate(train_dataset):
loss = train_step(samples)
#tf.print(
# f'lr= {learning_rate(global_step)} wd={weight_decay(global_step)}'
#)
#model summary
if optimizer.iterations == 1:
model.summary(print_fn=logger.info)
#logging kernel weights
#if (optimizer.iterations + 1) % 512 == 0:
# _update_histogram_summary()
logger.info(f'Epoch {epoch} Loss {metric_train_loss.result():.4f}')
metric_train_loss.reset_states()
model.save_weights(f'{train_dir}/model/epoch-{epoch}.h5')
#validate and test
validate_loss = test(validate_dataset,
'validate',
epoch,
pvalue=False,
auc=True,
corr=False)
if validate_loss < watch_loss:
watch_loss = validate_loss
watch_epoch = epoch
#denovo
if epoch - watch_epoch == patience_epochs:
logger.info(f'best_epoch {watch_epoch} min_loss= {watch_loss}')
break
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--cv', type=int, default=0)
parser.add_argument('--random', type=str, default='0')
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
train_single_gpu(config, args)
if __name__ == '__main__':
main()
| 33.830424
| 231
| 0.597523
|
import time
import json
import argparse
import os
import sys
import logging
import shutil
from datetime import datetime
import glob
import random
from scipy.stats import mannwhitneyu
from scipy.stats import spearmanr
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
import tensorflow as tf
import tensorflow_addons as tfa
from model_attention import ModelAttention
from dataset import build_dataset
from loss import compute_loss
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
tf.config.threading.set_intra_op_parallelism_threads(60)
tf.config.threading.set_inter_op_parallelism_threads(60)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging_formatter = logging.Formatter(
'%(asctime)s - %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging_formatter)
logger.addHandler(ch)
class LearningRate(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, base_lr, end_learning_rate, warmup_steps, decay_steps):
super(LearningRate, self).__init__()
self.base_lr = base_lr
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
if decay_steps == 0:
self.poly_decay_fn = lambda x: self.base_lr
else:
self.poly_decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
base_lr,
decay_steps,
end_learning_rate=end_learning_rate,
power=1.0)
def __call__(self, step):
lr = tf.cond(
step < self.warmup_steps, lambda: self.base_lr * tf.cast(
step + 1, tf.float32) / tf.cast(self.warmup_steps, tf.float32),
lambda: self.poly_decay_fn(step - self.warmup_steps))
return lr
class TestMetric(object):
def __init__(self):
self._targets = tf.zeros((0, ), tf.int32)
self._preds = tf.zeros((0, ), tf.float32)
def reset_states(self):
self._targets = tf.zeros((0, ), tf.int32)
self._preds = tf.zeros((0, ), tf.float32)
def update_state(self, targets, preds):
self._targets = tf.concat(
[self._targets, tf.cast(targets, tf.int32)], axis=-1)
self._preds = tf.concat(
[self._preds, tf.cast(preds, tf.float32)], axis=-1)
def result_auROC(self):
try:
auROC = roc_auc_score(self._targets.numpy(), self._preds.numpy())
return auROC
except:
return 0.0
def result_auPR(self):
try:
precision, recall, _ = precision_recall_curve(
self._targets.numpy(), self._preds.numpy())
auPR = auc(recall, precision)
return auPR
except:
return 0.0
def result_pvalue(self):
all_pred = self._preds.numpy()
all_label = self._targets.numpy()
mtest = mannwhitneyu(all_pred[all_label == 1],
all_pred[all_label == 0],
alternative='two-sided')
pvalue = mtest.pvalue
return pvalue
def result_total(self):
res = self._targets.numpy()
return res.shape[0]
def result_neg(self):
res = self._targets.numpy()
return res.shape[0] - np.sum(res)
def result_pos(self):
res = self._targets.numpy()
return np.sum(res)
def result_corr(self):
try:
all_pred = self._preds.numpy()
all_label = self._targets.numpy()
corr, pvalue = spearmanr(all_pred, all_label)
return corr, pvalue
except:
return 0.0
def result_max(self):
try:
all_pred = self._preds.numpy()
return np.max(all_pred)
except:
return 0.0
def train_single_gpu(config, args):
str_t = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
train_dir = f'./res/{str_t}'
config['train']['train_dir'] = train_dir
os.makedirs(train_dir)
os.makedirs(train_dir + '/result')
os.makedirs(train_dir + '/model')
fh = logging.FileHandler(f'{train_dir}/train.log')
fh.setFormatter(logging_formatter)
logger.addHandler(fh)
logger.info(json.dumps(config, indent=4))
batch_size = config['train']['batch_size']
input_config = config['input']
input_base_dir = input_config['base_dir']
all_files = glob.glob(input_base_dir + '/' + input_config['train'][:-1] +
args.random + '*tfrec')
random.seed(2020)
random.shuffle(all_files)
train_files, validate_files = [], []
for i in range(10):
if i == args.cv:
validate_files.append(all_files[i])
else:
train_files.append(all_files[i])
print(train_files)
print(validate_files)
asd = glob.glob(input_base_dir + '/' + 'ASD' + '.tfrec')
ndd = glob.glob(input_base_dir + '/' + 'NDD' + '.tfrec')
control = glob.glob(input_base_dir + '/' + 'Control' + '.tfrec')
brca2 = glob.glob(input_base_dir + '/' + 'BRCA2' + '.tfrec')
pparg = glob.glob(input_base_dir + '/' + 'PPARG' + '.tfrec')
train_dataset = build_dataset(train_files, batch_size)
validate_dataset = build_dataset(validate_files, batch_size)
model_type = config['train']['model_type']
if model_type == 'attention':
model = ModelAttention(config['model'])
else:
raise ValueError(f'model type {model_type} does not exist.')
init_learning_rate = config['train']['learning_rate']
end_learning_rate = config['train']['end_learning_rate']
warmup_steps, decay_steps = config['train']['warmup_steps'], config[
'train']['decay_steps']
learning_rate = LearningRate(init_learning_rate,
end_learning_rate=end_learning_rate,
warmup_steps=warmup_steps,
decay_steps=decay_steps)
opt = config['train'].get('opt', 'adam')
if opt == 'adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
elif opt == 'adamw':
weight_decay_rate = config['train']['weight_decay_rate']
optimizer = tfa.optimizers.AdamW(
weight_decay=weight_decay_rate,
learning_rate=learning_rate,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
)
'''
optimizer = create_optimizer(init_learning_rate,
decay_steps + warmup_steps,
warmup_steps,
end_lr=end_learning_rate,
optimizer_type='adamw')
'''
else:
        raise NotImplementedError(f"opt {opt} is not implemented")
metric_train_loss = tf.keras.metrics.Mean(name='train_loss')
metric_test_loss = tf.keras.metrics.Mean(name='test_loss')
metric_test = TestMetric()
train_log_dir = f'{train_dir}/summary/train'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
def _update_histogram_summary():
with train_summary_writer.as_default():
for var in model.trainable_variables:
if 'kernel:' in var.name or 'gamma:' in var.name or 'beta:' in var.name:
tf.summary.histogram(var.name,
var,
step=optimizer.iterations)
def _update_gradient_norm_summary(var, grad):
with train_summary_writer.as_default():
for v, g in zip(var, grad):
if 'kernel:' in v.name or 'gamma:' in v.name or 'beta:' in v.name:
tf.summary.scalar(f'gradient_norm/{v.name}',
tf.norm(g, ord='euclidean'),
step=optimizer.iterations)
@tf.function(input_signature=[validate_dataset.element_spec])
def test_step(sample):
var, ref_aa, alt_aa, feature, label, padding_mask = sample
logit = model((ref_aa, alt_aa, feature), False, padding_mask)
loss = compute_loss(label, logit)
pred = model.predict_from_logit(logit)
return var, label, pred, loss
def _save_res(var_id, target, pred, name, epoch):
with open(f'{train_dir}/result/epoch_{epoch}_{name}.score', 'w') as f:
f.write('var\ttarget\tScore\n')
for a, c, d in zip(var_id, target, pred):
f.write('{}\t{:d}\t{:f}\n'.format(a.numpy().decode('utf-8'),
int(c), d))
return True
def test(test_dataset,
data_name,
epoch,
auc=False,
pvalue=False,
corr=False):
metric_test_loss.reset_states()
metric_test.reset_states()
all_pred, all_label, all_var = [], [], []
for step, sample in enumerate(test_dataset):
var, label, pred, loss = test_step(sample)
metric_test.update_state(label, pred)
metric_test_loss.update_state(loss)
all_pred.extend(list(pred))
all_label.extend(list(label))
all_var.extend(list(var))
all_var = np.array(all_var)
all_label = np.array(all_label)
all_pred = np.array(all_pred)
_save_res(all_var, all_label, all_pred, data_name, epoch)
if auc:
logger.info(
f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} auPR= {metric_test.result_auPR()} auROC= {metric_test.result_auROC()} max= {metric_test.result_max()}'
)
if pvalue:
logger.info(
f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} pvalue= {metric_test.result_pvalue()}'
)
if corr:
corr, pvalue = metric_test.result_corr()
logger.info(
f'{data_name} pos= {metric_test.result_total()} corr= {corr} pvalue= {pvalue} max= {metric_test.result_max()}'
)
return metric_test_loss.result()
@tf.function(input_signature=[train_dataset.element_spec])
def train_step(sample):
var, ref_aa, alt_aa, feature, label, padding_mask = sample
with tf.GradientTape() as tape:
logit = model((ref_aa, alt_aa, feature), True, padding_mask)
loss = compute_loss(label, logit)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
metric_train_loss.update_state(loss)
return loss
EPOCHS = 512
watch_loss = 10000.0
watch_epoch = -1
patience_epochs = 5
for epoch in range(EPOCHS):
start = time.time()
for step, samples in enumerate(train_dataset):
loss = train_step(samples)
if optimizer.iterations == 1:
model.summary(print_fn=logger.info)
logger.info(f'Epoch {epoch} Loss {metric_train_loss.result():.4f}')
metric_train_loss.reset_states()
model.save_weights(f'{train_dir}/model/epoch-{epoch}.h5')
validate_loss = test(validate_dataset,
'validate',
epoch,
pvalue=False,
auc=True,
corr=False)
if validate_loss < watch_loss:
watch_loss = validate_loss
watch_epoch = epoch
if epoch - watch_epoch == patience_epochs:
logger.info(f'best_epoch {watch_epoch} min_loss= {watch_loss}')
break
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--cv', type=int, default=0)
parser.add_argument('--random', type=str, default='0')
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
train_single_gpu(config, args)
if __name__ == '__main__':
main()
| true
| true
|
f712d3de3c53306b7e8cbd2a8d7569448d1f0fa1
| 150
|
py
|
Python
|
mne/io/edf/__init__.py
|
dgwakeman/mne-python
|
3cc7a3f8456d78c828355f1860dd7e0297e59c73
|
[
"BSD-3-Clause"
] | 4
|
2017-10-25T18:42:30.000Z
|
2019-06-15T06:48:10.000Z
|
mne/io/edf/__init__.py
|
alexandrebarachant/mne-python
|
b54e38c9bbac38c6f53747075b5bad2936fbc5b9
|
[
"BSD-3-Clause"
] | 4
|
2015-04-20T16:10:47.000Z
|
2016-11-01T13:32:48.000Z
|
mne/io/edf/__init__.py
|
alexandrebarachant/mne-python
|
b54e38c9bbac38c6f53747075b5bad2936fbc5b9
|
[
"BSD-3-Clause"
] | 2
|
2018-04-02T06:45:11.000Z
|
2018-07-16T23:39:02.000Z
|
"""EDF+,BDF module for conversion to FIF"""
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
from .edf import read_raw_edf
| 18.75
| 45
| 0.706667
|
from .edf import read_raw_edf
| true
| true
|
f712d4a70a34e71d9b6b998c3bb2d73d7e9cf24b
| 2,349
|
py
|
Python
|
minimax/algorithm.py
|
ARBII-xD/Checkers-Using-Minimax-Algorithm
|
f687757dca7689b4838a0d210abf6b255b1ab15a
|
[
"MIT"
] | null | null | null |
minimax/algorithm.py
|
ARBII-xD/Checkers-Using-Minimax-Algorithm
|
f687757dca7689b4838a0d210abf6b255b1ab15a
|
[
"MIT"
] | null | null | null |
minimax/algorithm.py
|
ARBII-xD/Checkers-Using-Minimax-Algorithm
|
f687757dca7689b4838a0d210abf6b255b1ab15a
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
import pygame
RED = (0,100,100)
WHITE = (255, 255, 255)
def minimax(position, depth, max_player, game):
    if depth == 0 or position.winner() is not None: #maximum depth reached or someone has won; return the evaluation together with the position
return position.evaluate(), position #position where we move to
if max_player: #maximizing player..
maxEval = float('-inf')
best_move = None
for move in get_all_moves(position, WHITE, game):
evaluation = minimax(move, depth-1, False, game)[0] #recursive call to minimax algo
maxEval = max(maxEval, evaluation)
if maxEval == evaluation:
best_move = move
return maxEval, best_move
else:
minEval = float('inf') #minimizing the player
best_move = None
for move in get_all_moves(position, RED, game):
evaluation = minimax(move, depth-1, True, game)[0] #recursive call to minimax algo
minEval = min(minEval, evaluation)
if minEval == evaluation:
best_move = move
return minEval, best_move
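# Apply a move (and remove any captured piece) on the given board; callers pass a deepcopy
# so the real game state is never modified.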
def simulate_move(piece, move, board, game, skip):
board.move(piece, move[0], move[1])
if skip:
board.remove(skip)
return board
def get_all_moves(board, color, game):
moves = []
for piece in board.get_all_pieces(color):
valid_moves = board.get_valid_moves(piece)
for move, skip in valid_moves.items():
draw_moves(game, board, piece)
temp_board = deepcopy(board) #copy all of the pieces that are stored in board
temp_piece = temp_board.get_piece(piece.row, piece.col)
new_board = simulate_move(temp_piece, move, temp_board, game, skip)
moves.append(new_board)
return moves
def draw_moves(game, board, piece):
valid_moves = board.get_valid_moves(piece)
board.draw(game.win)
pygame.draw.circle(game.win, (0,255,0), (piece.x, piece.y), 50, 5)
game.draw_valid_moves(valid_moves.keys())
pygame.display.update() #to simulate all possible move of all checkers before any move
# pygame.time.delay(100) #just delay 1 second before AI checkers move
| 34.544118
| 146
| 0.616007
|
from copy import deepcopy
import pygame
RED = (0,100,100)
WHITE = (255, 255, 255)
def minimax(position, depth, max_player, game):
    if depth == 0 or position.winner() is not None:
return position.evaluate(), position
if max_player:
maxEval = float('-inf')
best_move = None
for move in get_all_moves(position, WHITE, game):
evaluation = minimax(move, depth-1, False, game)[0]
maxEval = max(maxEval, evaluation)
if maxEval == evaluation:
best_move = move
return maxEval, best_move
else:
minEval = float('inf')
best_move = None
for move in get_all_moves(position, RED, game):
evaluation = minimax(move, depth-1, True, game)[0]
minEval = min(minEval, evaluation)
if minEval == evaluation:
best_move = move
return minEval, best_move
def simulate_move(piece, move, board, game, skip):
board.move(piece, move[0], move[1])
if skip:
board.remove(skip)
return board
def get_all_moves(board, color, game):
moves = []
for piece in board.get_all_pieces(color):
valid_moves = board.get_valid_moves(piece)
for move, skip in valid_moves.items():
draw_moves(game, board, piece)
temp_board = deepcopy(board)
temp_piece = temp_board.get_piece(piece.row, piece.col)
new_board = simulate_move(temp_piece, move, temp_board, game, skip)
moves.append(new_board)
return moves
def draw_moves(game, board, piece):
valid_moves = board.get_valid_moves(piece)
board.draw(game.win)
pygame.draw.circle(game.win, (0,255,0), (piece.x, piece.y), 50, 5)
game.draw_valid_moves(valid_moves.keys())
pygame.display.update()
| true
| true
|
f712d5b23f3d353d51169075f17ae12ccbceea2b
| 785
|
py
|
Python
|
python/qidoc/test/test_qidoc_clean.py
|
PrashantKumar-sudo/qibuild
|
a16ce425cf25127ceff29507feeeeca37af23351
|
[
"BSD-3-Clause"
] | null | null | null |
python/qidoc/test/test_qidoc_clean.py
|
PrashantKumar-sudo/qibuild
|
a16ce425cf25127ceff29507feeeeca37af23351
|
[
"BSD-3-Clause"
] | null | null | null |
python/qidoc/test/test_qidoc_clean.py
|
PrashantKumar-sudo/qibuild
|
a16ce425cf25127ceff29507feeeeca37af23351
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" QiBuild """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
def test_simple(qidoc_action):
""" Test Simple """
world_proj = qidoc_action.add_test_project("world")
build_dir = os.path.join(world_proj.path, "build-doc")
assert not os.path.exists(build_dir)
qidoc_action("build", "world")
assert os.path.exists(build_dir)
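    # A plain "clean" leaves the build directory in place; only "clean --force" removes it.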
qidoc_action("clean", "world")
assert os.path.exists(build_dir)
qidoc_action("clean", "world", "--force")
assert not os.path.exists(build_dir)
| 32.708333
| 84
| 0.719745
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
def test_simple(qidoc_action):
world_proj = qidoc_action.add_test_project("world")
build_dir = os.path.join(world_proj.path, "build-doc")
assert not os.path.exists(build_dir)
qidoc_action("build", "world")
assert os.path.exists(build_dir)
qidoc_action("clean", "world")
assert os.path.exists(build_dir)
qidoc_action("clean", "world", "--force")
assert not os.path.exists(build_dir)
| true
| true
|
f712d63ef56485edfd05b220d59ea32a53d6543b
| 11,764
|
py
|
Python
|
src/oci/signer.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/signer.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/signer.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
import base64
import email.utils
import hashlib
import io
import functools
import os
from oci._vendor import six
from oci.util import record_body_position_for_rewind, rewind_body, back_up_body_calculate_stream_content_length, read_stream_for_signing
from ._vendor import httpsig_cffi, requests
from .exceptions import InvalidPrivateKey, MissingPrivateKeyPassphrase
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import logging
logger = logging.getLogger(__name__)
SIGNATURE_VERSION = "1"
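# Read a PEM-encoded private key from disk (expanding "~" in the path) and parse it with load_private_key.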
def load_private_key_from_file(filename, pass_phrase=None):
filename = os.path.expanduser(filename)
with io.open(filename, mode="rb") as f:
private_key_data = f.read().strip()
return load_private_key(private_key_data, pass_phrase)
def load_private_key(secret, pass_phrase):
"""Loads a private key that may use a pass_phrase.
Tries to correct or diagnose common errors:
- provided pass_phrase but didn't need one
- provided a public key
"""
if isinstance(secret, six.text_type):
secret = secret.encode("ascii")
if isinstance(pass_phrase, six.text_type):
pass_phrase = pass_phrase.encode("ascii")
backend = default_backend()
try:
# 0) Try with pass_phrase
return serialization.load_pem_private_key(secret, pass_phrase, backend=backend)
except TypeError:
# 1) Either:
# - key has pass_phrase and one wasn't provided
# - key doesn't have pass_phrase and one was provided.
#
# Can't fix the first, but we *can* fix the second.
# This can happen if the DEFAULT profile has a pass_phrase but
# another profile uses a key file without a pass_phrase.
if pass_phrase is None:
# 1.1) private key needed a pass_phrase and we don't have one
raise MissingPrivateKeyPassphrase("The provided key requires a passphrase.")
else:
# 1.2) try again without pass_phrase; could be an artifact from DEFAULT
return serialization.load_pem_private_key(secret, None, backend=backend)
except ValueError:
# 2) Try to determine what kind of failure this is.
# Most likely, this is either a bad password or a public key.
# If loading it as a public key fails, it's almost certainly a bad password.
for loader in [
serialization.load_der_public_key,
serialization.load_pem_public_key,
serialization.load_ssh_public_key
]:
try:
loader(secret, backend=backend)
except (ValueError, UnsupportedAlgorithm):
# 2.1) Not a public key; try the next format
pass
else:
# 2.2) This is a public key
raise InvalidPrivateKey("Authentication requires a private key, but a public key was provided.")
# 2.3) Password is probably wrong.
raise InvalidPrivateKey("The provided key is not a private key, or the provided passphrase is incorrect.")
def inject_missing_headers(request, sign_body, enforce_content_headers):
# Inject date, host, and content-type if missing
request.headers.setdefault(
"date", email.utils.formatdate(usegmt=True))
request.headers.setdefault(
"host", six.moves.urllib.parse.urlparse(request.url).netloc)
if hasattr(request.body, "buffer") or hasattr(request.body, "read"):
request.headers.setdefault("content-type", "application/octet-stream")
request.headers.setdefault("content-type", "application/json")
if enforce_content_headers:
# Requests with a body need to send content-type,
# content-length, and x-content-sha256
if "x-content-sha256" not in request.headers and sign_body:
body = request.body or ""
m = hashlib.sha256()
# Handle String types
if isinstance(body, six.string_types):
body = body.encode("utf-8")
request.headers.setdefault("content-length", str(len(body)))
m.update(body)
# Handle bytes
elif isinstance(body, (bytes, bytearray)):
m.update(body)
# Handling signing for Files/stdin
elif hasattr(body, "buffer") or hasattr(body, "read"):
is_body_rewindable, original_position = record_body_position_for_rewind(body)
if is_body_rewindable:
content_length = read_stream_for_signing(m, body)
if content_length == -1:
raise IOError("Unable to read stream for signing! Please sign the stream yourself by using the custom header x-content-sha256")
request.headers.setdefault("content-length", str(content_length))
is_rewind_success = rewind_body(body, original_position)
if not is_rewind_success:
raise IOError("Unable to rewind request body while signing!")
else:
logger.warning("Stream cannot be rewound, trying to backup and sign the body!")
stream = back_up_body_calculate_stream_content_length(body)
# Updating request body as it cannot be rewound
request.body = stream.get("byte_content")
m.update(stream.get("byte_content"))
request.headers.setdefault("content-length", str(stream.get("content_length")))
# Update sha256 header
if m:
base64digest = base64.b64encode(m.digest())
base64string = base64digest.decode("utf-8")
request.headers["x-content-sha256"] = base64string
# HeaderSigner doesn't support private keys with passwords.
# Patched since the constructor parses the key in __init__
class _PatchedHeaderSigner(httpsig_cffi.sign.HeaderSigner):
HEADER_SIGNER_TEMPLATE = 'Signature algorithm="rsa-sha256",headers="{}",keyId="{}",signature="%s",version="{}"'
"""Internal. If you need to construct a Signer, use :class:`~.Signer` instead."""
def __init__(self, key_id, private_key, headers):
        # Drops httpsig's general algorithm support; the SDK only uses the specific signing/hash algorithms below.
self.sign_algorithm = "rsa"
self.hash_algorithm = "sha256"
self._hash = None
self._rsahash = httpsig_cffi.utils.HASHES[self.hash_algorithm]
self._rsa_private = private_key
self._rsa_public = self._rsa_private.public_key()
self.headers = headers
self.signature_template = self.HEADER_SIGNER_TEMPLATE.format(" ".join(headers), key_id, SIGNATURE_VERSION)
def reset_signer(self, key_id, private_key):
self._hash = None
self._rsa_private = private_key
self._rsa_public = self._rsa_private.public_key()
self.signature_template = self.HEADER_SIGNER_TEMPLATE.format(" ".join(self.headers), key_id, SIGNATURE_VERSION)
# An abstract class whose subclasses can sign requests. This contains the core logic for creating a signer and signing
# requests, but does not source the required information:
#
# - api key
# - private key
# - headers
#
# As concrete implementations are expected to provide these and have their ways of sourcing/constructing them.
class AbstractBaseSigner(requests.auth.AuthBase):
def create_signers(self, api_key, private_key, generic_headers, body_headers):
self._basic_signer = _PatchedHeaderSigner(
key_id=api_key,
private_key=private_key,
headers=generic_headers)
self._body_signer = _PatchedHeaderSigner(
key_id=api_key,
private_key=private_key,
headers=generic_headers + body_headers)
def validate_request(self, request):
verb = request.method.lower()
if verb not in ["get", "head", "delete", "put", "post", "patch"]:
raise ValueError("Don't know how to sign request verb {}".format(verb))
def do_request_sign(self, request, enforce_content_headers=True):
verb = request.method.lower()
sign_body = verb in ["put", "post", "patch"]
if sign_body and enforce_content_headers:
signer = self._body_signer
else:
signer = self._basic_signer
# The requests library sets the Transfer-Encoding header to 'chunked' if the
# body is a stream with 0 length. Object storage does not currently support this option,
# and the request will fail if it is not removed. This is the only hook available where we
# can do this after the header is added and before the request is sent.
request.headers.pop('Transfer-Encoding', None)
inject_missing_headers(request, sign_body, enforce_content_headers)
signed_headers = signer.sign(
request.headers,
host=six.moves.urllib.parse.urlparse(request.url).netloc,
method=request.method,
path=request.path_url)
request.headers.update(signed_headers)
return request
def __call__(self, request, enforce_content_headers=True):
self.validate_request(request)
return self.do_request_sign(request, enforce_content_headers)
@property
def without_content_headers(self):
return functools.partial(self, enforce_content_headers=False)
class Signer(AbstractBaseSigner):
"""
A requests auth instance that can be reused across requests. This signer is intended to be used
    when signing requests for a given user, and it requires that user's ID, their private key,
    and certificate fingerprint.
The private key can be sourced from a file (private_key_file_location) or the PEM string can be
provided directly (private_key_content).
The headers to be signed by this signer are not customizable.
You can manually sign calls by creating an instance of the signer, and
providing it as the ``auth`` argument to Requests functions:
.. code-block:: python
import requests
from oci import Signer
auth = Signer(...)
resp = requests.get("https://...", auth=auth)
"""
def __init__(self, tenancy, user, fingerprint, private_key_file_location, pass_phrase=None, private_key_content=None):
self.api_key = tenancy + "/" + user + "/" + fingerprint
if private_key_content:
self.private_key = load_private_key(private_key_content, pass_phrase)
else:
self.private_key = load_private_key_from_file(private_key_file_location, pass_phrase)
generic_headers = ["date", "(request-target)", "host"]
body_headers = ["content-length", "content-type", "x-content-sha256"]
self.create_signers(self.api_key, self.private_key, generic_headers, body_headers)
@staticmethod
def from_config(config):
from .config import validate_config
validate_config(config)
return Signer(
config['tenancy'],
config['user'],
config['fingerprint'],
private_key_file_location=config['key_file'],
pass_phrase=config.get('pass_phrase'),
private_key_content=config.get('key_content')
)
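# A minimal usage sketch, assuming a standard ~/.oci/config profile that
# oci.config.from_file can read; the default URL is a placeholder, not a real
# endpoint. It follows the pattern shown in the Signer docstring above.
def _example_signed_get(url="https://..."):
    from .config import from_file
    config = from_file()                      # loads the DEFAULT profile
    signer = Signer.from_config(config)
    # Any requests call accepts the signer through the `auth` argument.
    return requests.get(url, auth=signer)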
| 43.091575
| 245
| 0.670095
|
from __future__ import absolute_import
import base64
import email.utils
import hashlib
import io
import functools
import os
from oci._vendor import six
from oci.util import record_body_position_for_rewind, rewind_body, back_up_body_calculate_stream_content_length, read_stream_for_signing
from ._vendor import httpsig_cffi, requests
from .exceptions import InvalidPrivateKey, MissingPrivateKeyPassphrase
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import logging
logger = logging.getLogger(__name__)
SIGNATURE_VERSION = "1"
def load_private_key_from_file(filename, pass_phrase=None):
filename = os.path.expanduser(filename)
with io.open(filename, mode="rb") as f:
private_key_data = f.read().strip()
return load_private_key(private_key_data, pass_phrase)
def load_private_key(secret, pass_phrase):
if isinstance(secret, six.text_type):
secret = secret.encode("ascii")
if isinstance(pass_phrase, six.text_type):
pass_phrase = pass_phrase.encode("ascii")
backend = default_backend()
try:
return serialization.load_pem_private_key(secret, pass_phrase, backend=backend)
except TypeError:
# - key doesn't have pass_phrase and one was provided.
# This can happen if the DEFAULT profile has a pass_phrase but
# another profile uses a key file without a pass_phrase.
if pass_phrase is None:
# 1.1) private key needed a pass_phrase and we don't have one
raise MissingPrivateKeyPassphrase("The provided key requires a passphrase.")
else:
return serialization.load_pem_private_key(secret, None, backend=backend)
except ValueError:
for loader in [
serialization.load_der_public_key,
serialization.load_pem_public_key,
serialization.load_ssh_public_key
]:
try:
loader(secret, backend=backend)
except (ValueError, UnsupportedAlgorithm):
# 2.1) Not a public key; try the next format
pass
else:
# 2.2) This is a public key
raise InvalidPrivateKey("Authentication requires a private key, but a public key was provided.")
# 2.3) Password is probably wrong.
raise InvalidPrivateKey("The provided key is not a private key, or the provided passphrase is incorrect.")
def inject_missing_headers(request, sign_body, enforce_content_headers):
# Inject date, host, and content-type if missing
request.headers.setdefault(
"date", email.utils.formatdate(usegmt=True))
request.headers.setdefault(
"host", six.moves.urllib.parse.urlparse(request.url).netloc)
if hasattr(request.body, "buffer") or hasattr(request.body, "read"):
request.headers.setdefault("content-type", "application/octet-stream")
request.headers.setdefault("content-type", "application/json")
if enforce_content_headers:
# Requests with a body need to send content-type,
# content-length, and x-content-sha256
if "x-content-sha256" not in request.headers and sign_body:
body = request.body or ""
m = hashlib.sha256()
# Handle String types
if isinstance(body, six.string_types):
body = body.encode("utf-8")
request.headers.setdefault("content-length", str(len(body)))
m.update(body)
# Handle bytes
elif isinstance(body, (bytes, bytearray)):
m.update(body)
# Handling signing for Files/stdin
elif hasattr(body, "buffer") or hasattr(body, "read"):
is_body_rewindable, original_position = record_body_position_for_rewind(body)
if is_body_rewindable:
content_length = read_stream_for_signing(m, body)
if content_length == -1:
raise IOError("Unable to read stream for signing! Please sign the stream yourself by using the custom header x-content-sha256")
request.headers.setdefault("content-length", str(content_length))
is_rewind_success = rewind_body(body, original_position)
if not is_rewind_success:
raise IOError("Unable to rewind request body while signing!")
else:
logger.warning("Stream cannot be rewound, trying to backup and sign the body!")
stream = back_up_body_calculate_stream_content_length(body)
# Updating request body as it cannot be rewound
request.body = stream.get("byte_content")
m.update(stream.get("byte_content"))
request.headers.setdefault("content-length", str(stream.get("content_length")))
# Update sha256 header
if m:
base64digest = base64.b64encode(m.digest())
base64string = base64digest.decode("utf-8")
request.headers["x-content-sha256"] = base64string
# HeaderSigner doesn't support private keys with passwords.
class _PatchedHeaderSigner(httpsig_cffi.sign.HeaderSigner):
HEADER_SIGNER_TEMPLATE = 'Signature algorithm="rsa-sha256",headers="{}",keyId="{}",signature="%s",version="{}"'
def __init__(self, key_id, private_key, headers):
self.sign_algorithm = "rsa"
self.hash_algorithm = "sha256"
self._hash = None
self._rsahash = httpsig_cffi.utils.HASHES[self.hash_algorithm]
self._rsa_private = private_key
self._rsa_public = self._rsa_private.public_key()
self.headers = headers
self.signature_template = self.HEADER_SIGNER_TEMPLATE.format(" ".join(headers), key_id, SIGNATURE_VERSION)
def reset_signer(self, key_id, private_key):
self._hash = None
self._rsa_private = private_key
self._rsa_public = self._rsa_private.public_key()
self.signature_template = self.HEADER_SIGNER_TEMPLATE.format(" ".join(self.headers), key_id, SIGNATURE_VERSION)
class AbstractBaseSigner(requests.auth.AuthBase):
def create_signers(self, api_key, private_key, generic_headers, body_headers):
self._basic_signer = _PatchedHeaderSigner(
key_id=api_key,
private_key=private_key,
headers=generic_headers)
self._body_signer = _PatchedHeaderSigner(
key_id=api_key,
private_key=private_key,
headers=generic_headers + body_headers)
def validate_request(self, request):
verb = request.method.lower()
if verb not in ["get", "head", "delete", "put", "post", "patch"]:
raise ValueError("Don't know how to sign request verb {}".format(verb))
def do_request_sign(self, request, enforce_content_headers=True):
verb = request.method.lower()
sign_body = verb in ["put", "post", "patch"]
if sign_body and enforce_content_headers:
signer = self._body_signer
else:
signer = self._basic_signer
# The requests library sets the Transfer-Encoding header to 'chunked' if the
# body is a stream with 0 length. Object storage does not currently support this option,
# and the request will fail if it is not removed. This is the only hook available where we
# can do this after the header is added and before the request is sent.
request.headers.pop('Transfer-Encoding', None)
inject_missing_headers(request, sign_body, enforce_content_headers)
signed_headers = signer.sign(
request.headers,
host=six.moves.urllib.parse.urlparse(request.url).netloc,
method=request.method,
path=request.path_url)
request.headers.update(signed_headers)
return request
def __call__(self, request, enforce_content_headers=True):
self.validate_request(request)
return self.do_request_sign(request, enforce_content_headers)
@property
def without_content_headers(self):
return functools.partial(self, enforce_content_headers=False)
class Signer(AbstractBaseSigner):
def __init__(self, tenancy, user, fingerprint, private_key_file_location, pass_phrase=None, private_key_content=None):
self.api_key = tenancy + "/" + user + "/" + fingerprint
if private_key_content:
self.private_key = load_private_key(private_key_content, pass_phrase)
else:
self.private_key = load_private_key_from_file(private_key_file_location, pass_phrase)
generic_headers = ["date", "(request-target)", "host"]
body_headers = ["content-length", "content-type", "x-content-sha256"]
self.create_signers(self.api_key, self.private_key, generic_headers, body_headers)
@staticmethod
def from_config(config):
from .config import validate_config
validate_config(config)
return Signer(
config['tenancy'],
config['user'],
config['fingerprint'],
private_key_file_location=config['key_file'],
pass_phrase=config.get('pass_phrase'),
private_key_content=config.get('key_content')
)
| true
| true
|
f712d66ca7e4d5f30fd049146ccec02a60361ca6
| 5,779
|
py
|
Python
|
gnuradio-3.7.13.4/gr-analog/python/analog/qa_fastnoise.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | 1
|
2021-03-09T07:32:37.000Z
|
2021-03-09T07:32:37.000Z
|
gnuradio-3.7.13.4/gr-analog/python/analog/qa_fastnoise.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | null | null | null |
gnuradio-3.7.13.4/gr-analog/python/analog/qa_fastnoise.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2007,2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks
import numpy
class test_fastnoise_source(gr_unittest.TestCase):
def setUp (self):
self.num = 2**22
self.num_items = 10**6
self.default_args = {"samples": self.num, "seed": 43, "ampl": 1}
def tearDown (self):
pass
def run_test_real(self, form):
""" Run test case with float input/output
"""
tb = gr.top_block()
src = analog.fastnoise_source_f(type=form, **self.default_args)
head = blocks.head(nitems=self.num_items, sizeof_stream_item=gr.sizeof_float)
sink = blocks.vector_sink_f()
tb.connect(src, head, sink)
tb.run()
return numpy.array(sink.data())
def run_test_complex(self, form):
""" Run test case with complex input/output
"""
tb = gr.top_block()
src = analog.fastnoise_source_c(type=form, **self.default_args)
head = blocks.head(nitems=self.num_items, sizeof_stream_item=gr.sizeof_gr_complex)
sink = blocks.vector_sink_c()
tb.connect(src, head, sink)
tb.run()
return numpy.array(sink.data())
def test_001_real_uniform_moments(self):
data = self.run_test_real(analog.GR_UNIFORM)
self.assertAlmostEqual(min(data), -1, places=4)
self.assertAlmostEqual(max(data), 1, places=4)
# mean, variance
self.assertAlmostEqual(data.mean(), 0, places=2)
self.assertAlmostEqual(data.var(), (1-(-1))**2./12, places=3)
def test_001_real_gaussian_moments(self):
data = self.run_test_real(analog.GR_GAUSSIAN)
# mean, variance
self.assertAlmostEqual(data.mean(), 0, places=2)
self.assertAlmostEqual(data.var(), 1, places=2)
def test_001_real_laplacian_moments(self):
data = self.run_test_real(analog.GR_LAPLACIAN)
# mean, variance
self.assertAlmostEqual(data.mean(), 0, places=2)
self.assertAlmostEqual(data.var(), 2, places=2)
def test_001_complex_uniform_moments(self):
data = self.run_test_complex(analog.GR_UNIFORM)
# mean, variance
self.assertAlmostEqual(data.real.mean(), 0, places=2)
self.assertAlmostEqual(data.real.var(), 0.5*(1-(-1))**2./12, places=3)
self.assertAlmostEqual(data.imag.mean(), 0, places=2)
self.assertAlmostEqual(data.imag.var(), 0.5*(1-(-1))**2./12, places=3)
def test_001_complex_gaussian_moments(self):
data = self.run_test_complex(analog.GR_GAUSSIAN)
# mean, variance
self.assertAlmostEqual(data.real.mean(), 0, places=2)
self.assertAlmostEqual(data.real.var(), 0.5, places=2)
self.assertAlmostEqual(data.imag.mean(), 0, places=2)
self.assertAlmostEqual(data.imag.var(), 0.5, places=2)
def test_002_real_uniform_reproducibility(self):
data1 = self.run_test_real(analog.GR_UNIFORM)
data2 = self.run_test_real(analog.GR_UNIFORM)
        # It's pseudorandom, so both runs must be identical
self.assertTrue(numpy.array_equal(data1, data2))
def test_002_real_gaussian_reproducibility(self):
data1 = self.run_test_real(analog.GR_GAUSSIAN)
data2 = self.run_test_real(analog.GR_GAUSSIAN)
self.assertTrue(numpy.array_equal(data1, data2))
def test_003_real_uniform_pool(self):
src = analog.fastnoise_source_f(type=analog.GR_UNIFORM, **self.default_args)
src2 = analog.fastnoise_source_f(type=analog.GR_UNIFORM, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
def test_003_real_gaussian_pool(self):
src = analog.fastnoise_source_f(type=analog.GR_GAUSSIAN, **self.default_args)
src2 = analog.fastnoise_source_f(type=analog.GR_GAUSSIAN, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
def test_003_cmplx_gaussian_pool(self):
src = analog.fastnoise_source_c(type=analog.GR_GAUSSIAN, **self.default_args)
src2 = analog.fastnoise_source_c(type=analog.GR_GAUSSIAN, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
def test_003_cmplx_uniform_pool(self):
src = analog.fastnoise_source_c(type=analog.GR_UNIFORM, **self.default_args)
src2 = analog.fastnoise_source_c(type=analog.GR_UNIFORM, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
def test_003_real_laplacian_pool(self):
src = analog.fastnoise_source_f(type=analog.GR_LAPLACIAN, **self.default_args)
src2 = analog.fastnoise_source_f(type=analog.GR_LAPLACIAN, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
if __name__ == '__main__':
gr_unittest.run(test_fastnoise_source, "test_fastnoise_source.xml")
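# A worked check of the uniform-moment assertions above, using only numpy:
# for U(a, b) the variance is (b - a)**2 / 12, so U(-1, 1) has variance 1/3.
# The sample size below is arbitrary and only illustrative.
def uniform_variance_check(n=10**6):
    samples = numpy.random.uniform(-1, 1, size=n)
    # Both returned values should agree to roughly three decimal places.
    return samples.var(), (1 - (-1))**2 / 12.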
| 41.278571
| 99
| 0.693719
|
from gnuradio import gr, gr_unittest, analog, blocks
import numpy
class test_fastnoise_source(gr_unittest.TestCase):
def setUp (self):
self.num = 2**22
self.num_items = 10**6
self.default_args = {"samples": self.num, "seed": 43, "ampl": 1}
def tearDown (self):
pass
def run_test_real(self, form):
tb = gr.top_block()
src = analog.fastnoise_source_f(type=form, **self.default_args)
head = blocks.head(nitems=self.num_items, sizeof_stream_item=gr.sizeof_float)
sink = blocks.vector_sink_f()
tb.connect(src, head, sink)
tb.run()
return numpy.array(sink.data())
def run_test_complex(self, form):
tb = gr.top_block()
src = analog.fastnoise_source_c(type=form, **self.default_args)
head = blocks.head(nitems=self.num_items, sizeof_stream_item=gr.sizeof_gr_complex)
sink = blocks.vector_sink_c()
tb.connect(src, head, sink)
tb.run()
return numpy.array(sink.data())
def test_001_real_uniform_moments(self):
data = self.run_test_real(analog.GR_UNIFORM)
self.assertAlmostEqual(min(data), -1, places=4)
self.assertAlmostEqual(max(data), 1, places=4)
self.assertAlmostEqual(data.mean(), 0, places=2)
self.assertAlmostEqual(data.var(), (1-(-1))**2./12, places=3)
def test_001_real_gaussian_moments(self):
data = self.run_test_real(analog.GR_GAUSSIAN)
self.assertAlmostEqual(data.mean(), 0, places=2)
self.assertAlmostEqual(data.var(), 1, places=2)
def test_001_real_laplacian_moments(self):
data = self.run_test_real(analog.GR_LAPLACIAN)
self.assertAlmostEqual(data.mean(), 0, places=2)
self.assertAlmostEqual(data.var(), 2, places=2)
def test_001_complex_uniform_moments(self):
data = self.run_test_complex(analog.GR_UNIFORM)
self.assertAlmostEqual(data.real.mean(), 0, places=2)
self.assertAlmostEqual(data.real.var(), 0.5*(1-(-1))**2./12, places=3)
self.assertAlmostEqual(data.imag.mean(), 0, places=2)
self.assertAlmostEqual(data.imag.var(), 0.5*(1-(-1))**2./12, places=3)
def test_001_complex_gaussian_moments(self):
data = self.run_test_complex(analog.GR_GAUSSIAN)
self.assertAlmostEqual(data.real.mean(), 0, places=2)
self.assertAlmostEqual(data.real.var(), 0.5, places=2)
self.assertAlmostEqual(data.imag.mean(), 0, places=2)
self.assertAlmostEqual(data.imag.var(), 0.5, places=2)
def test_002_real_uniform_reproducibility(self):
data1 = self.run_test_real(analog.GR_UNIFORM)
data2 = self.run_test_real(analog.GR_UNIFORM)
self.assertTrue(numpy.array_equal(data1, data2))
def test_002_real_gaussian_reproducibility(self):
data1 = self.run_test_real(analog.GR_GAUSSIAN)
data2 = self.run_test_real(analog.GR_GAUSSIAN)
self.assertTrue(numpy.array_equal(data1, data2))
def test_003_real_uniform_pool(self):
src = analog.fastnoise_source_f(type=analog.GR_UNIFORM, **self.default_args)
src2 = analog.fastnoise_source_f(type=analog.GR_UNIFORM, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
def test_003_real_gaussian_pool(self):
src = analog.fastnoise_source_f(type=analog.GR_GAUSSIAN, **self.default_args)
src2 = analog.fastnoise_source_f(type=analog.GR_GAUSSIAN, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
def test_003_cmplx_gaussian_pool(self):
src = analog.fastnoise_source_c(type=analog.GR_GAUSSIAN, **self.default_args)
src2 = analog.fastnoise_source_c(type=analog.GR_GAUSSIAN, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
def test_003_cmplx_uniform_pool(self):
src = analog.fastnoise_source_c(type=analog.GR_UNIFORM, **self.default_args)
src2 = analog.fastnoise_source_c(type=analog.GR_UNIFORM, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
def test_003_real_laplacian_pool(self):
src = analog.fastnoise_source_f(type=analog.GR_LAPLACIAN, **self.default_args)
src2 = analog.fastnoise_source_f(type=analog.GR_LAPLACIAN, **self.default_args)
self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))
if __name__ == '__main__':
gr_unittest.run(test_fastnoise_source, "test_fastnoise_source.xml")
| true
| true
|
f712d6e22f52ae42d8161b676b62d682117d0c50
| 4,316
|
py
|
Python
|
openfoodfact/utils.py
|
pythonmentor/teiva-p11
|
4be26edbad6e490c3991ca1ce5680e889b2ab75a
|
[
"MIT",
"Unlicense"
] | null | null | null |
openfoodfact/utils.py
|
pythonmentor/teiva-p11
|
4be26edbad6e490c3991ca1ce5680e889b2ab75a
|
[
"MIT",
"Unlicense"
] | 4
|
2021-03-30T14:25:16.000Z
|
2021-06-17T06:53:27.000Z
|
openfoodfact/utils.py
|
pythonmentor/teiva-p11
|
4be26edbad6e490c3991ca1ce5680e889b2ab75a
|
[
"MIT",
"Unlicense"
] | 2
|
2020-08-15T17:23:42.000Z
|
2020-08-15T17:23:55.000Z
|
"""Useful command to download and clean data from OpenFoodfact."""
import requests
keys = [
"id",
"product_name_fr",
"nutrition_grade_fr",
"url",
"image_front_url",
"image_ingredients_url",
]
class RequestData:
"""The class fetch the data and save it in to a json file."""
def __init__(self):
self.cat_url = "https://fr.openfoodfacts.org/categories.json"
self.search_url = "https://fr.openfoodfacts.org/cgi/search.pl"
self.list_cat = []
self.list_prod = []
self.data = {}
def exec(self, page_size):
"""Main public function executing all necessary privates functions."""
self.list_cat = self._fetch_category()
data = self._fetch_products(page_size)
return data
def _fetch_category(self):
"""Request the list of category from the API."""
print("Getting Categories from API")
try:
response = self._req(self.cat_url)
data = response.json()
list_cat = [i["name"] for i in data["tags"]][:17]
self.data = {}
return list_cat
except requests.exceptions.Timeout as t:
print("Request Timeout, please retry : ", t)
except requests.exceptions.RequestException as err:
print("Something went bad, please retry : :", err)
def _fetch_products(self, page_size):
"""Request the products in respect for the categories loaded."""
print(
"Getting Products from API in respect to the"
" Categories previously got"
)
fields = ",".join(keys)
all_products = {}
for category in self.list_cat:
config = {
"action": "process",
# Get the result by category
"tagtype_0": "categories",
# the tag represents the article search
"tag_0": category,
"fields": fields,
"tag_contains_0": "contains",
# Number of articles per page
# Min content 20, Max content 1000
"page_size": page_size,
# The API response in JSON
"json": 1,
}
response = self._req(self.search_url, param=config)
all_products[category] = response.json()
return all_products
def _req(self, url, param=None):
"""Small request function used multiple times."""
response = requests.get(url, param)
return response
class Cleaner:
"""This class will handle the data formatting before db use."""
def __init__(self, data):
"""Initialize variables and launch filter_products."""
self.data = data
self.keys = keys
self.list_cat = [categories for categories in self.data]
self._dict_data = {}
self.list_of_dictio = []
self.barcode_list = []
self.name_list = []
def filter_product(self):
"""Get the data from json files and run checks."""
for category in self.list_cat:
for element in self.data[category]["products"]:
if self._data_exist(element):
self.list_of_dictio.append(element)
self._dict_data[category] = self.list_of_dictio
self.list_of_dictio = []
return self._dict_data
def _data_exist(self, element):
"""Run trough the data, if something's missing it's discarded."""
for x in self.keys:
if (
x not in element
or element[x] == ""
or len(element["id"]) != 13
):
return False
barcode = int(element["id"])
if barcode in self.barcode_list:
return False
else:
self.barcode_list.append(barcode)
name = element["product_name_fr"].lower()
if name in self.name_list:
return False
else:
self.name_list.append(name)
return True
def req_and_clean(page_size):
"""Main function to instantiate and launch operations."""
r = RequestData()
data = r.exec(page_size)
c = Cleaner(data)
data = c.filter_product()
return data
if __name__ == "__main__":
    data = req_and_clean(page_size=20)  # 20 is the minimum page size noted above
print(data)
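# A minimal consumption sketch, assuming network access to the Open Food Facts
# API; page_size=20 matches the minimum noted in the comments above, and the
# printed fields come from the `keys` list at the top of the module.
def print_cleaned_products(page_size=20):
    cleaned = req_and_clean(page_size)
    for category, products in cleaned.items():
        for product in products:
            print(category, product["id"], product["product_name_fr"])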
| 31.50365
| 78
| 0.568119
|
import requests
keys = [
"id",
"product_name_fr",
"nutrition_grade_fr",
"url",
"image_front_url",
"image_ingredients_url",
]
class RequestData:
def __init__(self):
self.cat_url = "https://fr.openfoodfacts.org/categories.json"
self.search_url = "https://fr.openfoodfacts.org/cgi/search.pl"
self.list_cat = []
self.list_prod = []
self.data = {}
def exec(self, page_size):
self.list_cat = self._fetch_category()
data = self._fetch_products(page_size)
return data
def _fetch_category(self):
print("Getting Categories from API")
try:
response = self._req(self.cat_url)
data = response.json()
list_cat = [i["name"] for i in data["tags"]][:17]
self.data = {}
return list_cat
except requests.exceptions.Timeout as t:
print("Request Timeout, please retry : ", t)
except requests.exceptions.RequestException as err:
print("Something went bad, please retry : :", err)
def _fetch_products(self, page_size):
print(
"Getting Products from API in respect to the"
" Categories previously got"
)
fields = ",".join(keys)
all_products = {}
for category in self.list_cat:
config = {
"action": "process",
"tagtype_0": "categories",
"tag_0": category,
"fields": fields,
"tag_contains_0": "contains",
"page_size": page_size,
"json": 1,
}
response = self._req(self.search_url, param=config)
all_products[category] = response.json()
return all_products
def _req(self, url, param=None):
response = requests.get(url, param)
return response
class Cleaner:
def __init__(self, data):
self.data = data
self.keys = keys
self.list_cat = [categories for categories in self.data]
self._dict_data = {}
self.list_of_dictio = []
self.barcode_list = []
self.name_list = []
def filter_product(self):
for category in self.list_cat:
for element in self.data[category]["products"]:
if self._data_exist(element):
self.list_of_dictio.append(element)
self._dict_data[category] = self.list_of_dictio
self.list_of_dictio = []
return self._dict_data
def _data_exist(self, element):
for x in self.keys:
if (
x not in element
or element[x] == ""
or len(element["id"]) != 13
):
return False
barcode = int(element["id"])
if barcode in self.barcode_list:
return False
else:
self.barcode_list.append(barcode)
name = element["product_name_fr"].lower()
if name in self.name_list:
return False
else:
self.name_list.append(name)
return True
def req_and_clean(page_size):
r = RequestData()
data = r.exec(page_size)
c = Cleaner(data)
data = c.filter_product()
return data
if __name__ == "__main__":
    data = req_and_clean(page_size=20)
print(data)
| true
| true
|
f712d787e5627871c91f146aa80f27825ee8dab9
| 1,046
|
py
|
Python
|
alipay/aop/api/response/AlipayDataDataexchangeDtmorseSyncResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/AlipayDataDataexchangeDtmorseSyncResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/AlipayDataDataexchangeDtmorseSyncResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayDataDataexchangeDtmorseSyncResponse(AlipayResponse):
def __init__(self):
super(AlipayDataDataexchangeDtmorseSyncResponse, self).__init__()
self._result_extent = None
self._success = None
@property
def result_extent(self):
return self._result_extent
@result_extent.setter
def result_extent(self, value):
self._result_extent = value
@property
def success(self):
return self._success
@success.setter
def success(self, value):
self._success = value
def parse_response_content(self, response_content):
response = super(AlipayDataDataexchangeDtmorseSyncResponse, self).parse_response_content(response_content)
if 'result_extent' in response:
self.result_extent = response['result_extent']
if 'success' in response:
self.success = response['success']
| 29.055556
| 114
| 0.700765
|
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayDataDataexchangeDtmorseSyncResponse(AlipayResponse):
def __init__(self):
super(AlipayDataDataexchangeDtmorseSyncResponse, self).__init__()
self._result_extent = None
self._success = None
@property
def result_extent(self):
return self._result_extent
@result_extent.setter
def result_extent(self, value):
self._result_extent = value
@property
def success(self):
return self._success
@success.setter
def success(self, value):
self._success = value
def parse_response_content(self, response_content):
response = super(AlipayDataDataexchangeDtmorseSyncResponse, self).parse_response_content(response_content)
if 'result_extent' in response:
self.result_extent = response['result_extent']
if 'success' in response:
self.success = response['success']
| true
| true
|
f712d827a636f2d74376374332f5d9a6bde70b89
| 530
|
py
|
Python
|
CarHome/CarHome/items.py
|
monkey-hjy/SpiderCarHome
|
302e470a653f680c2b1aa72649848d61c9626ddd
|
[
"MIT"
] | 3
|
2020-03-23T09:57:10.000Z
|
2022-02-23T08:20:23.000Z
|
CarHome/CarHome/items.py
|
monkey-hjy/SpiderCarHome
|
302e470a653f680c2b1aa72649848d61c9626ddd
|
[
"MIT"
] | null | null | null |
CarHome/CarHome/items.py
|
monkey-hjy/SpiderCarHome
|
302e470a653f680c2b1aa72649848d61c9626ddd
|
[
"MIT"
] | 1
|
2021-03-29T02:41:37.000Z
|
2021-03-29T02:41:37.000Z
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class CarhomeItem(scrapy.Item):
# define the fields for your item here like:
car_name = scrapy.Field()
car_url = scrapy.Field()
    # Vehicle rating
    car_score = scrapy.Field()
    # Vehicle class/segment
    car_gray = scrapy.Field()
    # Body structure and driving range
    car_structure = scrapy.Field()
    # Engine and electric motor
    car_engine = scrapy.Field()
car_price = scrapy.Field()
pass
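# A minimal usage sketch with hypothetical values: scrapy Items are populated
# like dicts, typically inside a spider's parse() callback.
def example_item():
    item = CarhomeItem()
    item["car_name"] = "Example Model"
    item["car_url"] = "https://example.com/car/123"
    item["car_score"] = "4.5"
    return item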
| 21.2
| 53
| 0.656604
|
import scrapy
class CarhomeItem(scrapy.Item):
car_name = scrapy.Field()
car_url = scrapy.Field()
car_score = scrapy.Field()
car_gray = scrapy.Field()
car_structure = scrapy.Field()
car_engine = scrapy.Field()
car_price = scrapy.Field()
pass
| true
| true
|
f712d8a32d790551e15d9bc33d66e68b3e000da2
| 125
|
py
|
Python
|
test/run/t315.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
test/run/t315.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
test/run/t315.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
# The mutable default list is created once, when the function is defined, so
# every call that omits `x` returns the same list object: expect [1], then [1, 2].
def default_outside(x=[]):
return x
a = default_outside()
a.append(1)
print a
b = default_outside()
b.append(2)
print b
| 12.5
| 26
| 0.688
|
def default_outside(x=[]):
return x
a = default_outside()
a.append(1)
print a
b = default_outside()
b.append(2)
print b
| false
| true
|
f712d8aae613059300d69cf1c8864f9738f9af0e
| 32,052
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_06_01/operations/connection_monitors_operations.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | null | null | null |
azure-mgmt-network/azure/mgmt/network/v2018_06_01/operations/connection_monitors_operations.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | null | null | null |
azure-mgmt-network/azure/mgmt/network/v2018_06_01/operations/connection_monitors_operations.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-06-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, network_watcher_name, connection_monitor_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create or update a connection monitor.
:param resource_group_name: The name of the resource group containing
Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters that define the operation to create a
connection monitor.
:type parameters:
~azure.mgmt.network.v2018_06_01.models.ConnectionMonitor
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ConnectionMonitorResult
or ClientRawResponse<ConnectionMonitorResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectionMonitorResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}
def get(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
"""Gets a connection monitor by name.
:param resource_group_name: The name of the resource group containing
Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ConnectionMonitorResult or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorResult
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}
def _delete_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified connection monitor.
:param resource_group_name: The name of the resource group containing
Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}
def _stop_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.stop.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def stop(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Stops the specified connection monitor.
:param resource_group_name: The name of the resource group containing
Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'}
def _start_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.start.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def start(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Starts the specified connection monitor.
:param resource_group_name: The name of the resource group containing
Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._start_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'}
def _query_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.query.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def query(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Query a snapshot of the most recent connection states.
:param resource_group_name: The name of the resource group containing
Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name given to the connection
monitor.
:type connection_monitor_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ConnectionMonitorQueryResult or
ClientRawResponse<ConnectionMonitorQueryResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorQueryResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorQueryResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._query_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectionMonitorQueryResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'}
def list(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
"""Lists all connection monitors for the specified Network Watcher.
:param resource_group_name: The name of the resource group containing
Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ConnectionMonitorResult
:rtype:
~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorResultPaged[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorResult]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ConnectionMonitorResultPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ConnectionMonitorResultPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'}
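# --- Hedged usage sketch (editorial addition, not part of the generated SDK) ---
# A minimal example of driving these operations through the versioned client. The
# client import path, constructor signature, and all resource names below are
# assumptions for illustration only.
def _example_connection_monitor_query(credentials, subscription_id):
    """Query a connection monitor and wait for the long-running result."""
    from azure.mgmt.network import NetworkManagementClient  # assumed import path
    client = NetworkManagementClient(credentials, subscription_id)
    # `connection_monitors` is the operations group implemented by the class above.
    poller = client.connection_monitors.query(
        resource_group_name='example-rg',          # hypothetical resource group
        network_watcher_name='example-watcher',    # hypothetical Network Watcher
        connection_monitor_name='example-monitor'  # hypothetical monitor name
    )
    return poller.result()  # ConnectionMonitorQueryResult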
| 50.00312
| 217
| 0.689442
|
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ConnectionMonitorsOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-06-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, network_watcher_name, connection_monitor_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectionMonitorResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}
def get(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}
def _delete_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}
def _stop_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
url = self.stop.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def stop(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'}
def _start_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
url = self.start.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def start(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._start_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'}
def _query_initial(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, **operation_config):
url = self.query.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def query(
self, resource_group_name, network_watcher_name, connection_monitor_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._query_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectionMonitorQueryResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'}
def list(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
def internal_paging(next_link=None, raw=False):
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
deserialized = models.ConnectionMonitorResultPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ConnectionMonitorResultPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'}
| true
| true
|
f712d92458ee75bbf3600c3bc00250da44027dad
| 5,258
|
py
|
Python
|
languages/python/docs/conf.py
|
hobofan/oso
|
2656313dc46581a588fcf0c57dee7bf91afbb3d0
|
[
"Apache-2.0"
] | null | null | null |
languages/python/docs/conf.py
|
hobofan/oso
|
2656313dc46581a588fcf0c57dee7bf91afbb3d0
|
[
"Apache-2.0"
] | null | null | null |
languages/python/docs/conf.py
|
hobofan/oso
|
2656313dc46581a588fcf0c57dee7bf91afbb3d0
|
[
"Apache-2.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from sphinx.highlighting import lexers
from sphinxcontrib.spelling.filters import ContractionFilter
from enchant.tokenize import Filter
sys.path.insert(0, os.path.abspath("../django-oso"))
sys.path.insert(0, os.path.abspath("../flask-oso"))
sys.path.insert(0, os.path.abspath("../sqlalchemy-oso"))
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("."))
sys.path.append(os.path.abspath("_ext"))
# DJANGO SETUP FOR DJANGO-OSO #
import django
from django.conf import settings
settings.configure()
django.setup()
##
import lexer
# -- Project information -----------------------------------------------------
project = "oso"
copyright = "2020-2021 Oso Security, Inc"
author = "oso"
version = "0.26.1"
release = "0.26.1"
# -- General configuration ---------------------------------------------------
html_title = "oso Documentation"
release_mode = os.environ.get("DOCS_RELEASE", "") == "1"
master_doc = "index"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_copybutton",
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.githubpages",
"sphinx.ext.ifconfig",
"sphinx.ext.todo",
"sphinxcontrib.contentui",
"sphinxcontrib.spelling",
]
lexers["node"] = lexer.NodeShellLexer()
lexers["polar"] = lexer.PolarLexer()
lexers["jshell"] = lexer.JShellLexer()
lexers["oso"] = lexer.OsoLexer()
class HyphenatedWordFilter(Filter):
"""Treat some hyphenated words as allowed due to made up words in our docs."""
# This cannot just be allowed words because hyphenated words are split.
words = {"un-taken", "un-run"}
def _skip(self, word):
return word in self.words
spelling_word_list_filename = "spelling_allowed_words.txt"
spelling_filters = [
# Fix spell check of contractions
ContractionFilter,
HyphenatedWordFilter,
]
# html_static_path = ["_static"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build"]
# Don't copy the source or show a link
html_copy_source = False
html_show_sourcelink = False
# add copy button to <pre> elements inside a div with class="copyable"
copybutton_selector = "div.copybutton pre"
copybutton_prompt_text = "\\[\\d*\\]: |\\.\\.\\.: |\\$ "
copybutton_prompt_is_regexp = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "borland"
### Show/hide TODOs
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# html_extra_path = ["_api_docs"]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = "sphinx_rtd_theme"
html_theme = "sphinx_material"
# html_theme_options = {"logo_only": True}
html_theme_options = {
# Include the master document at the top of the page in the breadcrumb bar.
"master_doc": False,
# Set the name of the project to appear in the navigation.
"nav_title": "oso Documentation",
# Specify a base_url used to generate sitemap.xml. If not
# specified, then no sitemap will be built.
"base_url": "https://docs.osohq.com/",
# Set the color and the accent color
"color_primary": "#0E024E",
"color_accent": "#FFFFFF",
# Set the repo location to get a badge with stats
"repo_url": "https://github.com/osohq/oso/",
"repo_name": "osohq/oso",
# Visible levels of the global TOC; -1 means unlimited
"globaltoc_depth": 3,
# If False, expand all TOC entries
"globaltoc_collapse": True,
# If True, show hidden TOC entries
"globaltoc_includehidden": True,
# "heroes": {"index": "Welcome to the home of the oso documentation!",},
"html_minify": release_mode,
"css_minify": release_mode,
"nav_links": False,
}
html_show_sphinx = False
version_dropdown = False
html_sidebars = {"**": ["globaltoc.html", "localtoc.html"]}
# html_logo = "oso_logo_trimmed.png"
html_js_files = [
# "js/custom.js",
]
html_css_files = [
# "css/custom.css",
# "css/matter.css",
]
# html_favicon = "favicon.ico"
| 29.875
| 82
| 0.68391
|
import os
import sys
from sphinx.highlighting import lexers
from sphinxcontrib.spelling.filters import ContractionFilter
from enchant.tokenize import Filter
sys.path.insert(0, os.path.abspath("../django-oso"))
sys.path.insert(0, os.path.abspath("../flask-oso"))
sys.path.insert(0, os.path.abspath("../sqlalchemy-oso"))
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("."))
sys.path.append(os.path.abspath("_ext"))
import django
from django.conf import settings
settings.configure()
django.setup()
import lexer
project = "oso"
copyright = "2020-2021 Oso Security, Inc"
author = "oso"
version = "0.26.1"
release = "0.26.1"
html_title = "oso Documentation"
release_mode = os.environ.get("DOCS_RELEASE", "") == "1"
master_doc = "index"
extensions = [
"sphinx_copybutton",
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.githubpages",
"sphinx.ext.ifconfig",
"sphinx.ext.todo",
"sphinxcontrib.contentui",
"sphinxcontrib.spelling",
]
lexers["node"] = lexer.NodeShellLexer()
lexers["polar"] = lexer.PolarLexer()
lexers["jshell"] = lexer.JShellLexer()
lexers["oso"] = lexer.OsoLexer()
class HyphenatedWordFilter(Filter):
words = {"un-taken", "un-run"}
def _skip(self, word):
return word in self.words
spelling_word_list_filename = "spelling_allowed_words.txt"
spelling_filters = [
ContractionFilter,
HyphenatedWordFilter,
]
templates_path = ["_templates"]
exclude_patterns = ["_build"]
html_copy_source = False
html_show_sourcelink = False
# add copy button to <pre> elements inside a div with class="copyable"
copybutton_selector = "div.copybutton pre"
copybutton_prompt_text = "\\[\\d*\\]: |\\.\\.\\.: |\\$ "
copybutton_prompt_is_regexp = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "borland"
### Show/hide TODOs
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# html_extra_path = ["_api_docs"]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = "sphinx_rtd_theme"
html_theme = "sphinx_material"
# html_theme_options = {"logo_only": True}
html_theme_options = {
# Include the master document at the top of the page in the breadcrumb bar.
"master_doc": False,
# Set the name of the project to appear in the navigation.
"nav_title": "oso Documentation",
# Specify a base_url used to generate sitemap.xml. If not
# specified, then no sitemap will be built.
"base_url": "https://docs.osohq.com/",
# Set the color and the accent color
"color_primary": "#0E024E",
"color_accent": "#FFFFFF",
# Set the repo location to get a badge with stats
"repo_url": "https://github.com/osohq/oso/",
"repo_name": "osohq/oso",
# Visible levels of the global TOC; -1 means unlimited
"globaltoc_depth": 3,
# If False, expand all TOC entries
"globaltoc_collapse": True,
# If True, show hidden TOC entries
"globaltoc_includehidden": True,
# "heroes": {"index": "Welcome to the home of the oso documentation!",},
"html_minify": release_mode,
"css_minify": release_mode,
"nav_links": False,
}
html_show_sphinx = False
version_dropdown = False
html_sidebars = {"**": ["globaltoc.html", "localtoc.html"]}
# html_logo = "oso_logo_trimmed.png"
html_js_files = [
# "js/custom.js",
]
html_css_files = [
# "css/custom.css",
# "css/matter.css",
]
# html_favicon = "favicon.ico"
| true
| true
|
f712d9f94983a2b03e5360c5cb331880264eac96
| 2,183
|
py
|
Python
|
profil3r/app/_menu.py
|
fxcebx/Profil3r
|
9b51f17f44e7b5cb9247bb5ed86aaf27685cb83e
|
[
"MIT"
] | 1
|
2021-11-17T09:47:18.000Z
|
2021-11-17T09:47:18.000Z
|
profil3r/app/_menu.py
|
fxcebx/Profil3r
|
9b51f17f44e7b5cb9247bb5ed86aaf27685cb83e
|
[
"MIT"
] | null | null | null |
profil3r/app/_menu.py
|
fxcebx/Profil3r
|
9b51f17f44e7b5cb9247bb5ed86aaf27685cb83e
|
[
"MIT"
] | null | null | null |
from PyInquirer import prompt, Separator
# The menus display a list of checkboxes that lets the user select the separators and modules they want to use
def separators_menu(self):
# Get a list of all existing separators
separators = self.config["separators"]
separators_menu = [
{
'type': 'checkbox',
'qmark': '⚙️ ',
'message': 'Select separators',
'name': 'separators',
'choices': []
}
]
for separator, value in separators.items():
# Separator title
separators_menu[0]["choices"].append(Separator("{} - Exemple : john{}doe".format(separator, value)))
# Separator
separators_menu[0]["choices"].append({"name": value})
self.separators = prompt(separators_menu)["separators"]
def services_menu(self):
# Get a list of all enabled modules
modules_list = sorted([module for module in self.config["plateform"] if self.config["plateform"][module]["enabled"] == "yes"])
# Create a list of all existing categories
categories = sorted(list(set([content["type"] for module, content in self.config["plateform"].items() if module in modules_list])))
services_menu = [
{
'type': 'checkbox',
'qmark': '⚙️ ',
'message': 'Select services',
'name': 'modules',
'choices': [
],
'validate': lambda answer: 'You must choose at least one service !' \
if len(answer) == 0 else True
}
]
for category in categories:
# Category title
services_menu[0]["choices"].append(Separator(category.upper()))
# Append category items
for module in modules_list:
if self.config["plateform"][module]["type"] == category:
services_menu[0]["choices"].append(
{
'name': module,
# Checked by default
'checked': module in self.config["report_elements"]
})
modules = prompt(services_menu)["modules"]
self.modules_update(modules)
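# --- Hedged illustration (editorial addition) ---
# A minimal sketch of the configuration shape these menus read from `self.config`.
# The keys ("separators", "plateform", "report_elements") come from the code above;
# the concrete values below are invented for illustration only.
_EXAMPLE_CONFIG = {
    "separators": {"dot": ".", "dash": "-"},           # name -> separator character
    "plateform": {
        "github": {"enabled": "yes", "type": "coding"},
        "twitter": {"enabled": "yes", "type": "social"},
    },
    "report_elements": ["github"],                      # modules checked by default
}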
| 35.786885
| 139
| 0.561612
|
from PyInquirer import prompt, Separator
def separators_menu(self):
separators = self.config["separators"]
separators_menu = [
{
'type': 'checkbox',
'qmark': '⚙️ ',
'message': 'Select separators',
'name': 'separators',
'choices': []
}
]
for separator, value in separators.items():
separators_menu[0]["choices"].append(Separator("{} - Exemple : john{}doe".format(separator, value)))
separators_menu[0]["choices"].append({"name": value})
self.separators = prompt(separators_menu)["separators"]
def services_menu(self):
modules_list = sorted([module for module in self.config["plateform"] if self.config["plateform"][module]["enabled"] == "yes"])
categories = sorted(list(set([content["type"] for module, content in self.config["plateform"].items() if module in modules_list])))
services_menu = [
{
'type': 'checkbox',
'qmark': '⚙️ ',
'message': 'Select services',
'name': 'modules',
'choices': [
],
'validate': lambda answer: 'You must choose at least one service !' \
if len(answer) == 0 else True
}
]
for category in categories:
services_menu[0]["choices"].append(Separator(category.upper()))
for module in modules_list:
if self.config["plateform"][module]["type"] == category:
services_menu[0]["choices"].append(
{
'name': module,
'checked': module in self.config["report_elements"]
})
modules = prompt(services_menu)["modules"]
self.modules_update(modules)
| true
| true
|
f712da4b952c50969a2013d5aeb86c1c59be1dc0
| 3,466
|
py
|
Python
|
watero_go/services/base_service.py
|
Qinnnnnn/Watero_Go
|
4e277f197971ba190591a0e83b00424b075dce3d
|
[
"MIT"
] | null | null | null |
watero_go/services/base_service.py
|
Qinnnnnn/Watero_Go
|
4e277f197971ba190591a0e83b00424b075dce3d
|
[
"MIT"
] | null | null | null |
watero_go/services/base_service.py
|
Qinnnnnn/Watero_Go
|
4e277f197971ba190591a0e83b00424b075dce3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
File : base_service.py
Author : Zerui Qin
CreateDate : 2018-12-20 10:00:00
LastModifiedDate : 2018-12-20 10:00:00
Note : Agent base service class; methods for retrieving Agent data services
"""
import datetime
import psutil
import pytz
import requests
from watero_go.utils import hardware
from watero_go.utils.log import log_debug
from watero_go.utils.mapping import Route
class BaseService:
"""
    Agent base services
"""
def __init__(self, p_url_prefix):
"""
        Initialize
        :param p_url_prefix: URL prefix of the central service
"""
self.url_prefix = p_url_prefix
self.mac_addr = hardware.get_mac_address()
        self.tz = pytz.timezone('Asia/Shanghai')  # Timezone used for timestamps
self.access_token = ''
def auth(self):
"""
        Agent authentication
        :return: int - response status code
"""
payload = dict()
payload['mac_addr'] = self.mac_addr
response = requests.get(url=self.url_prefix + Route.AUTH.value, data=payload)
res_json = response.json()
status = res_json['status']
        if response.status_code == 200:  # Server returned 200
self.access_token = res_json['message']['access_token']
log_debug.logger.info(res_json['message']['access_token'])
elif response.status_code == 403:
log_debug.logger.info(res_json['message'])
return status
def heartbeat(self):
"""
        Send Agent heartbeat information
        :return: int - response status code
"""
payload = dict()
payload['mac_addr'] = self.mac_addr
payload['access_token'] = self.access_token
payload['create_time'] = datetime.datetime.now(self.tz).strftime("%Y-%m-%d %H:%M:%S")
response = requests.post(url=self.url_prefix + Route.HEARTBEAT.value, data=payload)
res_json = response.json()
status = res_json['status']
        if response.status_code == 200:  # Server returned 200
log_debug.logger.info(res_json['message'])
        elif response.status_code == 403:  # Server returned 403
log_debug.logger.error(res_json['message'])
return status
def resource(self):
"""
        Send Agent device resource information
        :return: int - response status code
"""
payload = dict()
payload['mac_addr'] = self.mac_addr
payload['access_token'] = self.access_token
        payload['cpu_percent'] = psutil.cpu_percent()  # CPU usage percentage
        payload['cpu_count'] = psutil.cpu_count(logical=False)  # Number of physical CPU cores
        payload['cpu_freq_current'] = psutil.cpu_freq()[0]  # Current CPU frequency
        payload['total_memory'] = int(psutil.virtual_memory()[0] / 1024 / 1024)  # Total memory (MB)
        payload['available_memory'] = int(psutil.virtual_memory()[1] / 1024 / 1024)  # Available memory (MB)
        battery = psutil.sensors_battery()
        payload['sensors_battery_percent'] = battery.percent if battery else None  # Battery percentage
        payload['boot_time'] = datetime.datetime.fromtimestamp(psutil.boot_time()).strftime(
            "%Y-%m-%d %H:%M:%S")  # Boot time
payload['create_time'] = datetime.datetime.now(self.tz).strftime("%Y-%m-%d %H:%M:%S")
response = requests.post(url=self.url_prefix + Route.RESOURCE.value, data=payload)
res_json = response.json()
status = res_json['status']
        if response.status_code == 200:  # Server returned 200
log_debug.logger.info(res_json['message'])
        elif response.status_code == 403:  # Server returned 403
log_debug.logger.error(res_json['message'])
else:
log_debug.logger.error('Unknown error')
return status
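# --- Hedged usage sketch (editorial addition) ---
# A minimal example of how this service class might be driven; the URL prefix and the
# call order are assumptions based on the methods above, not documented behaviour.
if __name__ == '__main__':
    service = BaseService('http://central.example.com')  # hypothetical central service
    service.auth()       # obtain an access token first
    service.heartbeat()  # then report liveness
    service.resource()   # and send device resource usage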
| 33.980392
| 99
| 0.615695
|
import datetime
import psutil
import pytz
import requests
from watero_go.utils import hardware
from watero_go.utils.log import log_debug
from watero_go.utils.mapping import Route
class BaseService:
def __init__(self, p_url_prefix):
self.url_prefix = p_url_prefix
self.mac_addr = hardware.get_mac_address()
self.tz = pytz.timezone('Asia/Shanghai')
self.access_token = ''
def auth(self):
payload = dict()
payload['mac_addr'] = self.mac_addr
response = requests.get(url=self.url_prefix + Route.AUTH.value, data=payload)
res_json = response.json()
status = res_json['status']
        if response.status_code == 200:
self.access_token = res_json['message']['access_token']
log_debug.logger.info(res_json['message']['access_token'])
elif response.status_code == 403:
log_debug.logger.info(res_json['message'])
return status
def heartbeat(self):
payload = dict()
payload['mac_addr'] = self.mac_addr
payload['access_token'] = self.access_token
payload['create_time'] = datetime.datetime.now(self.tz).strftime("%Y-%m-%d %H:%M:%S")
response = requests.post(url=self.url_prefix + Route.HEARTBEAT.value, data=payload)
res_json = response.json()
status = res_json['status']
if response.status_code == 200:
log_debug.logger.info(res_json['message'])
elif response.status_code == 403:
log_debug.logger.error(res_json['message'])
return status
def resource(self):
payload = dict()
payload['mac_addr'] = self.mac_addr
payload['access_token'] = self.access_token
payload['cpu_percent'] = psutil.cpu_percent()
payload['cpu_count'] = psutil.cpu_count(logical=False)
payload['cpu_freq_current'] = psutil.cpu_freq()[0]
payload['total_memory'] = int(psutil.virtual_memory()[0] / 1024 / 1024)
payload['available_memory'] = int(psutil.virtual_memory()[1] / 1024 / 1024)
        battery = psutil.sensors_battery()
        payload['sensors_battery_percent'] = battery.percent if battery else None
        payload['boot_time'] = datetime.datetime.fromtimestamp(psutil.boot_time()).strftime(
"%Y-%m-%d %H:%M:%S")
payload['create_time'] = datetime.datetime.now(self.tz).strftime("%Y-%m-%d %H:%M:%S")
response = requests.post(url=self.url_prefix + Route.RESOURCE.value, data=payload)
res_json = response.json()
status = res_json['status']
if response.status_code == 200:
log_debug.logger.info(res_json['message'])
elif response.status_code == 403:
log_debug.logger.error(res_json['message'])
else:
log_debug.logger.error('Unknown error')
return status
| true
| true
|
f712dc5199108dec890f7de5823fae6ab14f629e
| 28,210
|
py
|
Python
|
tensorflow_transform/schema_inference.py
|
Saiprasad16/transform
|
774458bf0c296f8275fedf3ace303427654dace7
|
[
"Apache-2.0"
] | 1
|
2021-05-10T10:52:06.000Z
|
2021-05-10T10:52:06.000Z
|
tensorflow_transform/schema_inference.py
|
Saiprasad16/transform
|
774458bf0c296f8275fedf3ace303427654dace7
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_transform/schema_inference.py
|
Saiprasad16/transform
|
774458bf0c296f8275fedf3ace303427654dace7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic associated with schema inference and propagation.
This module contains functionality to set the schema associated with a Tensor,
and to infer the schema for a tensor, including any information that has been
set. This module will also contain any schema propagation logic, i.e. deducing
the schema of a tensor from its parents in the graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Callable, Dict, Mapping, Optional, Tuple
# GOOGLE-INITIALIZATION
import tensorflow as tf
from tensorflow_transform import common
from tensorflow_transform import common_types
from tensorflow_transform import graph_context
from tensorflow_transform import tf2_utils
from tensorflow_transform import tf_utils
from tensorflow_transform.saved import saved_transform_io_v2
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import any_pb2
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
# pylint: enable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
def _feature_spec_from_batched_tensors(tensors):
"""Infer a feature spec from a dict of tensors.
Args:
tensors: A dict whose keys are strings and values are `Tensor`,
`SparseTensor`, or `RaggedTensor`s.
Returns:
A feature spec inferred from the types and shapes of the tensors.
Raises:
ValueError: If the feature spec cannot be inferred.
TypeError: If any of the values of `tensors` are not a `Tensor`,
`SparseTensor`, or `RaggedTensor`.
"""
feature_spec = {}
for name, tensor in tensors.items():
if tensor.dtype not in (tf.string, tf.int64, tf.float32):
raise ValueError('Feature {} ({}) had invalid dtype {} for feature spec'
.format(name, tensor, tensor.dtype))
if isinstance(tensor, tf.SparseTensor):
shape = tensor.get_shape()
if shape.ndims > 2:
feature_spec[name] = tf.io.SparseFeature(
index_key=[
'{}$sparse_indices_{}'.format(name, idx)
for idx in range(shape.ndims - 1)
],
value_key='{}$sparse_values'.format(name),
dtype=tensor.dtype,
size=shape[1:],
already_sorted=True)
else:
feature_spec[name] = tf.io.VarLenFeature(tensor.dtype)
elif isinstance(tensor, tf.Tensor):
shape = tensor.get_shape()
if shape.ndims in [None, 0]:
raise ValueError(
'Feature {} ({}) had invalid shape {} for FixedLenFeature: must '
'have rank at least 1'.format(name, tensor, shape))
if any(dim is None for dim in shape.as_list()[1:]):
raise ValueError(
'Feature {} ({}) had invalid shape {} for FixedLenFeature: apart '
'from the batch dimension, all dimensions must have known size'
.format(name, tensor, shape))
feature_spec[name] = tf.io.FixedLenFeature(shape.as_list()[1:],
tensor.dtype)
elif isinstance(tensor, tf.RaggedTensor):
tf.compat.v1.logging.warn(
'Feature %s was a RaggedTensor. A Schema will be generated but the '
'Schema cannot be used with a coder (e.g. to materialize output '
        'data) or to generate a feature spec.', name)
# Arbitrarily select VarLenFeature.
feature_spec[name] = tf.io.VarLenFeature(tensor.dtype)
else:
raise TypeError(
'Expected a Tensor, SparseTensor, or RaggedTensor got {} of type {} '
'for feature {}'
.format(tensor, type(tensor), name))
return feature_spec
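# Editorial example (not part of the original module): a minimal, hedged sketch of the
# mapping implemented above -- a fully defined dense batch becomes a FixedLenFeature,
# while a rank-2 SparseTensor becomes a VarLenFeature. Tensor names are invented.
def _example_feature_spec_inference():
  dense = tf.constant([[1.0, 2.0, 3.0]])                         # shape [batch, 3]
  sparse = tf.sparse.from_dense(tf.constant([[0, 7]], tf.int64))  # rank-2 sparse input
  spec = _feature_spec_from_batched_tensors({'dense': dense, 'sparse': sparse})
  # spec['dense'] == tf.io.FixedLenFeature([3], tf.float32)
  # spec['sparse'] == tf.io.VarLenFeature(tf.int64)
  return spec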
def infer_feature_schema(features, graph, session=None):
"""Given a dict of tensors, creates a `Schema`.
Infers a schema, in the format of a tf.Transform `Schema`, for the given
dictionary of tensors.
If there is an override specified, we override the inferred schema for the
given feature's tensor. An override has the meaning that we should set
is_categorical=True. If session is not provided then we just set
  is_categorical=True, and if the session is provided then we also compute
values of the tensors representing the min and max values and set them in the
schema.
If annotations have been specified, they are added to the output schema.
Args:
features: A dict mapping column names to `Tensor` or `SparseTensor`s. The
`Tensor` or `SparseTensor`s should have a 0'th dimension which is
interpreted as the batch dimension.
graph: A `tf.Graph` used to determine schema overrides.
session: (optional) A `tf.Session` used to compute schema overrides. If
None, schema overrides will not be computed.
Returns:
A `Schema` proto.
"""
tensor_ranges = _get_tensor_ranges(graph)
if session is None:
tensor_ranges = {hashable: (None, None) for hashable in tensor_ranges}
tensor_annotations = {}
global_annotations = []
else:
tensor_ranges = session.run(tensor_ranges)
tensor_annotations, global_annotations = _get_schema_annotations(
graph, session)
modified_tensor_ranges = {}
feature_annotations = {}
for name, tensor in features.items():
if isinstance(tensor, tf.SparseTensor):
values = tensor.values
elif isinstance(tensor, tf.RaggedTensor):
values = tensor.flat_values
else:
values = tensor
hashable_values = tf_utils.hashable_tensor_or_op(values)
if hashable_values in tensor_ranges:
assert values.dtype == tf.int64
modified_tensor_ranges[name] = tensor_ranges[hashable_values]
# tensor_annotations is a defaultdict(list) so always returns a list.
feature_annotations[name] = tensor_annotations.get(hashable_values, [])
return _infer_feature_schema_common(features, modified_tensor_ranges,
feature_annotations, global_annotations)
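# Editorial example (not part of the original module): a hedged sketch of calling
# `infer_feature_schema` in TF1-style graph mode; the feature names and shapes are
# invented for illustration.
def _example_infer_feature_schema():
  with tf.compat.v1.Graph().as_default() as graph:
    features = {
        'x': tf.compat.v1.placeholder(tf.float32, shape=[None, 2]),
        'label': tf.compat.v1.placeholder(tf.int64, shape=[None, 1]),
    }
    with tf.compat.v1.Session(graph=graph) as session:
      # Returns a schema_pb2.Schema with FixedLenFeatures for 'x' and 'label'.
      return infer_feature_schema(features, graph, session)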
def infer_feature_schema_v2(features, concrete_metadata_fn,
evaluate_schema_overrides):
"""Given a dict of tensors, creates a `Schema`.
Infers a schema, in the format of a tf.Transform `Schema`, for the given
dictionary of tensors.
If there is an override specified, we override the inferred schema for the
given feature's tensor. An override has the meaning that we should set
is_categorical=True. If evaluate_schema_overrides is False then we just set
is_categorical=True, and if evaluate_schema_overrides is True then we also
compute values of the tensors representing the min and max values and set them
in the schema.
If annotations have been specified, they are added to the output schema.
Args:
features: A dict mapping column names to `Tensor` or `SparseTensor`s. The
`Tensor` or `SparseTensor`s should have a 0'th dimension which is
interpreted as the batch dimension.
concrete_metadata_fn: A `tf.ConcreteFunction` that returns a dictionary
containing the deferred annotations added to the graph when invoked with
any valid input.
evaluate_schema_overrides: A Boolean used to compute schema overrides. If
`False`, schema overrides will not be computed.
Returns:
A `Schema` proto.
"""
optimized_concrete_fn = saved_transform_io_v2.optimize_concrete_function(
concrete_metadata_fn)
metadata = collections.defaultdict(list, optimized_concrete_fn())
if not evaluate_schema_overrides:
tensor_ranges = {
tensor.numpy().decode(): (None, None)
for tensor in metadata[_TF_METADATA_TENSOR_COLLECTION]
}
tensor_annotations = {}
global_annotations = []
else:
tensor_ranges = _get_tensor_ranges_v2(metadata)
tensor_annotations, global_annotations = _get_schema_annotations_v2(
metadata)
return _infer_feature_schema_common(features, tensor_ranges,
tensor_annotations, global_annotations)
def _infer_feature_schema_common(features, tensor_ranges, feature_annotations,
global_annotations):
"""Given a dict of tensors, creates a `Schema`.
Args:
features: A dict mapping column names to `Tensor` or `SparseTensor`s. The
`Tensor` or `SparseTensor`s should have a 0'th dimension which is
interpreted as the batch dimension.
tensor_ranges: A dict mapping a tensor to a tuple containing its min and max
value.
feature_annotations: dictionary from feature name to list of any_pb2.Any
protos to be added as an annotation for that feature in the schema.
global_annotations: list of any_pb2.Any protos to be added at the global
schema level.
Returns:
A `Schema` proto.
"""
domains = {}
feature_tags = collections.defaultdict(list)
for name, tensor in features.items():
if isinstance(tensor, tf.RaggedTensor):
# Add the 'ragged_tensor' tag which will cause coder and
# schema_as_feature_spec to raise an error, as currently there is no
# feature spec for ragged tensors.
feature_tags[name].append(schema_utils.RAGGED_TENSOR_TAG)
if name in tensor_ranges:
min_value, max_value = tensor_ranges[name]
domains[name] = schema_pb2.IntDomain(
min=min_value, max=max_value, is_categorical=True)
feature_spec = _feature_spec_from_batched_tensors(features)
schema_proto = schema_utils.schema_from_feature_spec(feature_spec, domains)
# Add the annotations to the schema.
for annotation in global_annotations:
schema_proto.annotation.extra_metadata.add().CopyFrom(annotation)
# Build a map from logical feature names to Feature protos
feature_protos_by_name = {}
for feature in schema_proto.feature:
feature_protos_by_name[feature.name] = feature
for sparse_feature in schema_proto.sparse_feature:
for index_feature in sparse_feature.index_feature:
feature_protos_by_name.pop(index_feature.name)
value_feature = feature_protos_by_name.pop(
sparse_feature.value_feature.name)
feature_protos_by_name[sparse_feature.name] = value_feature
# Update annotations
for feature_name, annotations in feature_annotations.items():
feature_proto = feature_protos_by_name[feature_name]
for annotation in annotations:
feature_proto.annotation.extra_metadata.add().CopyFrom(annotation)
for feature_name, tags in feature_tags.items():
feature_proto = feature_protos_by_name[feature_name]
for tag in tags:
feature_proto.annotation.tag.append(tag)
return schema_proto
# Names of collections, which should all be the same length and contain tensors.
# Each tensor in the first collection should have its min/max described by the
# tensors in the other two collections.
_TF_METADATA_TENSOR_COLLECTION = 'tft_schema_override_tensor'
_TF_METADATA_TENSOR_MIN_COLLECTION = 'tft_schema_override_min'
_TF_METADATA_TENSOR_MAX_COLLECTION = 'tft_schema_override_max'
# Collections for adding to annotation.extra_metadata on the schema. Each
# tensor in the first collection should have a proto type and proto message in
# the other two collections
_TF_METADATA_EXTRA_ANNOTATION = 'tft_schema_override_annotation_tensor'
_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL = 'tft_schema_override_annotation_type'
_TF_METADATA_EXTRA_ANNOTATION_PROTO = 'tft_schema_override_annotation_proto'
# Used to indicate that an annotation should be applied at the schema level.
_TF_METADATA_EXTRA_ANNOTATION_GLOBAL = 'tft_schema_override_global_sentinel'
def set_tensor_schema_override(tensor, min_value, max_value):
"""Override parts of the schema of a `Tensor`.
Args:
tensor: The `Tensor` whose range is being set. Must have dtype int64.
min_value: A `Tensor` representing the min value of `tensor`.
max_value: A `Tensor` representing the max value of `tensor`.
Raises:
ValueError: If any arguments are invalid.
"""
if not isinstance(tensor, tf.Tensor):
raise ValueError('tensor {} was not a Tensor'.format(tensor))
if tensor.dtype != tf.int64:
raise ValueError(
'Range can only be set for feature of type tf.int64, got {}'.format(
tensor.dtype))
if not isinstance(min_value, tf.Tensor):
raise ValueError('min_value {} was not a Tensor'.format(min_value))
if not isinstance(max_value, tf.Tensor):
raise ValueError('max_value {} was not a Tensor'.format(max_value))
tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_COLLECTION, tensor)
tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_MIN_COLLECTION, min_value)
tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_MAX_COLLECTION, max_value)
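# Editorial example (not part of the original module): a hedged sketch of recording a
# min/max override for an int64 tensor inside a graph; the values are invented. Such an
# override later surfaces as an IntDomain(is_categorical=True) on the inferred schema.
def _example_schema_override():
  with tf.compat.v1.Graph().as_default():
    ids = tf.constant([[0], [4]], dtype=tf.int64)
    set_tensor_schema_override(ids,
                               tf.constant(0, dtype=tf.int64),
                               tf.constant(4, dtype=tf.int64))
    return ids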
def _get_tensor_ranges(graph):
"""Lookup overrides for `Tensor`s or `SparseTensor`s."""
tensors = graph.get_collection(_TF_METADATA_TENSOR_COLLECTION)
min_values = graph.get_collection(_TF_METADATA_TENSOR_MIN_COLLECTION)
max_values = graph.get_collection(_TF_METADATA_TENSOR_MAX_COLLECTION)
assert len(tensors) == len(min_values), '{} != {}'.format(tensors, min_values)
assert len(tensors) == len(max_values), '{} != {}'.format(tensors, max_values)
return dict(zip(map(tf_utils.hashable_tensor_or_op, tensors),
zip(min_values, max_values)))
def _get_tensor_ranges_v2(metadata):
"""Lookup overrides for `Tensor`s or `SparseTensor`s."""
tensors = metadata[_TF_METADATA_TENSOR_COLLECTION]
min_values = metadata[_TF_METADATA_TENSOR_MIN_COLLECTION]
max_values = metadata[_TF_METADATA_TENSOR_MAX_COLLECTION]
assert len(tensors) == len(min_values), '{} != {}'.format(tensors, min_values)
assert len(tensors) == len(max_values), '{} != {}'.format(tensors, max_values)
return {
tensor.numpy().decode(): (min_value.numpy(), max_value.numpy())
for (tensor, min_value, max_value) in zip(tensors, min_values, max_values)
}
def get_tensor_schema_override(
tensor: common_types.TensorType) -> Tuple[tf.Tensor, tf.Tensor]:
"""Lookup schema overrides for a `Tensor` or `SparseTensor`."""
if isinstance(tensor, tf.SparseTensor):
tensor = tensor.values
overrides = _get_tensor_ranges(tensor.graph)
min_max = overrides.get(tf_utils.hashable_tensor_or_op(tensor), None)
if min_max is None:
raise ValueError('Requested tensor does not have recorded min/max values')
return min_max
def annotate(type_url, proto_message, tensor=None):
"""Adds a deferred annotation to the schema.
Experimental: This API is subject to change.
This function allows analyzers or end users to annotate the post-transform
schema with additional information based on analyzer output. These annotations
are stored in the annotation.extra_metadata field of the tf.metadata schema:
https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/schema.proto#L193
Args:
type_url: A string or string `Tensor` containing the type url which uniquely
identifies the type of the serialized proto message. See
https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto#L151
proto_message: A deferred string tensor containing the serialized proto to
write to the feature schema.
tensor: (optional) If provided, the annotation will be written to the
Feature proto that is created for this tensor in the schema. If None,
the annotation is assumed to be global. Note: if the tensor is not present
in the output signature of `preprocessing_fn`, this will be a no-op.
"""
if tensor is None:
tensor = tf.constant('unused', name=_TF_METADATA_EXTRA_ANNOTATION_GLOBAL)
if not isinstance(tensor, (tf.Tensor, tf.SparseTensor)):
raise ValueError('tensor {} was not a Tensor'.format(tensor))
if not isinstance(proto_message, tf.Tensor):
raise ValueError('proto_message {} was not a Tensor'.format(proto_message))
# If the type_url is passed as a plain string, create a string tensor.
if not isinstance(type_url, tf.Tensor):
type_url = tf.constant(type_url, dtype=tf.string)
# Note: The tensors, types, and messages are stored in separate collections
# because SavedModel only supports primitive types in collections.
tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION, tensor)
tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL,
type_url)
tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION_PROTO,
proto_message)
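# Editorial example (not part of the original module): a hedged sketch of attaching a
# deferred global annotation; the type URL and serialized payload are placeholders,
# not a real proto message.
def _example_global_annotation():
  with tf.compat.v1.Graph().as_default():
    serialized = tf.constant(b'')  # stands in for a serialized proto message
    annotate('type.googleapis.com/example.Annotation', serialized)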
def _get_schema_annotations(graph, session):
"""Fetch extra_metadata annotations to be applied to the schema.
Extracts any deferred annotations that have been added to the graph and
evaluates them to obtain any_pb2.Any proto messages.
Args:
graph: A `tf.Graph` used to determine schema overrides.
session: (optional) A `tf.Session` used to compute schema annotations. If
None, schema annotations will not be computed.
Returns:
tensor_annotations: dictionary from tensor to list of any_pb2.Any protos to
be added as an annotation for that tensor's feature in the schema.
global_annotations: list of any_pb2.Any protos to be added at the global
schema level.
"""
tensors = graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION)
type_urls = session.run(
graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL))
proto_values = session.run(
graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION_PROTO))
tensor_annotation_keys = []
for tensor in tensors:
# Entries meant for the global schema annotation will have names like
# tft_schema_override_global_sentinel:0 or
# transform/tft_schema_override_global_sentinel_1:0
tensor_name = tensor.name.split('/')[-1]
if tensor_name.startswith(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL):
tensor_annotation_keys.append(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL)
else:
tensor_annotation_keys.append(tf_utils.hashable_tensor_or_op(tensor))
return _get_schema_annotations_common(tensor_annotation_keys, type_urls,
proto_values)
def _get_schema_annotations_v2(metadata):
"""Fetch extra_metadata annotations to be applied to the schema.
Extracts any deferred annotations that have been added to the graph and
evaluates them to obtain any_pb2.Any proto messages.
Args:
metadata: A dictionary containing the deferred annotations added to the
graph.
Returns:
tensor_annotations: dictionary from tensor to list of any_pb2.Any protos to
be added as an annotation for that tensor's feature in the schema.
global_annotations: list of any_pb2.Any protos to be added at the global
schema level.
"""
type_urls = [
type_url.numpy()
for type_url in metadata[_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL]
]
proto_values = [
proto_value.numpy()
for proto_value in metadata[_TF_METADATA_EXTRA_ANNOTATION_PROTO]
]
tensor_annotation_keys = [
tensor.numpy().decode()
for tensor in metadata[_TF_METADATA_EXTRA_ANNOTATION]
]
return _get_schema_annotations_common(tensor_annotation_keys, type_urls,
proto_values)
def _get_schema_annotations_common(tensor_annotation_keys, type_urls,
proto_values):
"""Fetch extra_metadata annotations to be applied to the schema.
Args:
tensor_annotation_keys: A list containing either
`_TF_METADATA_EXTRA_ANNOTATION_GLOBAL` or a hashed tensor representation
corresponding to each entry in `proto_values`. If an entry
      is `_TF_METADATA_EXTRA_ANNOTATION_GLOBAL`, the corresponding any_pb2.Any
proto in `proto_values` is returned in `global_annotations`. Otherwise, it
is returned in `feature_annotations`.
type_urls: A list of type urls corresponding to the serialized protos in
`proto_values`.
proto_values: A list of serialized any_pb2.Any protos.
Returns:
A tuple of:
tensor_annotations: dictionary from tensor to list of any_pb2.Any protos to
be added as an annotation for that tensor's feature in the schema.
global_annotations: list of any_pb2.Any protos to be added at the global
schema level.
"""
tensor_annotations = collections.defaultdict(list)
global_annotations = []
if not common.IS_ANNOTATIONS_PB_AVAILABLE:
return tensor_annotations, global_annotations
assert len(tensor_annotation_keys) == len(type_urls) == len(proto_values)
for (tensor_annotation_key, type_url,
proto_value) in zip(tensor_annotation_keys, type_urls, proto_values):
annotation = any_pb2.Any(type_url=type_url, value=proto_value)
if (isinstance(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL,
type(tensor_annotation_key)) and
tensor_annotation_key == _TF_METADATA_EXTRA_ANNOTATION_GLOBAL):
global_annotations.append(annotation)
else:
tensor_annotations[tensor_annotation_key].append(annotation)
return tensor_annotations, global_annotations
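# A small, hypothetical sketch of the key/type_url/proto_value triples consumed
# by the helper above: one global entry and one keyed by an assumed feature.
def _example_schema_annotations_common():
  keys = [_TF_METADATA_EXTRA_ANNOTATION_GLOBAL, 'example_feature']
  type_urls = ['type.googleapis.com/example.A', 'type.googleapis.com/example.B']
  proto_values = [b'\x08\x01', b'\x08\x02']
  # Yields one global annotation and one for 'example_feature' when annotation
  # protos are available, and two empty containers otherwise.
  return _get_schema_annotations_common(keys, type_urls, proto_values)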
def _get_tensor_value_to_key_map(features_dict):
"""Get reverse map from name of tensor values to key in `features_dict`."""
result = {}
for key, tensor in features_dict.items():
if isinstance(tensor, tf.SparseTensor):
values = tensor.values
elif isinstance(tensor, tf.RaggedTensor):
values = tensor.flat_values
else:
values = tensor
result[values.name] = key
return result
def _get_schema_overrides(graph,
tensor_name_to_key_map,
tensor_collection_key,
overrides_keys,
default_tensor_name=None):
"""Obtain schema overrides from graph collections.
For every tensor in the `tensor_collection_key` collection, the corresponding
feature name is in `tensor_name_to_key_map` and its schema overrides are in
the graph collections defined by keys in `overrides_keys`.
If a tensor does not exist in `tensor_name_to_key_map` but its name starts
with `default_tensor_name` (if provided), the overrides are returned with this
key.
Args:
graph: A `FuncGraph`.
tensor_name_to_key_map: A dictionary from tensor name to output feature key.
tensor_collection_key: Key for the graph collection that contains list of
tensors to annotate.
overrides_keys: A list of graph collection keys that contain schema
overrides/annotations.
default_tensor_name: (Optional) A String. If provided, use as feature key if
a tensor in the graph collections is not in `tensor_name_to_key_map`.
Returns:
A dictionary from graph collection keys to lists of features and their
schema overrides/annotations.
"""
tensors = graph.get_collection(tensor_collection_key)
overrides_list = [graph.get_collection(k) for k in overrides_keys]
result = collections.defaultdict(list)
assert (len(tensors) == len(overrides_list[0]) and
all(len(lst) == len(overrides_list[0]) for lst in overrides_list))
for tensor, overrides_tuple in zip(tensors, zip(*overrides_list)):
if tensor.name in tensor_name_to_key_map:
result[tensor_collection_key].append(tensor_name_to_key_map[tensor.name])
else:
if default_tensor_name is None:
continue
tensor_name = tensor.name.split('/')[-1]
if tensor.dtype == tf.string and tensor_name.startswith(
default_tensor_name):
result[tensor_collection_key].append(default_tensor_name)
else:
continue
# If a feature name was added to the result list for tensor_collection_key,
# add its annotations as well.
assert len(overrides_keys) == len(overrides_tuple)
for overrides_key, override in zip(overrides_keys, overrides_tuple):
result[overrides_key].append(override)
return result
def get_traced_metadata_fn(
tensor_replacement_map: Optional[Dict[str, tf.Tensor]],
preprocessing_fn: Callable[[Mapping[str, common_types.InputTensorType]],
Mapping[str, common_types.InputTensorType]],
structured_inputs: Mapping[str, common_types.InputTensorType],
base_temp_dir: str, evaluate_schema_overrides: bool) -> function.Function:
"""Get a tf.function that returns a dictionary of annotations.
Annotations are added to graph collections keyed by graph tensor names when
`preprocessing_fn` is being traced. The metadata fn defined by this method
converts the graph tensor names to output feature keys.
If `evaluate_schema_overrides` is True, tracing the `preprocessing_fn` will
add overrides for feature ranges (min/max) and/or feature protos to the graph
collection, if applicable. These overrides are returned when the function
returned by this method is invoked.
Args:
tensor_replacement_map: A map from placeholder tensor names to their
evaluated replacement tensors.
preprocessing_fn: A user defined python function to be traced.
structured_inputs: A dictionary of placeholder inputs to `preprocessing_fn`.
base_temp_dir: Base path to write any dummy assets to during tracing.
evaluate_schema_overrides: If `False`, the returned dictionary contains a
single key `_TF_METADATA_TENSOR_COLLECTION` as all other annotations are
deferred. Else, the returned dictionary contains several deferred
annotations.
Returns:
A dictionary whose keys represent the types of annotations and the values
are collections of feature keys/annotations.
"""
# Since this is a TFT-internal function with constant outputs, autograph will
# not affect its behavior. It will only increase tracing time, if enabled.
# Hence, trace with `autograph=False` here.
@tf.function(input_signature=[], autograph=False)
def metadata_fn():
graph = ops.get_default_graph()
inputs = tf2_utils.supply_missing_inputs(structured_inputs, batch_size=1)
with graph_context.TFGraphContext(
temp_dir=base_temp_dir, evaluated_replacements=tensor_replacement_map):
transformed_features = preprocessing_fn(inputs)
# Get a map from tensor value names to feature keys.
reversed_features = _get_tensor_value_to_key_map(transformed_features)
result = collections.defaultdict(list)
if not evaluate_schema_overrides:
schema_override_tensors = graph.get_collection(
_TF_METADATA_TENSOR_COLLECTION)
for tensor in schema_override_tensors:
if tensor.name in reversed_features:
result[_TF_METADATA_TENSOR_COLLECTION].append(
reversed_features[tensor.name])
else:
# Obtain schema overrides for feature tensor ranges.
result.update(
_get_schema_overrides(graph, reversed_features,
_TF_METADATA_TENSOR_COLLECTION, [
_TF_METADATA_TENSOR_MIN_COLLECTION,
_TF_METADATA_TENSOR_MAX_COLLECTION
]))
# Obtain schema overrides for feature protos. If no feature tensor is in
# the `_TF_METADATA_EXTRA_ANNOTATION` collection for a specified
# annotation, `_TF_METADATA_EXTRA_ANNOTATION_GLOBAL` is used as the
# feature name to indicate that this annotation should be added to the
# global schema.
result.update(
_get_schema_overrides(graph, reversed_features,
_TF_METADATA_EXTRA_ANNOTATION, [
_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL,
_TF_METADATA_EXTRA_ANNOTATION_PROTO
], _TF_METADATA_EXTRA_ANNOTATION_GLOBAL))
return result
return metadata_fn
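# A minimal, hypothetical sketch of wiring up the traced metadata function;
# `user_preprocessing_fn`, `placeholder_inputs` and `tmp_dir` are assumed
# stand-ins supplied by the caller.
def _example_trace_metadata(user_preprocessing_fn, placeholder_inputs, tmp_dir):
  metadata_fn = get_traced_metadata_fn(
      tensor_replacement_map=None,
      preprocessing_fn=user_preprocessing_fn,
      structured_inputs=placeholder_inputs,
      base_temp_dir=tmp_dir,
      evaluate_schema_overrides=True)
  # The concrete function can then be handed to `infer_feature_schema_v2`.
  return metadata_fn.get_concrete_function()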
| 43.668731
| 99
| 0.731301
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Callable, Dict, Mapping, Optional, Tuple
import tensorflow as tf
from tensorflow_transform import common
from tensorflow_transform import common_types
from tensorflow_transform import graph_context
from tensorflow_transform import tf2_utils
from tensorflow_transform import tf_utils
from tensorflow_transform.saved import saved_transform_io_v2
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import any_pb2
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow_metadata.proto.v0 import schema_pb2
def _feature_spec_from_batched_tensors(tensors):
feature_spec = {}
for name, tensor in tensors.items():
if tensor.dtype not in (tf.string, tf.int64, tf.float32):
raise ValueError('Feature {} ({}) had invalid dtype {} for feature spec'
.format(name, tensor, tensor.dtype))
if isinstance(tensor, tf.SparseTensor):
shape = tensor.get_shape()
if shape.ndims > 2:
feature_spec[name] = tf.io.SparseFeature(
index_key=[
'{}$sparse_indices_{}'.format(name, idx)
for idx in range(shape.ndims - 1)
],
value_key='{}$sparse_values'.format(name),
dtype=tensor.dtype,
size=shape[1:],
already_sorted=True)
else:
feature_spec[name] = tf.io.VarLenFeature(tensor.dtype)
elif isinstance(tensor, tf.Tensor):
shape = tensor.get_shape()
if shape.ndims in [None, 0]:
raise ValueError(
'Feature {} ({}) had invalid shape {} for FixedLenFeature: must '
'have rank at least 1'.format(name, tensor, shape))
if any(dim is None for dim in shape.as_list()[1:]):
raise ValueError(
'Feature {} ({}) had invalid shape {} for FixedLenFeature: apart '
'from the batch dimension, all dimensions must have known size'
.format(name, tensor, shape))
feature_spec[name] = tf.io.FixedLenFeature(shape.as_list()[1:],
tensor.dtype)
elif isinstance(tensor, tf.RaggedTensor):
tf.compat.v1.logging.warn(
'Feature %s was a RaggedTensor. A Schema will be generated but the '
'Schema cannot be used with a coder (e.g. to materialize output '
          'data) or to generate a feature spec.', name)
feature_spec[name] = tf.io.VarLenFeature(tensor.dtype)
else:
raise TypeError(
'Expected a Tensor, SparseTensor, or RaggedTensor got {} of type {} '
'for feature {}'
.format(tensor, type(tensor), name))
return feature_spec
def infer_feature_schema(features, graph, session=None):
tensor_ranges = _get_tensor_ranges(graph)
if session is None:
tensor_ranges = {hashable: (None, None) for hashable in tensor_ranges}
tensor_annotations = {}
global_annotations = []
else:
tensor_ranges = session.run(tensor_ranges)
tensor_annotations, global_annotations = _get_schema_annotations(
graph, session)
modified_tensor_ranges = {}
feature_annotations = {}
for name, tensor in features.items():
if isinstance(tensor, tf.SparseTensor):
values = tensor.values
elif isinstance(tensor, tf.RaggedTensor):
values = tensor.flat_values
else:
values = tensor
hashable_values = tf_utils.hashable_tensor_or_op(values)
if hashable_values in tensor_ranges:
assert values.dtype == tf.int64
modified_tensor_ranges[name] = tensor_ranges[hashable_values]
feature_annotations[name] = tensor_annotations.get(hashable_values, [])
return _infer_feature_schema_common(features, modified_tensor_ranges,
feature_annotations, global_annotations)
def infer_feature_schema_v2(features, concrete_metadata_fn,
evaluate_schema_overrides):
optimized_concrete_fn = saved_transform_io_v2.optimize_concrete_function(
concrete_metadata_fn)
metadata = collections.defaultdict(list, optimized_concrete_fn())
if not evaluate_schema_overrides:
tensor_ranges = {
tensor.numpy().decode(): (None, None)
for tensor in metadata[_TF_METADATA_TENSOR_COLLECTION]
}
tensor_annotations = {}
global_annotations = []
else:
tensor_ranges = _get_tensor_ranges_v2(metadata)
tensor_annotations, global_annotations = _get_schema_annotations_v2(
metadata)
return _infer_feature_schema_common(features, tensor_ranges,
tensor_annotations, global_annotations)
def _infer_feature_schema_common(features, tensor_ranges, feature_annotations,
global_annotations):
domains = {}
feature_tags = collections.defaultdict(list)
for name, tensor in features.items():
if isinstance(tensor, tf.RaggedTensor):
feature_tags[name].append(schema_utils.RAGGED_TENSOR_TAG)
if name in tensor_ranges:
min_value, max_value = tensor_ranges[name]
domains[name] = schema_pb2.IntDomain(
min=min_value, max=max_value, is_categorical=True)
feature_spec = _feature_spec_from_batched_tensors(features)
schema_proto = schema_utils.schema_from_feature_spec(feature_spec, domains)
for annotation in global_annotations:
schema_proto.annotation.extra_metadata.add().CopyFrom(annotation)
feature_protos_by_name = {}
for feature in schema_proto.feature:
feature_protos_by_name[feature.name] = feature
for sparse_feature in schema_proto.sparse_feature:
for index_feature in sparse_feature.index_feature:
feature_protos_by_name.pop(index_feature.name)
value_feature = feature_protos_by_name.pop(
sparse_feature.value_feature.name)
feature_protos_by_name[sparse_feature.name] = value_feature
for feature_name, annotations in feature_annotations.items():
feature_proto = feature_protos_by_name[feature_name]
for annotation in annotations:
feature_proto.annotation.extra_metadata.add().CopyFrom(annotation)
for feature_name, tags in feature_tags.items():
feature_proto = feature_protos_by_name[feature_name]
for tag in tags:
feature_proto.annotation.tag.append(tag)
return schema_proto
_TF_METADATA_TENSOR_COLLECTION = 'tft_schema_override_tensor'
_TF_METADATA_TENSOR_MIN_COLLECTION = 'tft_schema_override_min'
_TF_METADATA_TENSOR_MAX_COLLECTION = 'tft_schema_override_max'
_TF_METADATA_EXTRA_ANNOTATION = 'tft_schema_override_annotation_tensor'
_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL = 'tft_schema_override_annotation_type'
_TF_METADATA_EXTRA_ANNOTATION_PROTO = 'tft_schema_override_annotation_proto'
_TF_METADATA_EXTRA_ANNOTATION_GLOBAL = 'tft_schema_override_global_sentinel'
def set_tensor_schema_override(tensor, min_value, max_value):
if not isinstance(tensor, tf.Tensor):
raise ValueError('tensor {} was not a Tensor'.format(tensor))
if tensor.dtype != tf.int64:
raise ValueError(
'Range can only be set for feature of type tf.int64, got {}'.format(
tensor.dtype))
if not isinstance(min_value, tf.Tensor):
raise ValueError('min_value {} was not a Tensor'.format(min_value))
if not isinstance(max_value, tf.Tensor):
raise ValueError('max_value {} was not a Tensor'.format(max_value))
tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_COLLECTION, tensor)
tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_MIN_COLLECTION, min_value)
tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_MAX_COLLECTION, max_value)
def _get_tensor_ranges(graph):
tensors = graph.get_collection(_TF_METADATA_TENSOR_COLLECTION)
min_values = graph.get_collection(_TF_METADATA_TENSOR_MIN_COLLECTION)
max_values = graph.get_collection(_TF_METADATA_TENSOR_MAX_COLLECTION)
assert len(tensors) == len(min_values), '{} != {}'.format(tensors, min_values)
assert len(tensors) == len(max_values), '{} != {}'.format(tensors, max_values)
return dict(zip(map(tf_utils.hashable_tensor_or_op, tensors),
zip(min_values, max_values)))
def _get_tensor_ranges_v2(metadata):
tensors = metadata[_TF_METADATA_TENSOR_COLLECTION]
min_values = metadata[_TF_METADATA_TENSOR_MIN_COLLECTION]
max_values = metadata[_TF_METADATA_TENSOR_MAX_COLLECTION]
assert len(tensors) == len(min_values), '{} != {}'.format(tensors, min_values)
assert len(tensors) == len(max_values), '{} != {}'.format(tensors, max_values)
return {
tensor.numpy().decode(): (min_value.numpy(), max_value.numpy())
for (tensor, min_value, max_value) in zip(tensors, min_values, max_values)
}
def get_tensor_schema_override(
tensor: common_types.TensorType) -> Tuple[tf.Tensor, tf.Tensor]:
if isinstance(tensor, tf.SparseTensor):
tensor = tensor.values
overrides = _get_tensor_ranges(tensor.graph)
min_max = overrides.get(tf_utils.hashable_tensor_or_op(tensor), None)
if min_max is None:
raise ValueError('Requested tensor does not have recorded min/max values')
return min_max
def annotate(type_url, proto_message, tensor=None):
if tensor is None:
tensor = tf.constant('unused', name=_TF_METADATA_EXTRA_ANNOTATION_GLOBAL)
if not isinstance(tensor, (tf.Tensor, tf.SparseTensor)):
raise ValueError('tensor {} was not a Tensor'.format(tensor))
if not isinstance(proto_message, tf.Tensor):
raise ValueError('proto_message {} was not a Tensor'.format(proto_message))
if not isinstance(type_url, tf.Tensor):
type_url = tf.constant(type_url, dtype=tf.string)
tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION, tensor)
tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL,
type_url)
tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION_PROTO,
proto_message)
def _get_schema_annotations(graph, session):
tensors = graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION)
type_urls = session.run(
graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL))
proto_values = session.run(
graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION_PROTO))
tensor_annotation_keys = []
for tensor in tensors:
tensor_name = tensor.name.split('/')[-1]
if tensor_name.startswith(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL):
tensor_annotation_keys.append(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL)
else:
tensor_annotation_keys.append(tf_utils.hashable_tensor_or_op(tensor))
return _get_schema_annotations_common(tensor_annotation_keys, type_urls,
proto_values)
def _get_schema_annotations_v2(metadata):
type_urls = [
type_url.numpy()
for type_url in metadata[_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL]
]
proto_values = [
proto_value.numpy()
for proto_value in metadata[_TF_METADATA_EXTRA_ANNOTATION_PROTO]
]
tensor_annotation_keys = [
tensor.numpy().decode()
for tensor in metadata[_TF_METADATA_EXTRA_ANNOTATION]
]
return _get_schema_annotations_common(tensor_annotation_keys, type_urls,
proto_values)
def _get_schema_annotations_common(tensor_annotation_keys, type_urls,
proto_values):
tensor_annotations = collections.defaultdict(list)
global_annotations = []
if not common.IS_ANNOTATIONS_PB_AVAILABLE:
return tensor_annotations, global_annotations
assert len(tensor_annotation_keys) == len(type_urls) == len(proto_values)
for (tensor_annotation_key, type_url,
proto_value) in zip(tensor_annotation_keys, type_urls, proto_values):
annotation = any_pb2.Any(type_url=type_url, value=proto_value)
if (isinstance(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL,
type(tensor_annotation_key)) and
tensor_annotation_key == _TF_METADATA_EXTRA_ANNOTATION_GLOBAL):
global_annotations.append(annotation)
else:
tensor_annotations[tensor_annotation_key].append(annotation)
return tensor_annotations, global_annotations
def _get_tensor_value_to_key_map(features_dict):
result = {}
for key, tensor in features_dict.items():
if isinstance(tensor, tf.SparseTensor):
values = tensor.values
elif isinstance(tensor, tf.RaggedTensor):
values = tensor.flat_values
else:
values = tensor
result[values.name] = key
return result
def _get_schema_overrides(graph,
tensor_name_to_key_map,
tensor_collection_key,
overrides_keys,
default_tensor_name=None):
tensors = graph.get_collection(tensor_collection_key)
overrides_list = [graph.get_collection(k) for k in overrides_keys]
result = collections.defaultdict(list)
assert (len(tensors) == len(overrides_list[0]) and
all(len(lst) == len(overrides_list[0]) for lst in overrides_list))
for tensor, overrides_tuple in zip(tensors, zip(*overrides_list)):
if tensor.name in tensor_name_to_key_map:
result[tensor_collection_key].append(tensor_name_to_key_map[tensor.name])
else:
if default_tensor_name is None:
continue
tensor_name = tensor.name.split('/')[-1]
if tensor.dtype == tf.string and tensor_name.startswith(
default_tensor_name):
result[tensor_collection_key].append(default_tensor_name)
else:
continue
assert len(overrides_keys) == len(overrides_tuple)
for overrides_key, override in zip(overrides_keys, overrides_tuple):
result[overrides_key].append(override)
return result
def get_traced_metadata_fn(
tensor_replacement_map: Optional[Dict[str, tf.Tensor]],
preprocessing_fn: Callable[[Mapping[str, common_types.InputTensorType]],
Mapping[str, common_types.InputTensorType]],
structured_inputs: Mapping[str, common_types.InputTensorType],
base_temp_dir: str, evaluate_schema_overrides: bool) -> function.Function:
@tf.function(input_signature=[], autograph=False)
def metadata_fn():
graph = ops.get_default_graph()
inputs = tf2_utils.supply_missing_inputs(structured_inputs, batch_size=1)
with graph_context.TFGraphContext(
temp_dir=base_temp_dir, evaluated_replacements=tensor_replacement_map):
transformed_features = preprocessing_fn(inputs)
reversed_features = _get_tensor_value_to_key_map(transformed_features)
result = collections.defaultdict(list)
if not evaluate_schema_overrides:
schema_override_tensors = graph.get_collection(
_TF_METADATA_TENSOR_COLLECTION)
for tensor in schema_override_tensors:
if tensor.name in reversed_features:
result[_TF_METADATA_TENSOR_COLLECTION].append(
reversed_features[tensor.name])
else:
result.update(
_get_schema_overrides(graph, reversed_features,
_TF_METADATA_TENSOR_COLLECTION, [
_TF_METADATA_TENSOR_MIN_COLLECTION,
_TF_METADATA_TENSOR_MAX_COLLECTION
]))
result.update(
_get_schema_overrides(graph, reversed_features,
_TF_METADATA_EXTRA_ANNOTATION, [
_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL,
_TF_METADATA_EXTRA_ANNOTATION_PROTO
], _TF_METADATA_EXTRA_ANNOTATION_GLOBAL))
return result
return metadata_fn
| true
| true
|
f712dc6eb272c7e1cfc8af53279c9be612493ee3
| 132
|
py
|
Python
|
chia/util/default_root.py
|
wei-zero/shell-blockchain
|
f792a54b39d491efbac8e165228268716e07409e
|
[
"Apache-2.0"
] | 1
|
2021-09-19T18:58:32.000Z
|
2021-09-19T18:58:32.000Z
|
chia/util/default_root.py
|
wei-zero/shell-blockchain
|
f792a54b39d491efbac8e165228268716e07409e
|
[
"Apache-2.0"
] | null | null | null |
chia/util/default_root.py
|
wei-zero/shell-blockchain
|
f792a54b39d491efbac8e165228268716e07409e
|
[
"Apache-2.0"
] | null | null | null |
import os
from pathlib import Path
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("SHL_ROOT", "~/.shl/mainnet"))).resolve()
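# A minimal, hypothetical sketch of the same resolution logic with an explicit
# value in place of the SHL_ROOT environment variable; the path shown is only
# an example.
def _example_resolve_root(raw_root: str = "~/.shl/testnet") -> Path:
    return Path(os.path.expanduser(raw_root)).resolve()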
| 26.4
| 95
| 0.75
|
import os
from pathlib import Path
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("SHL_ROOT", "~/.shl/mainnet"))).resolve()
| true
| true
|
f712dd6bed08e1c48b8aaf9f59279ca864fa33d6
| 16,424
|
py
|
Python
|
yatube/posts/tests/test_views.py
|
Dimanitto/yatube
|
1d93d5b90c2a9f7c888ce83eac5acdc4d9ed40e2
|
[
"MIT"
] | null | null | null |
yatube/posts/tests/test_views.py
|
Dimanitto/yatube
|
1d93d5b90c2a9f7c888ce83eac5acdc4d9ed40e2
|
[
"MIT"
] | null | null | null |
yatube/posts/tests/test_views.py
|
Dimanitto/yatube
|
1d93d5b90c2a9f7c888ce83eac5acdc4d9ed40e2
|
[
"MIT"
] | null | null | null |
import shutil
import tempfile
from django.core.cache import cache
from http import HTTPStatus
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth import get_user_model
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from ..models import Group, Post, Comment, Follow
from django import forms
User = get_user_model()
# Create a temporary folder for media files;
# for the duration of the tests the media folder is overridden
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
# Media files created in tests are stored in the temporary
# TEMP_MEDIA_ROOT folder, which we delete afterwards
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class TaskPagesTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='tester')
        # Create a record in the database
small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
uploaded = SimpleUploadedFile(
name='small.gif',
content=small_gif,
content_type='image/gif'
)
cls.group = Group.objects.create(
title='Тестовая группа',
slug='test-slug',
description='Тестовое описание',
)
cls.post = Post.objects.create(
author=cls.user,
text='Тестовая пост',
group=cls.group,
image=uploaded,
)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
        # The shutil module is a Python library with convenient tools
        # for managing files and directories:
        # creating, deleting, copying, moving and modifying folders and files
        # shutil.rmtree removes a directory and all of its contents
shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
def setUp(self):
        # Create an authorized client
self.user = User.objects.create_user(username='auth')
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
    # Check the templates that are used
def test_pages_uses_correct_template(self):
"""URL-адрес использует соответствующий шаблон."""
self.authorized_client.force_login(self.post.author)
        # Collect "reverse(name): html_template_name" pairs into a dictionary
templates_pages_names = {
reverse('posts:index'):
'posts/index.html',
reverse('posts:group_posts', kwargs={'slug': 'test-slug'}):
'posts/group_list.html',
reverse('posts:profile', kwargs={'username': 'tester'}):
'posts/profile.html',
reverse('posts:post_detail', kwargs={'post_id': 1}):
'posts/post_detail.html',
reverse('posts:post_edit', kwargs={'post_id': 1}):
'posts/create_post.html',
reverse('posts:post_create'):
'posts/create_post.html',
}
        # Check that requesting the name renders
        # the corresponding HTML template
for reverse_name, template in templates_pages_names.items():
with self.subTest(reverse_name=reverse_name):
response = self.authorized_client.get(reverse_name)
self.assertTemplateUsed(response, template)
    # Check that the context dictionary of the index page
    # contains the expected values
def test_index_detail_pages_show_correct_context(self):
"""Шаблон index сформирован с правильным контекстом."""
response = (self.authorized_client.get(reverse('posts:index')))
        # Take the first element of the list and check that its content
        # matches what is expected
first_object = response.context['page_obj'][0]
        # Check all the context objects
self.assertEqual(first_object.author, self.post.author)
self.assertEqual(first_object.group, self.group)
self.assertEqual(first_object.text, 'Тестовая пост')
self.assertEqual(first_object.image, 'posts/small.gif')
def test_group_list_detail_pages_show_correct_context(self):
"""Шаблон group_posts сформирован с правильным контекстом."""
response = (self.authorized_client.
get(reverse('posts:group_posts',
kwargs={'slug': 'test-slug'}))
)
        # Take the first element of the list and check that its content
        # matches the expected filtered value
first_object = response.context['group']
second_object = response.context['page_obj'][0]
task_group_0 = first_object.title
self.assertEqual(task_group_0, 'Тестовая группа')
self.assertEqual(second_object.image, 'posts/small.gif')
def test_profile_detail_show_correct_context(self):
"""Шаблон profile отфильтрован по пользователю"""
response = (self.authorized_client.
get(reverse('posts:profile',
kwargs={'username': 'tester'}))
)
first_object = response.context['client']
second_object = response.context['page_obj'][0]
task_client_0 = first_object.username
task_post_0 = second_object.text
        # Check the post and the user
self.assertEqual(task_client_0, 'tester')
self.assertEqual(task_post_0, 'Тестовая пост')
self.assertEqual(second_object.image, 'posts/small.gif')
def test_post_detail_show_correct_context(self):
"""Шаблон post detail отфильтрован по id"""
response = (self.authorized_client.
get(reverse('posts:post_detail',
kwargs={'post_id': 1}))
)
first_object = response.context['count']
second_object = response.context['post']
task_count_0 = first_object
task_post_0 = second_object.text
        # Check that there is only one post
self.assertEqual(task_count_0, 1)
self.assertEqual(task_post_0, 'Тестовая пост')
self.assertEqual(second_object.image, 'posts/small.gif')
def test_edit_post_show_correct_context(self):
"""Шаблон create post форма редактирования поста"""
# Снова нужно авторизироваться под автором поста (tester),
# без этого мы не сможем редактировать пост от имени (auth)
self.authorized_client.force_login(self.post.author)
response = (self.authorized_client.
get(reverse('posts:post_edit',
kwargs={'post_id': 1}))
)
        # Dictionary of expected form field types:
        # specify which class each form field should be an instance of
form_fields = {
            # When the form is created, model fields of type TextField
            # are converted to CharField with a forms.Textarea widget
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
}
        # Check that the form field types in the context dictionary
        # match the expectations
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context.get('form').fields.get(value)
                # Check that the form field is an instance
                # of the specified class
self.assertIsInstance(form_field, expected)
def test_post_create_show_correct_context(self):
"""Шаблон create post форма создания поста"""
response = self.authorized_client.get(reverse('posts:post_create'))
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context.get('form').fields.get(value)
self.assertIsInstance(form_field, expected)
def test_post_additional_verification_create(self):
"""Проверка пост не попал в другую группу"""
# Создадим группу с постом различную чтобы сравнить
group = Group.objects.create(
title='Заголовок для 2 тестовой группы',
slug='test_slug2'
)
Post.objects.create(
author=self.user,
text='Test post 2',
group=group,
)
        # Since we created a second post in a different group,
        # check that the group page contains only that single post
response = self.authorized_client.get(
reverse('posts:group_posts', kwargs={'slug': group.slug}))
count = response.context["page_obj"].object_list
        # There are now 2 posts in total, and only one of them should be
        # on the group page
self.assertEqual(len(count), Post.objects.count() - 1)
self.assertEqual(response.status_code, 200)
def test_comment_succesful(self):
"""Комментарий появляется на странице поста"""
Comment.objects.create(
post=self.post,
author=self.user,
text='Test comment'
)
response = (self.authorized_client.
get(reverse('posts:post_detail',
kwargs={'post_id': 1}))
)
first_object = response.context['comments']
        # Because of the QuerySet we had to add .get()
        # Without it there would be an error like the one below
        # <QuerySet [<Comment: Test comment>]> != 'Test comment'
self.assertEqual(first_object.get().text, 'Test comment')
        # Also check the author
self.assertEqual(first_object.get().author, self.user)
        # Also check the post the comment belongs to
self.assertEqual(first_object.get().post.text, self.post.text)
def test_cache(self):
"""Проверка кеширования главной страницы"""
# Логика теста: при удалении записи из базы, она остаётся
# в response.content главной страницы до тех пор,
# пока кэш не будет очищен принудительно.
response = self.authorized_client.get(reverse('posts:index'))
count_before_del = Post.objects.count()
instance = Post.objects.get(text='Тестовая пост')
        # Delete the only post
instance.delete()
first_obj = response.content
        # Clear the cache explicitly, no need to wait 20 seconds
cache.clear()
response = self.authorized_client.get(reverse('posts:index'))
second_obj = response.content
count_after_del = Post.objects.count()
        # Check that the content before deletion and after clearing the cache differs
self.assertNotEqual(first_obj, second_obj)
        # The number of posts = 0
self.assertEqual(count_after_del, count_before_del - 1)
def test_new_post_in_feed_who_followed(self):
"""Новая запись пользователя появляется в ленте тех, кто на него
подписан"""
# Подпишемся на tester'a у него 1 пост
Follow.objects.create(
user=self.user,
author=self.post.author
)
response = self.authorized_client.get(reverse('posts:follow_index'))
first_object = response.context['page_obj'][0]
        # Check that the post and the user are present in the feed
self.assertEqual(first_object.author.username, 'tester')
self.assertEqual(first_object.text, 'Тестовая пост')
self.assertEqual(first_object.image, 'posts/small.gif')
def test_new_post_not_appear_in_feed_who_unfollowed(self):
"""Новая запись пользователя не появляется в ленте тех, кто
не подписан на него"""
# Создадим второго авторизованного не подписанного на автора
self.user = User.objects.create_user(username='auth_without')
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
response = self.authorized_client.get(reverse('posts:follow_index'))
        # Since they follow no one, their feed contains 0 posts
        # The QuerySet will be empty, without a single object
        # object_list returns a list so the number of items can be counted
second_object = response.context['page_obj']
self.assertEqual(second_object.object_list.count(), 0)
def test_auth_user_can_follow(self):
"""Авторизованный пользователь может подписываться на других
пользователей"""
count = Follow.objects.count()
        # Create a follow
Follow.objects.create(
user=self.user,
author=self.post.author
)
url = f'/profile/{self.user}/follow/'
        # Check in turn for the authorized client and for the guest
response = self.authorized_client.get(url)
        # Since a redirect happens there, the expected status code is 302
self.assertEqual(response.status_code, HTTPStatus.FOUND)
        # Check that the number of follows increased
self.assertEqual(Follow.objects.count(), count + 1)
        # All the fields match
self.assertTrue(
Follow.objects.filter(
user=self.user,
author=self.post.author
).exists()
)
self.guest_client = Client()
response = self.guest_client.get(url)
        # Check that an unauthorized user is redirected
        # to the login page
self.assertEqual(response.url, f'/auth/login/?next={url}')
def test_auth_user_can_unfollow(self):
"""Авторизованный пользователь может удалять подписки"""
# Создадим подписку
Follow.objects.create(
user=self.user,
author=self.post.author
)
count = Follow.objects.count()
        # Remove the follow
Follow.objects.filter(
user=self.user,
author=self.post.author
).delete()
self.assertEqual(Follow.objects.count(), count - 1)
url = f'/profile/{self.user}/unfollow/'
        # Check in turn for the authorized client and for the guest
response = self.authorized_client.get(url)
        # Since a redirect happens there, the expected status code is 302
self.assertEqual(response.status_code, HTTPStatus.FOUND)
self.guest_client = Client()
response = self.guest_client.get(url)
        # Check that an unauthorized user is redirected
        # to the login page
self.assertEqual(response.url, f'/auth/login/?next={url}')
class PaginatorViewsTest(TestCase):
    # Fixtures are created here: a client and 13 test records.
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='tester')
cls.group = Group.objects.create(
title='Тестовая группа',
slug='test-slug',
description='Тестовое описание',
)
        # Create 13 records in the database
for i in range(13):
cls.post = Post.objects.create(
author=cls.user,
text=f'Тестовая пост {i}',
                # Attach the group created above to the post
group=cls.group,
)
def setUp(self):
        # Create an authorized client
self.user = User.objects.create_user(username='auth')
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
def test_first_page_contains_ten_records(self):
urls = (
reverse('posts:index'),
reverse('posts:group_posts', kwargs={'slug': 'test-slug'}),
reverse('posts:profile', kwargs={'username': 'tester'}),
)
for url in urls:
response = self.authorized_client.get(url)
            # Check: the number of posts on the first page equals 10.
self.assertEqual(len(response.context['page_obj']), 10)
def test_second_page_contains_three_records(self):
        # Check: the second page should contain three posts.
urls = (
reverse('posts:index'),
reverse('posts:group_posts', kwargs={'slug': 'test-slug'}),
reverse('posts:profile', kwargs={'username': 'tester'}),
)
for url in urls:
response = self.authorized_client.get(url + '?page=3')
self.assertEqual(len(response.context['page_obj']), 3)
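# A minimal, hypothetical sketch (not one of the project's tests) of the split
# the paginator tests above rely on: 13 posts at 10 per page yield two pages
# of 10 and 3 objects.
def _example_paginator_split():
    from django.core.paginator import Paginator
    paginator = Paginator(object_list=list(range(13)), per_page=10)
    return paginator.num_pages, len(paginator.page(2).object_list)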
| 42.439276
| 79
| 0.631089
|
import shutil
import tempfile
from django.core.cache import cache
from http import HTTPStatus
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth import get_user_model
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from ..models import Group, Post, Comment, Follow
from django import forms
User = get_user_model()
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class TaskPagesTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='tester')
small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
uploaded = SimpleUploadedFile(
name='small.gif',
content=small_gif,
content_type='image/gif'
)
cls.group = Group.objects.create(
title='Тестовая группа',
slug='test-slug',
description='Тестовое описание',
)
cls.post = Post.objects.create(
author=cls.user,
text='Тестовая пост',
group=cls.group,
image=uploaded,
)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
def setUp(self):
self.user = User.objects.create_user(username='auth')
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
def test_pages_uses_correct_template(self):
self.authorized_client.force_login(self.post.author)
templates_pages_names = {
reverse('posts:index'):
'posts/index.html',
reverse('posts:group_posts', kwargs={'slug': 'test-slug'}):
'posts/group_list.html',
reverse('posts:profile', kwargs={'username': 'tester'}):
'posts/profile.html',
reverse('posts:post_detail', kwargs={'post_id': 1}):
'posts/post_detail.html',
reverse('posts:post_edit', kwargs={'post_id': 1}):
'posts/create_post.html',
reverse('posts:post_create'):
'posts/create_post.html',
}
for reverse_name, template in templates_pages_names.items():
with self.subTest(reverse_name=reverse_name):
response = self.authorized_client.get(reverse_name)
self.assertTemplateUsed(response, template)
def test_index_detail_pages_show_correct_context(self):
response = (self.authorized_client.get(reverse('posts:index')))
first_object = response.context['page_obj'][0]
self.assertEqual(first_object.author, self.post.author)
self.assertEqual(first_object.group, self.group)
self.assertEqual(first_object.text, 'Тестовая пост')
self.assertEqual(first_object.image, 'posts/small.gif')
def test_group_list_detail_pages_show_correct_context(self):
response = (self.authorized_client.
get(reverse('posts:group_posts',
kwargs={'slug': 'test-slug'}))
)
first_object = response.context['group']
second_object = response.context['page_obj'][0]
task_group_0 = first_object.title
self.assertEqual(task_group_0, 'Тестовая группа')
self.assertEqual(second_object.image, 'posts/small.gif')
def test_profile_detail_show_correct_context(self):
response = (self.authorized_client.
get(reverse('posts:profile',
kwargs={'username': 'tester'}))
)
first_object = response.context['client']
second_object = response.context['page_obj'][0]
task_client_0 = first_object.username
task_post_0 = second_object.text
self.assertEqual(task_client_0, 'tester')
self.assertEqual(task_post_0, 'Тестовая пост')
self.assertEqual(second_object.image, 'posts/small.gif')
def test_post_detail_show_correct_context(self):
response = (self.authorized_client.
get(reverse('posts:post_detail',
kwargs={'post_id': 1}))
)
first_object = response.context['count']
second_object = response.context['post']
task_count_0 = first_object
task_post_0 = second_object.text
self.assertEqual(task_count_0, 1)
self.assertEqual(task_post_0, 'Тестовая пост')
self.assertEqual(second_object.image, 'posts/small.gif')
def test_edit_post_show_correct_context(self):
self.authorized_client.force_login(self.post.author)
response = (self.authorized_client.
get(reverse('posts:post_edit',
kwargs={'post_id': 1}))
)
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context.get('form').fields.get(value)
self.assertIsInstance(form_field, expected)
def test_post_create_show_correct_context(self):
response = self.authorized_client.get(reverse('posts:post_create'))
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context.get('form').fields.get(value)
self.assertIsInstance(form_field, expected)
def test_post_additional_verification_create(self):
group = Group.objects.create(
title='Заголовок для 2 тестовой группы',
slug='test_slug2'
)
Post.objects.create(
author=self.user,
text='Test post 2',
group=group,
)
response = self.authorized_client.get(
reverse('posts:group_posts', kwargs={'slug': group.slug}))
count = response.context["page_obj"].object_list
self.assertEqual(len(count), Post.objects.count() - 1)
self.assertEqual(response.status_code, 200)
def test_comment_succesful(self):
Comment.objects.create(
post=self.post,
author=self.user,
text='Test comment'
)
response = (self.authorized_client.
get(reverse('posts:post_detail',
kwargs={'post_id': 1}))
)
first_object = response.context['comments']
self.assertEqual(first_object.get().text, 'Test comment')
self.assertEqual(first_object.get().author, self.user)
self.assertEqual(first_object.get().post.text, self.post.text)
def test_cache(self):
response = self.authorized_client.get(reverse('posts:index'))
count_before_del = Post.objects.count()
instance = Post.objects.get(text='Тестовая пост')
instance.delete()
first_obj = response.content
cache.clear()
response = self.authorized_client.get(reverse('posts:index'))
second_obj = response.content
count_after_del = Post.objects.count()
self.assertNotEqual(first_obj, second_obj)
self.assertEqual(count_after_del, count_before_del - 1)
def test_new_post_in_feed_who_followed(self):
Follow.objects.create(
user=self.user,
author=self.post.author
)
response = self.authorized_client.get(reverse('posts:follow_index'))
first_object = response.context['page_obj'][0]
        # Check that the post and the user are present in the feed
self.assertEqual(first_object.author.username, 'tester')
self.assertEqual(first_object.text, 'Тестовая пост')
self.assertEqual(first_object.image, 'posts/small.gif')
def test_new_post_not_appear_in_feed_who_unfollowed(self):
        # Create a second authorized client that does not follow the author
self.user = User.objects.create_user(username='auth_without')
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
response = self.authorized_client.get(reverse('posts:follow_index'))
        # Since they follow no one, their feed contains 0 posts
        # The QuerySet will be empty, without a single object
        # object_list returns a list so the number of items can be counted
second_object = response.context['page_obj']
self.assertEqual(second_object.object_list.count(), 0)
def test_auth_user_can_follow(self):
count = Follow.objects.count()
        # Create a follow
Follow.objects.create(
user=self.user,
author=self.post.author
)
url = f'/profile/{self.user}/follow/'
        # Check in turn for the authorized client and for the guest
response = self.authorized_client.get(url)
        # Since a redirect happens there, the expected status code is 302
self.assertEqual(response.status_code, HTTPStatus.FOUND)
        # Check that the number of follows increased
self.assertEqual(Follow.objects.count(), count + 1)
        # All the fields match
self.assertTrue(
Follow.objects.filter(
user=self.user,
author=self.post.author
).exists()
)
self.guest_client = Client()
response = self.guest_client.get(url)
        # Check that an unauthorized user is redirected
        # to the login page
self.assertEqual(response.url, f'/auth/login/?next={url}')
def test_auth_user_can_unfollow(self):
        # Create a follow
Follow.objects.create(
user=self.user,
author=self.post.author
)
count = Follow.objects.count()
        # Remove the follow
Follow.objects.filter(
user=self.user,
author=self.post.author
).delete()
self.assertEqual(Follow.objects.count(), count - 1)
url = f'/profile/{self.user}/unfollow/'
        # Check in turn for the authorized client and for the guest
response = self.authorized_client.get(url)
        # Since a redirect happens there, the expected status code is 302
self.assertEqual(response.status_code, HTTPStatus.FOUND)
self.guest_client = Client()
response = self.guest_client.get(url)
        # Check that an unauthorized user is redirected
        # to the login page
self.assertEqual(response.url, f'/auth/login/?next={url}')
class PaginatorViewsTest(TestCase):
    # Fixtures are created here: a client and 13 test records.
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='tester')
cls.group = Group.objects.create(
title='Тестовая группа',
slug='test-slug',
description='Тестовое описание',
)
        # Create 13 records in the database
for i in range(13):
cls.post = Post.objects.create(
author=cls.user,
text=f'Тестовая пост {i}',
                # Attach the group created above to the post
group=cls.group,
)
def setUp(self):
        # Create an authorized client
self.user = User.objects.create_user(username='auth')
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
def test_first_page_contains_ten_records(self):
urls = (
reverse('posts:index'),
reverse('posts:group_posts', kwargs={'slug': 'test-slug'}),
reverse('posts:profile', kwargs={'username': 'tester'}),
)
for url in urls:
response = self.authorized_client.get(url)
            # Check: the number of posts on the first page equals 10.
self.assertEqual(len(response.context['page_obj']), 10)
def test_second_page_contains_three_records(self):
        # Check: the second page should contain three posts.
urls = (
reverse('posts:index'),
reverse('posts:group_posts', kwargs={'slug': 'test-slug'}),
reverse('posts:profile', kwargs={'username': 'tester'}),
)
for url in urls:
response = self.authorized_client.get(url + '?page=3')
self.assertEqual(len(response.context['page_obj']), 3)
| true
| true
|
f712dd6d2f6273bf1a9e18367b21be5b5e836958
| 331
|
py
|
Python
|
wiske/util/logger.py
|
jthistle/wiskesynth
|
b2132c7edf79c9f52b0a67779ae77a8d427ae712
|
[
"MIT"
] | null | null | null |
wiske/util/logger.py
|
jthistle/wiskesynth
|
b2132c7edf79c9f52b0a67779ae77a8d427ae712
|
[
"MIT"
] | null | null | null |
wiske/util/logger.py
|
jthistle/wiskesynth
|
b2132c7edf79c9f52b0a67779ae77a8d427ae712
|
[
"MIT"
] | null | null | null |
import logging
def get_logger():
logger = logging.getLogger("debug")
hdlr = logging.FileHandler("debug.log")
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
logger = get_logger()
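# A minimal, hypothetical usage sketch: messages sent through the shared logger
# end up in debug.log via the file handler configured above.
def _example_usage():
    logger.debug("example debug message")
    logger.error("example error message")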
| 23.642857
| 74
| 0.703927
|
import logging
def get_logger():
logger = logging.getLogger("debug")
hdlr = logging.FileHandler("debug.log")
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
logger = get_logger()
| true
| true
|
f712dd782c3efe97099c83a527417452c37a4862
| 3,562
|
py
|
Python
|
docs/source/conf.py
|
jbueltemeier/pystiche_papers
|
d162c2db87251f9e3280fea35cf149d030dc335b
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/conf.py
|
jbueltemeier/pystiche_papers
|
d162c2db87251f9e3280fea35cf149d030dc335b
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/conf.py
|
jbueltemeier/pystiche_papers
|
d162c2db87251f9e3280fea35cf149d030dc335b
|
[
"BSD-3-Clause"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Imports ---------------------------------------------------------------------------
import os
from datetime import datetime
from distutils.util import strtobool
from importlib_metadata import metadata
from os import path
# -- Run config ------------------------------------------------------------------------
def get_bool_env_var(name, default=False):
try:
return bool(strtobool(os.environ[name]))
except KeyError:
return default
run_by_github_actions = get_bool_env_var("GITHUB_ACTIONS")
run_by_rtd = get_bool_env_var("READTHEDOCS")
run_by_ci = run_by_github_actions or run_by_rtd or get_bool_env_var("CI")
# -- Path setup ------------------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
PROJECT_ROOT = path.abspath(path.join(path.abspath(path.dirname(__file__)), "..", ".."))
# -- Project information ---------------------------------------------------------------
meta = metadata("pystiche_papers")
project = meta["name"]
author = meta["author"]
copyright = f"{datetime.now().year}, {author}"
release = meta["version"]
version = ".".join(release.split(".")[:2])
# -- General configuration -------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx_autodoc_typehints",
"sphinxcontrib.bibtex",
"sphinx.ext.doctest",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# exclude_patterns = []
# -- Config for intersphinx -----------------------------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"torch": ("https://pytorch.org/docs/stable/", None),
"torchvision": ("https://pytorch.org/docs/stable/", None),
"pystiche": ("https://pystiche.readthedocs.io/en/stable/", None),
}
# -- Options for Latex / MathJax ------------------------------------------------------
with open("custom_cmds.tex", "r") as fh:
custom_cmds = fh.read()
latex_elements = {"preamble": custom_cmds}
mathjax_inline = [r"\(" + custom_cmds, r"\)"]
mathjax_display = [r"\[" + custom_cmds, r"\]"]
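# A hypothetical illustration (not taken from the repository) of the kind of
# macro custom_cmds.tex may define, so that the same command is available both
# in the LaTeX preamble and in the MathJax delimiters configured above, e.g.:
#
#   \newcommand{\mean}[1]{\overline{#1}}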
# -- Options for HTML output -----------------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 32.981481
| 88
| 0.613419
|
import os
from datetime import datetime
from distutils.util import strtobool
from importlib_metadata import metadata
from os import path
def get_bool_env_var(name, default=False):
try:
return bool(strtobool(os.environ[name]))
except KeyError:
return default
run_by_github_actions = get_bool_env_var("GITHUB_ACTIONS")
run_by_rtd = get_bool_env_var("READTHEDOCS")
run_by_ci = run_by_github_actions or run_by_rtd or get_bool_env_var("CI")
PROJECT_ROOT = path.abspath(path.join(path.abspath(path.dirname(__file__)), "..", ".."))
meta = metadata("pystiche_papers")
project = meta["name"]
author = meta["author"]
copyright = f"{datetime.now().year}, {author}"
release = meta["version"]
version = ".".join(release.split(".")[:2])
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx_autodoc_typehints",
"sphinxcontrib.bibtex",
"sphinx.ext.doctest",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"torch": ("https://pytorch.org/docs/stable/", None),
"torchvision": ("https://pytorch.org/docs/stable/", None),
"pystiche": ("https://pystiche.readthedocs.io/en/stable/", None),
}
with open("custom_cmds.tex", "r") as fh:
custom_cmds = fh.read()
latex_elements = {"preamble": custom_cmds}
mathjax_inline = [r"\(" + custom_cmds, r"\)"]
mathjax_display = [r"\[" + custom_cmds, r"\]"]
html_theme = "sphinx_rtd_theme"
| true
| true
|
f712ddaddf5fa16ffdec898ed9bf189ea59a2c2f
| 30,939
|
py
|
Python
|
kaku_events.py
|
bear/kaku
|
a86ad97eac2a9069d3b8b2897567595894f0beee
|
[
"MIT"
] | 26
|
2015-10-09T14:46:35.000Z
|
2022-01-11T21:28:50.000Z
|
kaku_events.py
|
bear/kaku
|
a86ad97eac2a9069d3b8b2897567595894f0beee
|
[
"MIT"
] | 4
|
2016-05-10T09:28:50.000Z
|
2019-12-03T02:17:18.000Z
|
kaku_events.py
|
bear/kaku
|
a86ad97eac2a9069d3b8b2897567595894f0beee
|
[
"MIT"
] | 5
|
2016-06-21T05:43:36.000Z
|
2019-11-18T23:25:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2013-2016 by Mike Taylor
:license: CC0 1.0 Universal, see LICENSE for more details.
"""
import os
import json
import uuid
import types
import errno
import shutil
import logging
import datetime
import argparse
import pytz
import redis
import jinja2
import ronkyuu
import requests
import markdown2
from bs4 import BeautifulSoup
from logging.handlers import RotatingFileHandler
from dateutil.parser import parse
from bearlib.config import Config, findConfigFile
from bearlib.tools import normalizeFilename
try:
# python 3
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
logger = logging.getLogger(__name__)
def getTimestamp():
utcdate = datetime.datetime.utcnow()
tzLocal = pytz.timezone('America/New_York')
return tzLocal.localize(utcdate, is_dst=None)
def mkpath(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def createPath(path, log):
result = True
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
log.exception(exc)
if os.path.isdir(path):
pass
else:
result = False
return result
def escXML(text, escape_quotes=False):
if isinstance(text, types.UnicodeType):
s = list(text)
else:
if isinstance(text, types.IntType):
s = str(text)
else:
s = text
s = list(unicode(s, 'utf-8', 'ignore'))
cc = 0
matches = ('&', '<', '"', '>')
for c in s:
if c in matches:
            if c == '&':
                s[cc] = u'&amp;'
            elif c == '<':
                s[cc] = u'&lt;'
            elif c == '>':
                s[cc] = u'&gt;'
            elif escape_quotes:
                s[cc] = u'&quot;'
cc += 1
return ''.join(s)
def readMD(targetFile):
result = {}
content = []
header = True
mdFile = '%s.md' % targetFile
for line in open(mdFile, 'r').readlines():
item = line.decode('utf-8', 'xmlcharrefreplace')
if header and len(item.strip()) == 0:
header = False
if header and ':' in item:
tag, value = item.split(':', 1)
result[tag.lower()] = value.strip()
else:
content.append(item)
result['modified'] = os.path.getmtime(mdFile)
result['path'] = os.path.dirname(mdFile)
result['content'] = u''.join(content[1:])
if 'created' not in result and 'date' in result:
result['created'] = result['date']
if 'published' not in result and 'created' in result:
result['published'] = result['created']
return result
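# A hypothetical example (not taken from a real content tree) of the markdown
# layout readMD() expects: "tag: value" header lines, a blank line, then the
# post body.
_EXAMPLE_MD = u"""title: Example post
date: 2016-01-02 10:00:00
slug: example-post
tags: indieweb

The body of the post starts after the first blank line.
"""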
def writeMD(targetFile, data):
page = mdPost % data
with open('%s.md' % targetFile, 'w+') as h:
h.write(page.encode('utf-8'))
def loadMetadata(targetFile):
mdData = readMD(targetFile)
if os.path.exists('%s.json' % targetFile):
with open('%s.json' % targetFile, 'r') as h:
result = json.load(h)
if 'published' not in result:
result['published'] = result['created']
if 'route' not in result:
result['route'] = u'%(year)s/%(doy)s/%(slug)s' % result
if 'url' not in result:
result['url'] = '%s%s.html' % (cfg.baseroute, result['route'])
for key in ('created', 'published', 'updated', 'deleted'):
if key in result:
result[key] = parse(result[key])
else:
for key in ('created', 'published'):
mdData[key] = parse(mdData[key])
created = mdData['created']
mdData['key'] = created.strftime('%Y%m%d%H%M%S')
mdData['year'] = created.strftime('%Y')
mdData['doy'] = created.strftime('%j')
mdData['route'] = u'%(year)s/%(doy)s/%(slug)s' % mdData
mdData['url'] = '%s%s.html' % (cfg.baseroute, mdData['route'])
result = {}
for key in mdData:
result[key] = mdData[key]
result['modified'] = mdData['modified']
result['content'] = mdData['content']
return result
def saveMetadata(targetFile, data):
if 'created' not in data:
data['created'] = data['date']
if 'published' not in data:
data['published'] = data['created']
for key in ('created', 'published', 'updated', 'deleted'):
if key in data:
data[key] = data[key].strftime('%Y-%m-%d %H:%M:%S')
with open('%s.json' % targetFile, 'w+') as h:
h.write(json.dumps(data, indent=2))
def loadOurWebmentions(targetFile):
result = {}
if os.path.exists('%s.mentions' % targetFile):
with open('%s.mentions' % targetFile, 'r') as h:
result = json.load(h)
return result
def saveOurMentions(targetFile, mentions):
logger.info('saving webmentions for %s' % targetFile)
with open('%s.mentions' % targetFile, 'w+') as h:
h.write(json.dumps(mentions, indent=2))
def scanOurMentions(sourceURL, mentions):
# loop thru to see if this mention is already present
found = None
for key in mentions:
item = mentions[key]['mention']
url = urlparse(item['sourceURL'])
if url.netloc == sourceURL.netloc and url.path == sourceURL.path:
found = key
break
logger.info('scanOurMentions result [%s]' % found)
return found
def loadOutboundWebmentions(targetFile):
result = {}
if os.path.exists('%s.outboundmentions' % targetFile):
with open('%s.outboundmentions' % targetFile, 'r') as h:
result = json.load(h)
return result
def saveOutboundWebmentions(targetFile, mentions):
logger.info('saving outbound webmentions from %s' % targetFile)
with open('%s.outboundmentions' % targetFile, 'w+') as h:
h.write(json.dumps(mentions, indent=2))
def checkOutboundWebmentions(sourceURL, html, targetFile, update=False):
logger.info('checking for outbound webmentions [%s]' % sourceURL)
try:
cached = loadOutboundWebmentions(targetFile)
found = ronkyuu.findMentions(sourceURL, content=html)
mentions = {}
# loop thru webmentions found in our post and
# check if they are new/updated or already seen
for href in found['refs']:
if sourceURL != href:
logger.info(href)
key = 'webmention::%s::%s' % (sourceURL, href)
keySeen = db.exists(key)
if keySeen:
if update:
keySeen = False
s = 'update forced'
else:
s = 'already processed'
else:
s = 'new mention'
logger.info('\t%s [%s]' % (s, key))
mentions[key] = { 'key': key,
'href': href,
'keySeen': keySeen,
'removed': False
}
# loop thru found webmentions and check against cache for any removed
for key in cached:
if key not in mentions:
mentions[key] = cached[key]
mentions[key]['removed'] = True
if 'keySeen' not in mentions[key]:
mentions[key]['keySeen'] = False
removed = []
for key in mentions:
mention = mentions[key]
logger.info('seen: %(keySeen)s removed: %(removed)s [%(key)s]' % mention)
# send webmentions for new/updated or removed
if mention['removed'] or not mention['keySeen']:
if mention['removed']:
removed.append(key)
href = mention['href']
wmStatus, wmUrl, debug = ronkyuu.discoverEndpoint(href, test_urls=False, debug=True)
logger.info('webmention endpoint discovery: %s [%s]' % (wmStatus, wmUrl))
if len(debug) > 0:
logger.info('\n\tdebug: '.join(debug))
if wmUrl is not None and wmStatus == 200:
logger.info('\tfound webmention endpoint %s for %s' % (wmUrl, href))
resp, debug = ronkyuu.sendWebmention(sourceURL, href, wmUrl, debug=True)
if len(debug) > 0:
logger.info('\n\tdebug: '.join(debug))
if resp.status_code == requests.codes.ok:
if key not in cached:
cached[key] = { 'key': key,
'href': href,
'wmUrl': wmUrl,
'status': resp.status_code
}
if len(resp.history) == 0:
db.set(key, resp.status_code)
logger.info('\twebmention sent successfully')
else:
logger.info('\twebmention POST was redirected')
else:
logger.info('\twebmention send returned a status code of %s' % resp.status_code)
for key in removed:
del cached[key]
db.delete(key)
saveOutboundWebmentions(targetFile, cached)
except:
logger.exception('exception during checkOutboundWebmentions')
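# Editor's sketch of the same ronkyuu flow for a single target (URLs are placeholders;
# the keyword arguments mirror the calls used above):
#
#   import ronkyuu
#   source = 'https://bear.im/bearlog/2016/123/example.html'
#   target = 'https://example.org/some-post'
#   wmStatus, wmUrl, debug = ronkyuu.discoverEndpoint(target, test_urls=False, debug=True)
#   if wmUrl is not None and wmStatus == 200:
#       resp, debug = ronkyuu.sendWebmention(source, target, wmUrl, debug=True)
#       print(resp.status_code)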
def postUpdate(targetFile, action=None):
"""Generate data for targeted file.
All mentions to the post are checked for updates.
The post is also scanned for any outbound Webmentions.
targetFile: path and filename without extension.
"""
pageEnv = {}
templateLoader = jinja2.FileSystemLoader(searchpath=cfg.paths.templates)
templates = jinja2.Environment(loader=templateLoader)
postTemplate = templates.get_template(cfg.templates['post'])
postPageTemplate = templates.get_template(cfg.templates['postPage'])
post = loadMetadata(targetFile)
ourMentions = loadOurWebmentions(targetFile)
# bring over site config items
for s in ('title',):
pageEnv[s] = cfg[s]
if action == 'update':
post['updated'] = getTimestamp()
if os.path.exists('%s.deleted' % targetFile):
logger.info('post [%s] is marked as deleted' % targetFile)
if action == 'delete' and 'deleted' not in post:
post['deleted'] = getTimestamp()
post['html'] = '<p>This article has been deleted.</p>'
pageEnv['title'] = 'This article has been deleted'
pageEnv['meta'] = '<meta http-equiv="Status" content="410 GONE" />'
pageEnv['mentions'] = []
else:
logger.info('updating post [%s]' % targetFile)
post['html'] = md.convert(post['content'])
if 'deleted' in post:
del post['deleted']
removed = []
for key in ourMentions:
m = ourMentions[key]['mention']
r = requests.get(m['sourceURL'], verify=True)
if r.status_code == 410:
logger.info('a mention no longer exists - removing [%s]' % key)
removed.append(key)
else:
if 'charset' in r.headers.get('content-type', ''):
content = r.text
else:
content = r.content
soup = BeautifulSoup(content, 'html5lib')
status = None
for meta in soup.findAll('meta', attrs={'http-equiv': lambda x: x and x.lower() == 'status'}):
try:
status = int(meta['content'].split(' ')[0])
except:
pass
if status == 410:
logger.info('a mention no longer exists (via http-equiv) - removing [%s]' % key)
removed.append(key)
for key in removed:
del ourMentions[key]
mentions = []
for key in ourMentions:
m = ourMentions[key]['mention']
# convert string dates into datetime's for template processing
if 'postDate' in m:
m['postDate'] = parse(m['postDate'])
mentions.append(m)
pageEnv['title'] = post['title']
pageEnv['mentions'] = mentions
pageEnv['meta'] = metaEmbed % post
post['xml'] = escXML(post['html'])
pageEnv['post'] = post
postHtml = postTemplate.render(pageEnv)
postPage = postPageTemplate.render(pageEnv)
with open('%s.html' % targetFile, 'w+') as h:
h.write(postHtml.encode('utf-8'))
htmlDir = os.path.join(cfg.paths.output, post['year'], post['doy'])
if not os.path.exists(htmlDir):
mkpath(htmlDir)
with open(os.path.join(htmlDir, '%s.html' % post['slug']), 'w+') as h:
h.write(postPage.encode('utf-8'))
saveMetadata(targetFile, post)
checkOutboundWebmentions('%s%s' % (cfg.baseurl, post['url']), postHtml, targetFile, update=True)
def checkPost(targetFile, eventData):
"""Check if the post's markdown file is present and create it if not.
targetFile: path and filename without extension.
eventData: Micropub data to create the post from.
"""
if not os.path.exists('%s.md' % targetFile):
if 'micropub' in eventData:
micropub = eventData['micropub']
if 'content' in micropub:
content = micropub['content']
if 'html' in micropub and len(micropub['html']) > 0:
content.append(micropub['html'])
if 'category' in micropub:
categories = ','.join(micropub['category'])
else:
categories = ''
if 'photo' in micropub:
for url, alt in micropub['photo']:
if len(alt) > 0:
t = ' alt="%s"' % alt
else:
t = ''
content.append('<img src="%s"%s></img>' % (url, t))
logger.info(micropub.keys())
if 'photo_files' in micropub:
for filename in micropub['photo_files']:
photoSrc = os.path.join(cfg.paths.uploads, filename)
photoTgt = os.path.join(cfg.paths.output, 'images', filename)
logger.info('photo file: %s %s %s' % (filename, photoSrc, photoTgt))
shutil.copyfile(photoSrc, photoTgt)
url = '%s%s%s/%s' % (cfg.baseurl, cfg.baseroute, 'images', filename)
content.append('<img src="%s"></img>' % url)
# location = "geo:40.0958,-74.90736;u=92"
# in-reply-to = "https://bear.im/bearlog/2016/123/testing-delete.html"
# bookmark-of = "https://bear.im"
# category = [u'code', u'indieweb']
# html = [u'<p>something</p><p class="">line2</p><p class="">line3<br></p><p class=""><br></p>']
logger.info('content: %d %s' % (len(content), content))
data = { 'created': eventData['timestamp'],
'published': eventData['timestamp'],
'slug': eventData['slug'],
'author': 'bear',
'tags': categories,
'content': '\n'.join(content),
'title': eventData['title'],
'summary': eventData['title'],
'year': eventData['year'],
'doy': eventData['doy'],
'uuid': str(uuid.uuid4()),
'payload': eventData['micropub'],
}
writeMD(targetFile, data)
else:
logger.error('checkPost for [%s] - no Micropub data included' % targetFile)
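# Illustrative eventData for a Micropub-created post (all values are invented; the keys
# mirror the lookups above, and handlePost() fills in 'year' and 'doy' from the
# timestamp before checkPost() runs):
#
#   eventData = {
#       'timestamp': '2016-05-02 12:00:00',
#       'slug': 'testing-delete',
#       'title': 'Testing Delete',
#       'micropub': {
#           'content': [u'A few words of post content.'],
#           'category': [u'code', u'indieweb'],
#       },
#   }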
def mentionDelete(mention):
logger.info('mention delete of [%s] within [%s]' % (mention['targetURL'], mention['sourceURL']))
# update() handles removal of out of date mentions
targetURL = urlparse(mention['targetURL'])
targetRoute = targetURL.path.replace(cfg.baseroute, '')
postUpdate(os.path.join(cfg.paths.content, targetRoute))
def mentionUpdate(mention):
logger.info('mention update of [%s] within [%s]' % (mention['targetURL'], mention['sourceURL']))
targetPath = urlparse(mention['targetURL'].strip()).path
pathItems = targetPath.split('.')
logger.info('[%s] %s' % (targetPath, pathItems))
if pathItems[-1].lower() == 'html':
targetPath = '.'.join(pathItems[:-1])
eventDate = getTimestamp()
sourceURL = urlparse(mention['sourceURL'])
targetRoute = targetPath.replace(cfg.baseroute, '')
targetFile = os.path.join(cfg.paths.content, targetRoute)
logger.info('targetFile [%s]' % targetFile)
ourMentions = loadOurWebmentions(targetFile)
found = scanOurMentions(sourceURL, ourMentions)
if found is not None:
logger.info('updated mention of [%s] within [%s]' % (found, mention['targetURL']))
ourMentions[found]['updated'] = eventDate.strftime('%Y-%m-%dT%H:%M:%S')
ourMentions[found]['mention'] = mention
else:
key = 'mention::%s::%s' % (sourceURL.netloc, sourceURL.path)
ourMentions[key] = { 'created': mention['postDate'],
'updated': None,
'mention': mention,
}
logger.info('added mention of [%s] within [%s]' % (key, mention['targetURL']))
saveOurMentions(targetFile, ourMentions)
postUpdate(targetFile)
def indexUpdate():
"""Scan all posts and generate the index page.
"""
frontpage = {}
logger.info('building index page')
for path, dirlist, filelist in os.walk(cfg.paths.content):
if len(filelist) > 0:
for item in filelist:
filename, ext = os.path.splitext(item)
if ext in ('.json',) and '.mentions.json' not in item:
if os.path.exists(os.path.join(path, '%s.deleted' % filename)):
logger.info('skipping deleted post [%s]' % filename)
else:
page = loadMetadata(os.path.join(path, filename))
frontpage[page['key']] = page
templateLoader = jinja2.FileSystemLoader(searchpath=cfg.paths.templates)
templates = jinja2.Environment(loader=templateLoader)
indexTemplate = templates.get_template(cfg.templates['index'])
pageEnv = { 'posts': [],
'title': cfg.title,
}
frontpageKeys = frontpage.keys()
frontpageKeys.sort(reverse=True)
for key in frontpageKeys[:cfg.index_articles]:
pageEnv['posts'].append(frontpage[key])
page = indexTemplate.render(pageEnv)
indexDir = os.path.join(cfg.paths.output)
if not os.path.exists(indexDir):
mkpath(indexDir)
with open(os.path.join(indexDir, 'index.html'), 'w+') as h:
h.write(page.encode('utf-8'))
def isUpdated(path, filename, force=False):
mFile = os.path.join(path, '%s.md' % filename)
jFile = os.path.join(path, '%s.json' % filename)
if os.path.exists(os.path.join(path, '%s.deleted' % filename)):
return 'delete'
if os.path.exists(jFile):
mTime = os.path.getmtime(mFile)
jTime = os.path.getmtime(jFile)
if force or mTime > jTime:
return 'update'
else:
return 'unchanged'
else:
return 'create'
def gather(filepath, filename=None, force=False):
logger.info('gather [%s] [%s] [%s]' % (filepath, filename, force))
if filename is None:
if filepath is None:
logger.error('A specific file or a path to walk must be specified')
else:
for path, dirlist, filelist in os.walk(filepath):
if len(filelist) > 0:
for item in filelist:
filename, ext = os.path.splitext(item)
if ext in ('.md',):
state = isUpdated(path, filename, force)
key = 'kaku-event::%s::%s::%s' % ('post', state, str(uuid.uuid4()))
data = { 'type': 'post',
'action': state,
'data': { 'path': path,
'file': filename
},
'key': key
}
db.set(key, json.dumps(data))
db.publish(cfg.events, key)
else:
s = normalizeFilename(filename)
if not os.path.exists(s):
s = normalizeFilename(os.path.join(filepath, filename))
logger.info('checking [%s]' % s)
if os.path.exists(s):
path = os.path.dirname(s)
filename, ext = os.path.splitext(s)
if ext in ('.md',):
state = isUpdated(path, filename, force)
key = 'kaku-event::%s::%s::%s' % ('post', state, str(uuid.uuid4()))
data = { 'type': 'post',
'action': state,
'data': { 'path': path,
'file': filename
},
'key': key
}
db.set(key, json.dumps(data))
db.publish(cfg.events, key)
def handlePost(eventAction, eventData):
"""Process the Kaku event for Posts.
eventAction: create, update, delete, undelete or unchanged
eventData: a dict that contains information about the post
Micropub generated post events will have eventData keys:
slug, title, location, timestamp, micropub
Post events generated by the gather daemon will have keys:
path, file
"""
if eventAction == 'create':
if 'path' in eventData:
postDir = eventData['path']
targetFile = eventData['file']
else:
timestamp = parse(eventData['timestamp'])
eventData['year'] = str(timestamp.year)
eventData['doy'] = timestamp.strftime('%j')
slug = eventData['slug']
postDir = os.path.join(cfg.paths.content, eventData['year'], eventData['doy'])
targetFile = os.path.join(postDir, slug)
if not os.path.exists(postDir):
mkpath(postDir)
checkPost(targetFile, eventData)
postUpdate(targetFile, eventAction)
elif eventAction in ('update', 'delete', 'add'):
if 'file' in eventData:
targetFile = eventData['file']
else:
targetURL = urlparse(eventData['url'])
targetRoute = targetURL.path.replace(cfg.baseroute, '')
targetFile = os.path.join(cfg.paths.content, targetRoute)
changed = False
if eventAction == 'delete':
with open('%s.deleted' % targetFile, 'a'):
os.utime('%s.deleted' % targetFile, None)
changed = True
elif eventAction == 'update':
actionData = eventData['micropub']
actionKey = eventData['actionkey']
data = loadMetadata(targetFile)
for key in ('slug', 'tags', 'content', 'html'):
logger.info(' -- %s: %s' % (key, data[key]))
logger.info('update (%s) %s' % (actionKey, json.dumps(actionData)))
if actionKey == 'replace':
if 'content' in actionData:
data['content'] = '\n'.join(actionData['content'])
changed = True
if 'category' in actionData:
tags = data['tags'].split(',')
for tag in actionData['category']:
if tag not in tags:
tags.append(tag)
changed = True
data['tags'] = ','.join(tags)
elif actionKey == 'add':
if 'content' in actionData:
data['content'] += '\n'.join(actionData['content'])
changed = True
if 'category' in actionData:
tags = data['tags'].split(',')
for tag in actionData['category']:
if tag not in tags:
tags.append(tag)
changed = True
data['tags'] = ','.join(tags)
elif actionKey == 'delete':
if 'category' in actionData:
if type(actionData) is dict:
tags = data['tags'].split(',')
for tag in actionData['category']:
if tag in tags:
tags.remove(tag)
changed = True
data['tags'] = ','.join(tags)
else:
data['tags'] = ''
changed = True
for key in ('slug', 'tags', 'content', 'html'):
logger.info(' -- %s: %s' % (key, data[key]))
if changed:
writeMD(targetFile, data)
saveMetadata(targetFile, data)
if changed:
postUpdate(targetFile, eventAction)
elif eventAction == 'undelete':
logger.info(eventData.keys())
if 'url' in eventData:
targetURL = urlparse(eventData['url'])
targetRoute = targetURL.path.replace(cfg.baseroute, '')
targetFile = os.path.join(cfg.paths.content, targetRoute)
logger.info('checking delete marker %s.deleted' % targetFile)
if os.path.exists('%s.deleted' % targetFile):
logger.info('removing delete marker')
os.remove('%s.deleted' % targetFile)
postUpdate(targetFile, eventAction)
indexUpdate()
def handleMentions(eventAction, eventData):
"""Process the Kaku event for mentions.
eventAction: create, update or delete
eventData: dict with the keys sourceURL, targetURL,
vouchDomain, vouched, postDate, hcard, mf2data
"""
if eventAction == 'create' or eventAction == 'update':
mentionUpdate(eventData)
elif eventAction == 'delete':
mentionDelete(eventData)
def handleGather(eventData):
if 'file' in eventData:
gather(cfg.paths.content, eventData['file'], eventData['force'])
else:
gather(cfg.paths.content)
def handleEvent(eventKey):
"""Process an incoming Kaku Event.
Retrieve the event data from the key given and call the appropriate handler.
Valid Event Types are mention, post, gather
For gather events, only the data item will be found
For mention and post, action and data will be found
Valid Event Actions are create, update, delete and undelete
Event Data is a dict of items relevant to the event
"""
try:
event = json.loads(db.get(eventKey))
eventType = event['type']
if eventType == 'gather':
handleGather(event['data'])
else:
eventAction = event['action']
eventData = event['data']
logger.info('dispatching %(action)s for %(type)s' % event)
if eventType == 'post':
handlePost(eventAction, eventData)
elif eventType == 'mention':
handleMentions(eventAction, eventData)
db.expire(eventKey, 86400)
except:
logger.exception('error during event [%s]' % eventKey)
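# Editor's sketch of how a producer (for example the Kaku web app) could hand this
# daemon an event, mirroring the key and payload layout used by gather() above; the
# channel name and paths follow the example config below and are illustrative:
#
#   import json, uuid, redis
#   r = redis.StrictRedis(host='127.0.0.1', port=6379, db=1)
#   key = 'kaku-event::post::update::%s' % uuid.uuid4()
#   r.set(key, json.dumps({'type': 'post',
#                          'action': 'update',
#                          'data': {'path': '/home/bearim/content/2016/123',
#                                   'file': 'testing-delete'},
#                          'key': key}))
#   r.publish('kaku-events', key)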
def initLogging(logpath, logname):
logFormatter = logging.Formatter("%(asctime)s %(levelname)-9s %(message)s", "%Y-%m-%d %H:%M:%S")
logfilename = os.path.join(logpath, logname)
logHandler = RotatingFileHandler(logfilename, maxBytes=1024 * 1024 * 100, backupCount=7)
logHandler.setFormatter(logFormatter)
logger.addHandler(logHandler)
logger.setLevel(logging.DEBUG)
def getRedis(redisURL):
url = urlparse(redisURL)
host = url.netloc
if ':' in host:
host, port = host.split(':')
port = int(port)
else:
port = 6379
if len(url.path) > 0:
db = int(url.path[1:])
else:
db = 0
return redis.StrictRedis(host=host, port=port, db=db)
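# Usage sketch, matching the "redis" value in the example config below:
#
#   db = getRedis('redis://127.0.0.1:6379/1')   # StrictRedis(host='127.0.0.1', port=6379, db=1)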
# Example config file
# {
# "baseroute": "/bearlog/",
# "baseurl": "https://bear.im",
# "index_articles": 15,
# "redis": "redis://127.0.0.1:6379/1",
# "markdown_extras": [ "fenced-code-blocks", "cuddled-lists" ],
# "logname": "kaku_events.log",
# "events": "kaku-events",
# "paths": {
# "templates": "/home/bearim/templates/",
# "content": "/home/bearim/content/",
# "output": "/srv/bear.im/bearlog/",
# "log": "/home/bearim/"
# },
# "templates": {
# "post": "article.jinja",
# "mention": "mention.jinja",
# "postPage": "article_page.jinja",
# "index": "blog_index.jinja",
# "markdown": "post.md",
# "embed": "meta.embed"
# }
# }
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./kaku_events.cfg')
parser.add_argument('--file', default=None,
help='A specific markdown file to check and then exit')
parser.add_argument('--force', default=False, action='store_true',
help='Force any found markdown files (or specific file) to be considered an update.')
args = parser.parse_args()
cfgFiles = findConfigFile(args.config)
cfg = Config()
cfg.fromJson(cfgFiles[0])
initLogging(cfg.paths.log, cfg.logname)
logger.info('kaku_events started')
db = getRedis(cfg.redis)
with open(os.path.join(cfg.paths.templates, cfg.templates.markdown)) as h:
mdPost = h.read()
with open(os.path.join(cfg.paths.templates, cfg.templates.embed)) as h:
metaEmbed = h.read()
if args.file is not None:
gather(cfg.paths.content, args.file, args.force)
else:
md = markdown2.Markdown(extras=cfg.markdown_extras)
p = db.pubsub()
p.subscribe(cfg.events)
logger.info('listening for events')
for item in p.listen():
if item['type'] == 'message':
key = item['data']
if key.startswith('kaku-event::'):
logger.info('handling event [%s]' % key)
handleEvent(key)
| 39.015132
| 115
| 0.531013
|
import os
import json
import uuid
import types
import errno
import shutil
import logging
import datetime
import argparse
import pytz
import redis
import jinja2
import ronkyuu
import requests
import markdown2
from bs4 import BeautifulSoup
from logging.handlers import RotatingFileHandler
from dateutil.parser import parse
from bearlib.config import Config, findConfigFile
from bearlib.tools import normalizeFilename
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
logger = logging.getLogger(__name__)
def getTimestamp():
utcdate = datetime.datetime.utcnow()
tzLocal = pytz.timezone('America/New_York')
return tzLocal.localize(utcdate, is_dst=None)
def mkpath(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def createPath(path, log):
result = True
try:
os.makedirs(path)
except OSError as exc:
log.exception(exc)
if os.path.isdir(path):
pass
else:
result = False
return result
def escXML(text, escape_quotes=False):
if isinstance(text, types.UnicodeType):
s = list(text)
else:
if isinstance(text, types.IntType):
s = str(text)
else:
s = text
s = list(unicode(s, 'utf-8', 'ignore'))
cc = 0
matches = ('&', '<', '"', '>')
for c in s:
if c in matches:
if c == '&':
s[cc] = u'&'
elif c == '<':
s[cc] = u'<'
elif c == '>':
s[cc] = u'>'
elif escape_quotes:
s[cc] = u'"'
cc += 1
return ''.join(s)
def readMD(targetFile):
result = {}
content = []
header = True
mdFile = '%s.md' % targetFile
for line in open(mdFile, 'r').readlines():
item = line.decode('utf-8', 'xmlcharrefreplace')
if header and len(item.strip()) == 0:
header = False
if header and ':' in item:
tag, value = item.split(':', 1)
result[tag.lower()] = value.strip()
else:
content.append(item)
result['modified'] = os.path.getmtime(mdFile)
result['path'] = os.path.dirname(mdFile)
result['content'] = u''.join(content[1:])
if 'created' not in result and 'date' in result:
result['created'] = result['date']
if 'published' not in result and 'created' in result:
result['published'] = result['created']
return result
def writeMD(targetFile, data):
page = mdPost % data
with open('%s.md' % targetFile, 'w+') as h:
h.write(page.encode('utf-8'))
def loadMetadata(targetFile):
mdData = readMD(targetFile)
if os.path.exists('%s.json' % targetFile):
with open('%s.json' % targetFile, 'r') as h:
result = json.load(h)
if 'published' not in result:
result['published'] = result['created']
if 'route' not in result:
result['route'] = u'%(year)s/%(doy)s/%(slug)s' % result
if 'url' not in result:
result['url'] = '%s%s.html' % (cfg.baseroute, result['route'])
for key in ('created', 'published', 'updated', 'deleted'):
if key in result:
result[key] = parse(result[key])
else:
for key in ('created', 'published'):
mdData[key] = parse(mdData[key])
created = mdData['created']
mdData['key'] = created.strftime('%Y%m%d%H%M%S')
mdData['year'] = created.strftime('%Y')
mdData['doy'] = created.strftime('%j')
mdData['route'] = u'%(year)s/%(doy)s/%(slug)s' % mdData
mdData['url'] = '%s%s.html' % (cfg.baseroute, mdData['route'])
result = {}
for key in mdData:
result[key] = mdData[key]
result['modified'] = mdData['modified']
result['content'] = mdData['content']
return result
def saveMetadata(targetFile, data):
if 'created' not in data:
data['created'] = data['date']
if 'published' not in data:
data['published'] = data['created']
for key in ('created', 'published', 'updated', 'deleted'):
if key in data:
data[key] = data[key].strftime('%Y-%m-%d %H:%M:%S')
with open('%s.json' % targetFile, 'w+') as h:
h.write(json.dumps(data, indent=2))
def loadOurWebmentions(targetFile):
result = {}
if os.path.exists('%s.mentions' % targetFile):
with open('%s.mentions' % targetFile, 'r') as h:
result = json.load(h)
return result
def saveOurMentions(targetFile, mentions):
logger.info('saving webmentions for %s' % targetFile)
with open('%s.mentions' % targetFile, 'w+') as h:
h.write(json.dumps(mentions, indent=2))
def scanOurMentions(sourceURL, mentions):
# loop thru to see if this mention is already present
found = None
for key in mentions:
item = mentions[key]['mention']
url = urlparse(item['sourceURL'])
if url.netloc == sourceURL.netloc and url.path == sourceURL.path:
found = key
break
logger.info('scanOurMentions result [%s]' % found)
return found
def loadOutboundWebmentions(targetFile):
result = {}
if os.path.exists('%s.outboundmentions' % targetFile):
with open('%s.outboundmentions' % targetFile, 'r') as h:
result = json.load(h)
return result
def saveOutboundWebmentions(targetFile, mentions):
logger.info('saving outbound webmentions from %s' % targetFile)
with open('%s.outboundmentions' % targetFile, 'w+') as h:
h.write(json.dumps(mentions, indent=2))
def checkOutboundWebmentions(sourceURL, html, targetFile, update=False):
logger.info('checking for outbound webmentions [%s]' % sourceURL)
try:
cached = loadOutboundWebmentions(targetFile)
found = ronkyuu.findMentions(sourceURL, content=html)
mentions = {}
# loop thru webmentions found in our post and
# check if they are new/updated or already seen
for href in found['refs']:
if sourceURL != href:
logger.info(href)
key = 'webmention::%s::%s' % (sourceURL, href)
keySeen = db.exists(key)
if keySeen:
if update:
keySeen = False
s = 'update forced'
else:
s = 'already processed'
else:
s = 'new mention'
logger.info('\t%s [%s]' % (s, key))
mentions[key] = { 'key': key,
'href': href,
'keySeen': keySeen,
'removed': False
}
# loop thru found webmentions and check against cache for any removed
for key in cached:
if key not in mentions:
mentions[key] = cached[key]
mentions[key]['removed'] = True
if 'keySeen' not in mentions[key]:
mentions[key]['keySeen'] = False
removed = []
for key in mentions:
mention = mentions[key]
logger.info('seen: %(keySeen)s removed: %(removed)s [%(key)s]' % mention)
# send webmentions for new/updated or removed
if mention['removed'] or not mention['keySeen']:
if mention['removed']:
removed.append(key)
href = mention['href']
wmStatus, wmUrl, debug = ronkyuu.discoverEndpoint(href, test_urls=False, debug=True)
logger.info('webmention endpoint discovery: %s [%s]' % (wmStatus, wmUrl))
if len(debug) > 0:
logger.info('\n\tdebug: '.join(debug))
if wmUrl is not None and wmStatus == 200:
logger.info('\tfound webmention endpoint %s for %s' % (wmUrl, href))
resp, debug = ronkyuu.sendWebmention(sourceURL, href, wmUrl, debug=True)
if len(debug) > 0:
logger.info('\n\tdebug: '.join(debug))
if resp.status_code == requests.codes.ok:
if key not in cached:
cached[key] = { 'key': key,
'href': href,
'wmUrl': wmUrl,
'status': resp.status_code
}
if len(resp.history) == 0:
db.set(key, resp.status_code)
logger.info('\twebmention sent successfully')
else:
logger.info('\twebmention POST was redirected')
else:
logger.info('\twebmention send returned a status code of %s' % resp.status_code)
for key in removed:
del cached[key]
db.delete(key)
saveOutboundWebmentions(targetFile, cached)
except:
logger.exception('exception during checkOutboundWebmentions')
def postUpdate(targetFile, action=None):
pageEnv = {}
templateLoader = jinja2.FileSystemLoader(searchpath=cfg.paths.templates)
templates = jinja2.Environment(loader=templateLoader)
postTemplate = templates.get_template(cfg.templates['post'])
postPageTemplate = templates.get_template(cfg.templates['postPage'])
post = loadMetadata(targetFile)
ourMentions = loadOurWebmentions(targetFile)
# bring over site config items
for s in ('title',):
pageEnv[s] = cfg[s]
if action == 'update':
post['updated'] = getTimestamp()
if os.path.exists('%s.deleted' % targetFile):
logger.info('post [%s] is marked as deleted' % targetFile)
if action == 'delete' and 'deleted' not in post:
post['deleted'] = getTimestamp()
post['html'] = '<p>This article has been deleted.</p>'
pageEnv['title'] = 'This article has been deleted'
pageEnv['meta'] = '<meta http-equiv="Status" content="410 GONE" />'
pageEnv['mentions'] = []
else:
logger.info('updating post [%s]' % targetFile)
post['html'] = md.convert(post['content'])
if 'deleted' in post:
del post['deleted']
removed = []
for key in ourMentions:
m = ourMentions[key]['mention']
r = requests.get(m['sourceURL'], verify=True)
if r.status_code == 410:
logger.info('a mention no longer exists - removing [%s]' % key)
removed.append(key)
else:
if 'charset' in r.headers.get('content-type', ''):
content = r.text
else:
content = r.content
soup = BeautifulSoup(content, 'html5lib')
status = None
for meta in soup.findAll('meta', attrs={'http-equiv': lambda x: x and x.lower() == 'status'}):
try:
status = int(meta['content'].split(' ')[0])
except:
pass
if status == 410:
logger.info('a mention no longer exists (via http-equiv) - removing [%s]' % key)
removed.append(key)
for key in removed:
del ourMentions[key]
mentions = []
for key in ourMentions:
m = ourMentions[key]['mention']
# convert string dates into datetime's for template processing
if 'postDate' in m:
m['postDate'] = parse(m['postDate'])
mentions.append(m)
pageEnv['title'] = post['title']
pageEnv['mentions'] = mentions
pageEnv['meta'] = metaEmbed % post
post['xml'] = escXML(post['html'])
pageEnv['post'] = post
postHtml = postTemplate.render(pageEnv)
postPage = postPageTemplate.render(pageEnv)
with open('%s.html' % targetFile, 'w+') as h:
h.write(postHtml.encode('utf-8'))
htmlDir = os.path.join(cfg.paths.output, post['year'], post['doy'])
if not os.path.exists(htmlDir):
mkpath(htmlDir)
with open(os.path.join(htmlDir, '%s.html' % post['slug']), 'w+') as h:
h.write(postPage.encode('utf-8'))
saveMetadata(targetFile, post)
checkOutboundWebmentions('%s%s' % (cfg.baseurl, post['url']), postHtml, targetFile, update=True)
def checkPost(targetFile, eventData):
if not os.path.exists('%s.md' % targetFile):
if 'micropub' in eventData:
micropub = eventData['micropub']
if 'content' in micropub:
content = micropub['content']
if 'html' in micropub and len(micropub['html']) > 0:
content.append(micropub['html'])
if 'category' in micropub:
categories = ','.join(micropub['category'])
else:
categories = ''
if 'photo' in micropub:
for url, alt in micropub['photo']:
if len(alt) > 0:
t = ' alt="%s"' % alt
else:
t = ''
content.append('<img src="%s"%s></img>' % (url, t))
logger.info(micropub.keys())
if 'photo_files' in micropub:
for filename in micropub['photo_files']:
photoSrc = os.path.join(cfg.paths.uploads, filename)
photoTgt = os.path.join(cfg.paths.output, 'images', filename)
logger.info('photo file: %s %s %s' % (filename, photoSrc, photoTgt))
shutil.copyfile(photoSrc, photoTgt)
url = '%s%s%s/%s' % (cfg.baseurl, cfg.baseroute, 'images', filename)
content.append('<img src="%s"></img>' % url)
# location = "geo:40.0958,-74.90736;u=92"
# in-reply-to = "https://bear.im/bearlog/2016/123/testing-delete.html"
# bookmark-of = "https://bear.im"
# category = [u'code', u'indieweb']
# html = [u'<p>something</p><p class="">line2</p><p class="">line3<br></p><p class=""><br></p>']
logger.info('content: %d %s' % (len(content), content))
data = { 'created': eventData['timestamp'],
'published': eventData['timestamp'],
'slug': eventData['slug'],
'author': 'bear',
'tags': categories,
'content': '\n'.join(content),
'title': eventData['title'],
'summary': eventData['title'],
'year': eventData['year'],
'doy': eventData['doy'],
'uuid': str(uuid.uuid4()),
'payload': eventData['micropub'],
}
writeMD(targetFile, data)
else:
logger.error('checkPost for [%s] - no Micropub data included' % targetFile)
def mentionDelete(mention):
logger.info('mention delete of [%s] within [%s]' % (mention['targetURL'], mention['sourceURL']))
# update() handles removal of out of date mentions
targetURL = urlparse(mention['targetURL'])
targetRoute = targetURL.path.replace(cfg.baseroute, '')
postUpdate(os.path.join(cfg.paths.content, targetRoute))
def mentionUpdate(mention):
logger.info('mention update of [%s] within [%s]' % (mention['targetURL'], mention['sourceURL']))
targetPath = urlparse(mention['targetURL'].strip()).path
pathItems = targetPath.split('.')
logger.info('[%s] %s' % (targetPath, pathItems))
if pathItems[-1].lower() == 'html':
targetPath = '.'.join(pathItems[:-1])
eventDate = getTimestamp()
sourceURL = urlparse(mention['sourceURL'])
targetRoute = targetPath.replace(cfg.baseroute, '')
targetFile = os.path.join(cfg.paths.content, targetRoute)
logger.info('targetFile [%s]' % targetFile)
ourMentions = loadOurWebmentions(targetFile)
found = scanOurMentions(sourceURL, ourMentions)
if found is not None:
logger.info('updated mention of [%s] within [%s]' % (found, mention['targetURL']))
ourMentions[found]['updated'] = eventDate.strftime('%Y-%m-%dT%H:%M:%S')
ourMentions[found]['mention'] = mention
else:
key = 'mention::%s::%s' % (sourceURL.netloc, sourceURL.path)
ourMentions[key] = { 'created': mention['postDate'],
'updated': None,
'mention': mention,
}
logger.info('added mention of [%s] within [%s]' % (key, mention['targetURL']))
saveOurMentions(targetFile, ourMentions)
postUpdate(targetFile)
def indexUpdate():
frontpage = {}
logger.info('building index page')
for path, dirlist, filelist in os.walk(cfg.paths.content):
if len(filelist) > 0:
for item in filelist:
filename, ext = os.path.splitext(item)
if ext in ('.json',) and '.mentions.json' not in item:
if os.path.exists(os.path.join(path, '%s.deleted' % filename)):
logger.info('skipping deleted post [%s]' % filename)
else:
page = loadMetadata(os.path.join(path, filename))
frontpage[page['key']] = page
templateLoader = jinja2.FileSystemLoader(searchpath=cfg.paths.templates)
templates = jinja2.Environment(loader=templateLoader)
indexTemplate = templates.get_template(cfg.templates['index'])
pageEnv = { 'posts': [],
'title': cfg.title,
}
frontpageKeys = frontpage.keys()
frontpageKeys.sort(reverse=True)
for key in frontpageKeys[:cfg.index_articles]:
pageEnv['posts'].append(frontpage[key])
page = indexTemplate.render(pageEnv)
indexDir = os.path.join(cfg.paths.output)
if not os.path.exists(indexDir):
mkpath(indexDir)
with open(os.path.join(indexDir, 'index.html'), 'w+') as h:
h.write(page.encode('utf-8'))
def isUpdated(path, filename, force=False):
mFile = os.path.join(path, '%s.md' % filename)
jFile = os.path.join(path, '%s.json' % filename)
if os.path.exists(os.path.join(path, '%s.deleted' % filename)):
return 'delete'
if os.path.exists(jFile):
mTime = os.path.getmtime(mFile)
jTime = os.path.getmtime(jFile)
if force or mTime > jTime:
return 'update'
else:
return 'unchanged'
else:
return 'create'
def gather(filepath, filename=None, force=False):
logger.info('gather [%s] [%s] [%s]' % (filepath, filename, force))
if filename is None:
if filepath is None:
logger.error('A specific file or a path to walk must be specified')
else:
for path, dirlist, filelist in os.walk(filepath):
if len(filelist) > 0:
for item in filelist:
filename, ext = os.path.splitext(item)
if ext in ('.md',):
state = isUpdated(path, filename, force)
key = 'kaku-event::%s::%s::%s' % ('post', state, str(uuid.uuid4()))
data = { 'type': 'post',
'action': state,
'data': { 'path': path,
'file': filename
},
'key': key
}
db.set(key, json.dumps(data))
db.publish(cfg.events, key)
else:
s = normalizeFilename(filename)
if not os.path.exists(s):
s = normalizeFilename(os.path.join(filepath, filename))
logger.info('checking [%s]' % s)
if os.path.exists(s):
path = os.path.dirname(s)
filename, ext = os.path.splitext(s)
if ext in ('.md',):
state = isUpdated(path, filename, force)
key = 'kaku-event::%s::%s::%s' % ('post', state, str(uuid.uuid4()))
data = { 'type': 'post',
'action': state,
'data': { 'path': path,
'file': filename
},
'key': key
}
db.set(key, json.dumps(data))
db.publish(cfg.events, key)
def handlePost(eventAction, eventData):
if eventAction == 'create':
if 'path' in eventData:
postDir = eventData['path']
targetFile = eventData['file']
else:
timestamp = parse(eventData['timestamp'])
eventData['year'] = str(timestamp.year)
eventData['doy'] = timestamp.strftime('%j')
slug = eventData['slug']
postDir = os.path.join(cfg.paths.content, eventData['year'], eventData['doy'])
targetFile = os.path.join(postDir, slug)
if not os.path.exists(postDir):
mkpath(postDir)
checkPost(targetFile, eventData)
postUpdate(targetFile, eventAction)
elif eventAction in ('update', 'delete', 'add'):
if 'file' in eventData:
targetFile = eventData['file']
else:
targetURL = urlparse(eventData['url'])
targetRoute = targetURL.path.replace(cfg.baseroute, '')
targetFile = os.path.join(cfg.paths.content, targetRoute)
changed = False
if eventAction == 'delete':
with open('%s.deleted' % targetFile, 'a'):
os.utime('%s.deleted' % targetFile, None)
changed = True
elif eventAction == 'update':
actionData = eventData['micropub']
actionKey = eventData['actionkey']
data = loadMetadata(targetFile)
for key in ('slug', 'tags', 'content', 'html'):
logger.info(' -- %s: %s' % (key, data[key]))
logger.info('update (%s) %s' % (actionKey, json.dumps(actionData)))
if actionKey == 'replace':
if 'content' in actionData:
data['content'] = '\n'.join(actionData['content'])
changed = True
if 'category' in actionData:
tags = data['tags'].split(',')
for tag in actionData['category']:
if tag not in tags:
tags.append(tag)
changed = True
data['tags'] = ','.join(tags)
elif actionKey == 'add':
if 'content' in actionData:
data['content'] += '\n'.join(actionData['content'])
changed = True
if 'category' in actionData:
tags = data['tags'].split(',')
for tag in actionData['category']:
if tag not in tags:
tags.append(tag)
changed = True
data['tags'] = ','.join(tags)
elif actionKey == 'delete':
if 'category' in actionData:
if type(actionData) is dict:
tags = data['tags'].split(',')
for tag in actionData['category']:
if tag in tags:
tags.remove(tag)
changed = True
data['tags'] = ','.join(tags)
else:
data['tags'] = ''
changed = True
for key in ('slug', 'tags', 'content', 'html'):
logger.info(' -- %s: %s' % (key, data[key]))
if changed:
writeMD(targetFile, data)
saveMetadata(targetFile, data)
if changed:
postUpdate(targetFile, eventAction)
elif eventAction == 'undelete':
logger.info(eventData.keys())
if 'url' in eventData:
targetURL = urlparse(eventData['url'])
targetRoute = targetURL.path.replace(cfg.baseroute, '')
targetFile = os.path.join(cfg.paths.content, targetRoute)
logger.info('checking delete marker %s.deleted' % targetFile)
if os.path.exists('%s.deleted' % targetFile):
logger.info('removing delete marker')
os.remove('%s.deleted' % targetFile)
postUpdate(targetFile, eventAction)
indexUpdate()
def handleMentions(eventAction, eventData):
if eventAction == 'create' or eventAction == 'update':
mentionUpdate(eventData)
elif eventAction == 'delete':
mentionDelete(eventData)
def handleGather(eventData):
if 'file' in eventData:
gather(cfg.paths.content, eventData['file'], eventData['force'])
else:
gather(cfg.paths.content)
def handleEvent(eventKey):
try:
event = json.loads(db.get(eventKey))
eventType = event['type']
if eventType == 'gather':
handleGather(event['data'])
else:
eventAction = event['action']
eventData = event['data']
logger.info('dispatching %(action)s for %(type)s' % event)
if eventType == 'post':
handlePost(eventAction, eventData)
elif eventType == 'mention':
handleMentions(eventAction, eventData)
db.expire(eventKey, 86400)
except:
logger.exception('error during event [%s]' % eventKey)
def initLogging(logpath, logname):
logFormatter = logging.Formatter("%(asctime)s %(levelname)-9s %(message)s", "%Y-%m-%d %H:%M:%S")
logfilename = os.path.join(logpath, logname)
logHandler = RotatingFileHandler(logfilename, maxBytes=1024 * 1024 * 100, backupCount=7)
logHandler.setFormatter(logFormatter)
logger.addHandler(logHandler)
logger.setLevel(logging.DEBUG)
def getRedis(redisURL):
url = urlparse(redisURL)
host = url.netloc
if ':' in host:
host, port = host.split(':')
port = int(port)
else:
port = 6379
if len(url.path) > 0:
db = int(url.path[1:])
else:
db = 0
return redis.StrictRedis(host=host, port=port, db=db)
# Example config file
# {
# "baseroute": "/bearlog/",
# "baseurl": "https://bear.im",
# "index_articles": 15,
# "redis": "redis://127.0.0.1:6379/1",
# "markdown_extras": [ "fenced-code-blocks", "cuddled-lists" ],
# "logname": "kaku_events.log",
# "events": "kaku-events",
# "paths": {
# "templates": "/home/bearim/templates/",
# "content": "/home/bearim/content/",
# "output": "/srv/bear.im/bearlog/",
# "log": "/home/bearim/"
# },
# "templates": {
# "post": "article.jinja",
# "mention": "mention.jinja",
# "postPage": "article_page.jinja",
# "index": "blog_index.jinja",
# "markdown": "post.md",
# "embed": "meta.embed"
# }
# }
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./kaku_events.cfg')
parser.add_argument('--file', default=None,
help='A specific markdown file to check and then exit')
parser.add_argument('--force', default=False, action='store_true',
help='Force any found markdown files (or specific file) to be considered an update.')
args = parser.parse_args()
cfgFiles = findConfigFile(args.config)
cfg = Config()
cfg.fromJson(cfgFiles[0])
initLogging(cfg.paths.log, cfg.logname)
logger.info('kaku_events started')
db = getRedis(cfg.redis)
with open(os.path.join(cfg.paths.templates, cfg.templates.markdown)) as h:
mdPost = h.read()
with open(os.path.join(cfg.paths.templates, cfg.templates.embed)) as h:
metaEmbed = h.read()
if args.file is not None:
gather(cfg.paths.content, args.file, args.force)
else:
md = markdown2.Markdown(extras=cfg.markdown_extras)
p = db.pubsub()
p.subscribe(cfg.events)
logger.info('listening for events')
for item in p.listen():
if item['type'] == 'message':
key = item['data']
if key.startswith('kaku-event::'):
logger.info('handling event [%s]' % key)
handleEvent(key)
| true
| true
|
f712de466750aaf771cb56bf97edbee3086e0194
| 2,719
|
py
|
Python
|
playbooks/files/rax-maas/plugins/bonding_iface_check.py
|
odyssey4me/rpc-maas
|
6ba26ee4e793de740124d0e22d2cee6a943440f9
|
[
"Apache-2.0"
] | null | null | null |
playbooks/files/rax-maas/plugins/bonding_iface_check.py
|
odyssey4me/rpc-maas
|
6ba26ee4e793de740124d0e22d2cee6a943440f9
|
[
"Apache-2.0"
] | 2
|
2020-08-13T19:29:13.000Z
|
2020-08-13T21:45:39.000Z
|
playbooks/files/rax-maas/plugins/bonding_iface_check.py
|
odyssey4me/rpc-maas
|
6ba26ee4e793de740124d0e22d2cee6a943440f9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from maas_common import metric_bool
from maas_common import print_output
def bonding_ifaces_check(_):
bonding_ifaces = os.listdir("/proc/net/bonding")
for bonding_iface in bonding_ifaces:
bonding_iface_check_cmd = ['cat', '/proc/net/bonding/%s'
% bonding_iface]
bonding_iface_check_cmd_output = subprocess.check_output(
bonding_iface_check_cmd
)
bonding_iface_check_cmd_output_lines = (
bonding_iface_check_cmd_output.split('\n')
)
slave_count = 0
for idx, line in enumerate(bonding_iface_check_cmd_output_lines):
if line.startswith("Slave Interface"):
slave_count = slave_count + 1
has_slave_down = False
for idx, line in enumerate(bonding_iface_check_cmd_output_lines):
if line.startswith("Slave Interface"):
slave_inface_mii_status_line = (
bonding_iface_check_cmd_output_lines[idx + 1]
)
slave_inface_mii_status = (
slave_inface_mii_status_line.split(":")[1].strip()
)
if 'up' not in slave_inface_mii_status or slave_count < 2:
has_slave_down = True
if has_slave_down:
metric_bool('host_bonding_iface_%s_slave_down' %
bonding_iface,
True)
else:
metric_bool('host_bonding_iface_%s_slave_down' %
bonding_iface,
False)
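# Editor's note: an illustrative /proc/net/bonding/<iface> excerpt (not from the source)
# showing the two line types this parser keys on; each slave's "MII Status" is read from
# the line directly after its "Slave Interface" line (the idx + 1 lookup above):
#
#   Slave Interface: eth0
#   MII Status: up
#   Slave Interface: eth1
#   MII Status: down
#
# With that layout the check emits host_bonding_iface_bond0_slave_down = True, since one
# slave is down; it also alarms when fewer than two slaves are configured.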
def main(args):
bonding_ifaces_check(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Check statuses of local bonding interfaces')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
args = parser.parse_args()
with print_output(print_telegraf=args.telegraf_output):
main(args)
| 34.858974
| 74
| 0.630379
|
import argparse
import os
import subprocess
from maas_common import metric_bool
from maas_common import print_output
def bonding_ifaces_check(_):
bonding_ifaces = os.listdir("/proc/net/bonding")
for bonding_iface in bonding_ifaces:
bonding_iface_check_cmd = ['cat', '/proc/net/bonding/%s'
% bonding_iface]
bonding_iface_check_cmd_output = subprocess.check_output(
bonding_iface_check_cmd
)
bonding_iface_check_cmd_output_lines = (
bonding_iface_check_cmd_output.split('\n')
)
slave_count = 0
for idx, line in enumerate(bonding_iface_check_cmd_output_lines):
if line.startswith("Slave Interface"):
slave_count = slave_count + 1
has_slave_down = False
for idx, line in enumerate(bonding_iface_check_cmd_output_lines):
if line.startswith("Slave Interface"):
slave_inface_mii_status_line = (
bonding_iface_check_cmd_output_lines[idx + 1]
)
slave_inface_mii_status = (
slave_inface_mii_status_line.split(":")[1].strip()
)
if 'up' not in slave_inface_mii_status or slave_count < 2:
has_slave_down = True
if has_slave_down:
metric_bool('host_bonding_iface_%s_slave_down' %
bonding_iface,
True)
else:
metric_bool('host_bonding_iface_%s_slave_down' %
bonding_iface,
False)
def main(args):
bonding_ifaces_check(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Check statuses of local bonding interfaces')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
args = parser.parse_args()
with print_output(print_telegraf=args.telegraf_output):
main(args)
| true
| true
|
f712df66591fc0d8b0718cae5fe6211b949499cd
| 556
|
py
|
Python
|
tablarray/kwtools/__init__.py
|
chriscannon9001/tablarray
|
f07530f84a8c86abe996cdb999233ed9bb8edf7e
|
[
"BSD-3-Clause"
] | null | null | null |
tablarray/kwtools/__init__.py
|
chriscannon9001/tablarray
|
f07530f84a8c86abe996cdb999233ed9bb8edf7e
|
[
"BSD-3-Clause"
] | null | null | null |
tablarray/kwtools/__init__.py
|
chriscannon9001/tablarray
|
f07530f84a8c86abe996cdb999233ed9bb8edf7e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
kwargs_tools
by Chris Cannon
====
Provides
1. kwargs_scan - convert header and values from csv into a dictionary
2. kwargs_db - instance = myclass(search='name')
is converted to
instance = myclass(param1, param2, ...)
using a database lookup
3. kwargs_gui
@author: Chris Cannon
"""
__author__ = "Chris Cannon"
__version__ = "0.0.0"
__license__ = "GPLv3"
__status__ = "Prototype"
from .kwargs_scan import *
from .kwargs_db import *
| 21.384615
| 73
| 0.625899
|
__author__ = "Chris Cannon"
__version__ = "0.0.0"
__license__ = "GPLv3"
__status__ = "Prototype"
from .kwargs_scan import *
from .kwargs_db import *
| true
| true
|
f712e0700d6b0fc427bb1da21e1c1cd0184c3f10
| 1,715
|
py
|
Python
|
ee/tasks/send_license_usage.py
|
asherf/posthog
|
1e50704d76cba484e80b83f1e1f658bd6e98743a
|
[
"MIT"
] | null | null | null |
ee/tasks/send_license_usage.py
|
asherf/posthog
|
1e50704d76cba484e80b83f1e1f658bd6e98743a
|
[
"MIT"
] | null | null | null |
ee/tasks/send_license_usage.py
|
asherf/posthog
|
1e50704d76cba484e80b83f1e1f658bd6e98743a
|
[
"MIT"
] | null | null | null |
import posthoganalytics
import requests
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from ee.clickhouse.client import sync_execute
from ee.models.license import License
from posthog.models import User
def send_license_usage():
license = License.objects.first_valid()
if not license:
return
try:
date_from = (timezone.now() - relativedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
date_to = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
events_count = sync_execute(
"select count(1) from events where timestamp >= %(date_from)s and timestamp < %(date_to)s",
{"date_from": date_from, "date_to": date_to},
)[0][0]
response = requests.post(
"https://license.posthog.com/licenses/usage",
data={"date": date_from.strftime("%Y-%m-%d"), "key": license.key, "events_count": events_count,},
)
response.raise_for_status()
if not response.ok:
posthoganalytics.capture(
User.objects.first().distinct_id, # type: ignore
"send license usage data error",
{
"error": response.content,
"status_code": response.status_code,
"date": date_from.strftime("%Y-%m-%d"),
"events_count": events_count,
},
)
except Exception as err:
posthoganalytics.capture(
User.objects.first().distinct_id, # type: ignore
"send license usage data error",
{"error": str(err), "date": date_from.strftime("%Y-%m-%d")},
)
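# Editor's sketch of the reporting window computed above (naive datetimes for
# illustration; the real code uses Django's timezone.now()):
#
#   from datetime import datetime
#   from dateutil.relativedelta import relativedelta
#   now = datetime(2021, 3, 2, 15, 30)
#   date_from = (now - relativedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
#   date_to = now.replace(hour=0, minute=0, second=0, microsecond=0)
#   assert date_from == datetime(2021, 3, 1) and date_to == datetime(2021, 3, 2)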
| 38.111111
| 111
| 0.594752
|
import posthoganalytics
import requests
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from ee.clickhouse.client import sync_execute
from ee.models.license import License
from posthog.models import User
def send_license_usage():
license = License.objects.first_valid()
if not license:
return
try:
date_from = (timezone.now() - relativedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
date_to = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
events_count = sync_execute(
"select count(1) from events where timestamp >= %(date_from)s and timestamp < %(date_to)s",
{"date_from": date_from, "date_to": date_to},
)[0][0]
response = requests.post(
"https://license.posthog.com/licenses/usage",
data={"date": date_from.strftime("%Y-%m-%d"), "key": license.key, "events_count": events_count,},
)
response.raise_for_status()
if not response.ok:
posthoganalytics.capture(
User.objects.first().distinct_id,
"send license usage data error",
{
"error": response.content,
"status_code": response.status_code,
"date": date_from.strftime("%Y-%m-%d"),
"events_count": events_count,
},
)
except Exception as err:
posthoganalytics.capture(
User.objects.first().distinct_id,
"send license usage data error",
{"error": str(err), "date": date_from.strftime("%Y-%m-%d")},
)
| true
| true
|
f712e0eb5555667a488c2bf52ce2443674b5782c
| 1,719
|
py
|
Python
|
Sec24_Design/q0284.py
|
OctoberChang/LeetCode-Solutions
|
bb7958194e7b196729611cbad19ee792ba41c429
|
[
"MIT"
] | 2
|
2021-01-26T00:59:47.000Z
|
2021-11-20T02:55:13.000Z
|
Sec24_Design/q0284.py
|
OctoberChang/LeetCode-Solutions
|
bb7958194e7b196729611cbad19ee792ba41c429
|
[
"MIT"
] | null | null | null |
Sec24_Design/q0284.py
|
OctoberChang/LeetCode-Solutions
|
bb7958194e7b196729611cbad19ee792ba41c429
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator:
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator:
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.iterator = iterator
self.val_ = None
self.has_next_ = iterator.hasNext()
self.has_peak_ = False
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
if not self.has_peak_:
self.has_peak_ = True
self.val_ = self.iterator.next()
return self.val_
def next(self):
"""
:rtype: int
"""
self.val_ = self.peek()
self.has_peak_ = False
self.has_next_ = self.iterator.hasNext()
return self.val_
def hasNext(self):
"""
:rtype: bool
"""
return self.has_next_
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
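# A runnable sketch (the list-backed Iterator below is a stand-in for LeetCode's hidden
# implementation; only hasNext()/next() matter to PeekingIterator):
#
#   class Iterator:
#       def __init__(self, nums):
#           self.nums, self.i = nums, 0
#       def hasNext(self):
#           return self.i < len(self.nums)
#       def next(self):
#           val = self.nums[self.i]
#           self.i += 1
#           return val
#
#   it = PeekingIterator(Iterator([1, 2, 3]))
#   assert it.peek() == 1   # peek does not advance
#   assert it.next() == 1   # returns the same value as the peek
#   assert it.next() == 2 and it.peek() == 3 and it.hasNext()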
| 25.656716
| 81
| 0.556719
|
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator:
def __init__(self, iterator):
self.iterator = iterator
self.val_ = None
self.has_next_ = iterator.hasNext()
self.has_peak_ = False
def peek(self):
if not self.has_peak_:
self.has_peak_ = True
self.val_ = self.iterator.next()
return self.val_
def next(self):
self.val_ = self.peek()
self.has_peak_ = False
self.has_next_ = self.iterator.hasNext()
return self.val_
def hasNext(self):
return self.has_next_
| true
| true
|
f712e225df2cc8743f4e2cf410c78b74be4c512a
| 523
|
py
|
Python
|
index.py
|
kavehkm/MiddleWareSystem
|
de5484d7fab6582904506dcf7db19ecfffcc8df7
|
[
"Apache-2.0"
] | null | null | null |
index.py
|
kavehkm/MiddleWareSystem
|
de5484d7fab6582904506dcf7db19ecfffcc8df7
|
[
"Apache-2.0"
] | null | null | null |
index.py
|
kavehkm/MiddleWareSystem
|
de5484d7fab6582904506dcf7db19ecfffcc8df7
|
[
"Apache-2.0"
] | null | null | null |
# standard
from importlib import import_module
# internal
import settings
def core_func(*args, **kwargs):
print('core_func executed with args={} + kwargs={}'.format(args, kwargs))
def set_middlewares(func):
for middleware in reversed(settings.MIDDLEWARES):
p, m = middleware.rsplit('.', 1)
mod = import_module(p)
met = getattr(mod, m)
func = met(func)
return func
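# Editor's sketch (settings.py is not shown here, so the entries below are invented):
# each MIDDLEWARES entry is a dotted path to a callable that wraps the next layer, e.g.
#
#   # settings.py
#   MIDDLEWARES = [
#       'middlewares.timing.timer',
#       'middlewares.log.logger',
#   ]
#
#   # middlewares/log.py
#   def logger(func):
#       def wrapper(*args, **kwargs):
#           print('calling {} args={} kwargs={}'.format(func.__name__, args, kwargs))
#           return func(*args, **kwargs)
#       return wrapper
#
# Because the list is applied in reversed() order, the first entry becomes the
# outermost wrapper around core_func.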
if __name__ == '__main__':
args = (1, 2, 3)
kwargs = {
'a': 'x',
'b': 'y',
'c': 'z'
}
set_middlewares(core_func)(*args, **kwargs)
| 17.433333
| 74
| 0.65392
|
from importlib import import_module
import settings
def core_func(*args, **kwargs):
print('core_func executed with args={} + kwargs={}'.format(args, kwargs))
def set_middlewares(func):
for middleware in reversed(settings.MIDDLEWARES):
p, m = middleware.rsplit('.', 1)
mod = import_module(p)
met = getattr(mod, m)
func = met(func)
return func
if __name__ == '__main__':
args = (1, 2, 3)
kwargs = {
'a': 'x',
'b': 'y',
'c': 'z'
}
set_middlewares(core_func)(*args, **kwargs)
| true
| true
|
f712e2465ddae95ed0184616de702dadd275e6fe
| 3,225
|
py
|
Python
|
Algorithm.Python/FinancialAdvisorDemoAlgorithm.py
|
theGOTOguy/Lean-1
|
99327d106cf7f6b40a5cb43d7cf40350bd6eb02a
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Python/FinancialAdvisorDemoAlgorithm.py
|
theGOTOguy/Lean-1
|
99327d106cf7f6b40a5cb43d7cf40350bd6eb02a
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Python/FinancialAdvisorDemoAlgorithm.py
|
theGOTOguy/Lean-1
|
99327d106cf7f6b40a5cb43d7cf40350bd6eb02a
|
[
"Apache-2.0"
] | null | null | null |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
### <summary>
### This algorithm demonstrates how to submit orders to a Financial Advisor account group, allocation profile or a single managed account.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
### <meta name="tag" content="financial advisor" />
class FinancialAdvisorDemoAlgorithm(QCAlgorithm):
def Initialize(self):
# Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.
        self.SetStartDate(2013,10,7)   #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
self.symbol = self.AddEquity("SPY", Resolution.Second).Symbol
# The default order properties can be set here to choose the FA settings
# to be automatically used in any order submission method (such as SetHoldings, Buy, Sell and Order)
# Use a default FA Account Group with an Allocation Method
self.DefaultOrderProperties = InteractiveBrokersOrderProperties()
# account group created manually in IB/TWS
self.DefaultOrderProperties.FaGroup = "TestGroupEQ"
# supported allocation methods are: EqualQuantity, NetLiq, AvailableEquity, PctChange
self.DefaultOrderProperties.FaMethod = "EqualQuantity"
# set a default FA Allocation Profile
# DefaultOrderProperties = InteractiveBrokersOrderProperties()
# allocation profile created manually in IB/TWS
# self.DefaultOrderProperties.FaProfile = "TestProfileP"
# send all orders to a single managed account
# DefaultOrderProperties = InteractiveBrokersOrderProperties()
# a sub-account linked to the Financial Advisor master account
# self.DefaultOrderProperties.Account = "DU123456"
def OnData(self, data):
# OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
if not self.Portfolio.Invested:
# when logged into IB as a Financial Advisor, this call will use order properties
# set in the DefaultOrderProperties property of QCAlgorithm
self.SetHoldings("SPY", 1)
| 47.426471
| 150
| 0.728992
|
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
self.SetHoldings("SPY", 1)
| false
| true
|
f712e29e44bdca122469aef17d117cb64090f78d
| 1,306
|
py
|
Python
|
Data_Processing/BoxPlot_error.py
|
ZzhKlaus/2018-SURF-Trajectory-Estimation
|
71c62c816d1531f6806bfa9455fec9affe36496c
|
[
"MIT"
] | 14
|
2018-09-03T03:01:00.000Z
|
2021-07-13T13:22:08.000Z
|
Data_Processing/BoxPlot_error.py
|
ZzhKlaus/2018-SURF-Trajectory-Estimation
|
71c62c816d1531f6806bfa9455fec9affe36496c
|
[
"MIT"
] | null | null | null |
Data_Processing/BoxPlot_error.py
|
ZzhKlaus/2018-SURF-Trajectory-Estimation
|
71c62c816d1531f6806bfa9455fec9affe36496c
|
[
"MIT"
] | 13
|
2018-09-08T08:49:31.000Z
|
2021-04-23T12:27:58.000Z
|
#By Zhenghang(Klaus) Zhong
#Box Plot of error distribution
from pandas import DataFrame
from pandas import read_csv
import pandas as pd
import numpy as np
from matplotlib import pyplot
# load results into a dataframe
filenames_128 = ['dis_diff_128.csv']
filenames_256 = ['dis_diff_256.csv']
filenames_512 = ['dis_diff_512.csv']
results = DataFrame()
for name in filenames_128:
results_128 = read_csv(name, header=0,usecols = [1])
	# describe all results; 1 unit = 10 cm, so divide by 10 to convert to meters
results_128 = results_128.div(10, axis = 0)
for name in filenames_256:
results_256 = read_csv(name, header=0,usecols = [1])
# describe all results
results_256 = results_256.div(10, axis = 0)
for name in filenames_512:
results_512 = read_csv(name, header=0,usecols = [1])
# describe all results
results_512 = results_512.div(10, axis = 0)
print(results_128.describe())
print(results_256.describe())
print(results_512.describe())
# box and whisker plot
df = pd.DataFrame(np.concatenate((results_128,results_512),axis = 1),
columns=['128', '512'])
df.boxplot(sym='k',showmeans = True,showfliers = False,return_type='dict')
#results_256.boxplot(sym='k',showmeans = True,whis = [0,8],showfliers = False,return_type='dict')
pyplot.xlabel('Hidden node')
pyplot.ylabel('Error (m)')
pyplot.show()
| 29.681818
| 97
| 0.748086
|
from pandas import DataFrame
from pandas import read_csv
import pandas as pd
import numpy as np
from matplotlib import pyplot
filenames_128 = ['dis_diff_128.csv']
filenames_256 = ['dis_diff_256.csv']
filenames_512 = ['dis_diff_512.csv']
results = DataFrame()
for name in filenames_128:
results_128 = read_csv(name, header=0,usecols = [1])
results_128 = results_128.div(10, axis = 0)
for name in filenames_256:
results_256 = read_csv(name, header=0,usecols = [1])
results_256 = results_256.div(10, axis = 0)
for name in filenames_512:
results_512 = read_csv(name, header=0,usecols = [1])
results_512 = results_512.div(10, axis = 0)
print(results_128.describe())
print(results_256.describe())
print(results_512.describe())
df = pd.DataFrame(np.concatenate((results_128,results_512),axis = 1),
columns=['128', '512'])
df.boxplot(sym='k',showmeans = True,showfliers = False,return_type='dict')
pyplot.xlabel('Hidden node')
pyplot.ylabel('Error (m)')
pyplot.show()
| true
| true
|
f712e366441a3726dfdd8750a0e6012404d00359
| 383
|
py
|
Python
|
main.py
|
afrigon/sharify-api
|
383baa5ae089d996c2d68da8b55e566dd0cfbbf9
|
[
"MIT"
] | 1
|
2020-01-06T17:29:39.000Z
|
2020-01-06T17:29:39.000Z
|
main.py
|
afrigon/sharify-api
|
383baa5ae089d996c2d68da8b55e566dd0cfbbf9
|
[
"MIT"
] | null | null | null |
main.py
|
afrigon/sharify-api
|
383baa5ae089d996c2d68da8b55e566dd0cfbbf9
|
[
"MIT"
] | 2
|
2020-01-07T14:25:38.000Z
|
2021-06-23T16:10:57.000Z
|
import os
from dotenv import load_dotenv
from app.utils.cache import Cache
from app import ApplicationFactory
load_dotenv()
with open('./AppleMusicAuthKey.p8', 'r') as f:
os.environ['APPLE_KEY'] = f.read()
TITLE = 'Sharify'
DESCRIPTION = ''
DEBUG = os.environ.get('APP_DEBUG') or False
Cache.instance().init()
app = ApplicationFactory(TITLE, DESCRIPTION).create(debug=DEBUG)
| 22.529412
| 64
| 0.744125
|
import os
from dotenv import load_dotenv
from app.utils.cache import Cache
from app import ApplicationFactory
load_dotenv()
with open('./AppleMusicAuthKey.p8', 'r') as f:
os.environ['APPLE_KEY'] = f.read()
TITLE = 'Sharify'
DESCRIPTION = ''
DEBUG = os.environ.get('APP_DEBUG') or False
Cache.instance().init()
app = ApplicationFactory(TITLE, DESCRIPTION).create(debug=DEBUG)
| true
| true
|
f712e38bdcebebf07f396d967e3bb465facc3362
| 648
|
py
|
Python
|
src/popoto/utils/list_search.py
|
yudame/portfolio
|
936b92e39e764e0e511c4203a978e240a0a90714
|
[
"MIT"
] | 5
|
2021-11-21T01:36:02.000Z
|
2022-01-28T23:16:51.000Z
|
src/popoto/utils/list_search.py
|
yudame/portfolio
|
936b92e39e764e0e511c4203a978e240a0a90714
|
[
"MIT"
] | 1
|
2021-12-29T13:20:17.000Z
|
2021-12-29T13:20:17.000Z
|
src/popoto/utils/list_search.py
|
yudame/portfolio
|
936b92e39e764e0e511c4203a978e240a0a90714
|
[
"MIT"
] | null | null | null |
# https://stackoverflow.com/questions/16974047/efficient-way-to-find-missing-elements-in-an-integer-sequence/16974075#16974075
from itertools import islice, chain
def window(seq, n=2):
    """Returns a sliding window (of width n) over data from the iterable.

    s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...
    """
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def missing_elements(L):
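    # Adjacent values in the sorted sequence that differ by more than 1 mark a gap;
    # every integer strictly between such a pair is missing.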
missing = chain.from_iterable(range(x + 1, y) for x, y in window(L) if (y - x) > 1)
return list(missing)
| 34.105263
| 126
| 0.611111
|
from itertools import islice, chain
def window(seq, n=2):
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def missing_elements(L):
missing = chain.from_iterable(range(x + 1, y) for x, y in window(L) if (y - x) > 1)
return list(missing)
| true
| true
|
f712e48a3d3608959b97bc229c46bebab602676f
| 10,349
|
py
|
Python
|
tests/krs/test_groups.py
|
WIPACrepo/keycloak-rest-services
|
2661b0db2dd320bdb8eefc62c805188bec52ecc7
|
[
"MIT"
] | 1
|
2021-09-23T14:39:36.000Z
|
2021-09-23T14:39:36.000Z
|
tests/krs/test_groups.py
|
WIPACrepo/keycloak-rest-services
|
2661b0db2dd320bdb8eefc62c805188bec52ecc7
|
[
"MIT"
] | 38
|
2020-08-31T22:53:09.000Z
|
2022-03-28T20:55:39.000Z
|
tests/krs/test_groups.py
|
WIPACrepo/keycloak-rest-services
|
2661b0db2dd320bdb8eefc62c805188bec52ecc7
|
[
"MIT"
] | null | null | null |
import pytest
from krs.token import get_token
from krs import groups, users
from ..util import keycloak_bootstrap
@pytest.mark.asyncio
async def test_list_groups_empty(keycloak_bootstrap):
ret = await groups.list_groups(rest_client=keycloak_bootstrap)
assert ret == {}
@pytest.mark.asyncio
async def test_list_groups(keycloak_bootstrap):
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup/testgroup2', rest_client=keycloak_bootstrap)
ret = await groups.list_groups(rest_client=keycloak_bootstrap)
assert list(ret.keys()) == ['/testgroup','/testgroup/testgroup2']
assert ret['/testgroup']['children'] == ['testgroup2']
@pytest.mark.asyncio
async def test_group_info(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup/testgroup2', rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['name'] == 'testgroup'
assert ret['path'] == '/testgroup'
assert [g['name'] for g in ret['subGroups']] == ['testgroup2']
@pytest.mark.asyncio
async def test_group_attrs(keycloak_bootstrap):
await groups.create_group('/testgroup', attrs={'foo':'bar'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['name'] == 'testgroup'
assert ret['path'] == '/testgroup'
assert ret['attributes'] == {'foo': 'bar'}
@pytest.mark.asyncio
async def test_modify_group(keycloak_bootstrap):
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {}
await groups.modify_group('/testgroup', {'baz': 'foo'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'baz': 'foo'}
@pytest.mark.asyncio
async def test_modify_group_with_attrs(keycloak_bootstrap):
await groups.create_group('/testgroup', attrs={'foo':'bar'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'foo': 'bar'}
await groups.modify_group('/testgroup', {'baz': 'foo'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'foo': 'bar', 'baz': 'foo'}
@pytest.mark.asyncio
async def test_modify_group_del_attr(keycloak_bootstrap):
await groups.create_group('/testgroup', attrs={'foo':'bar'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'foo': 'bar'}
await groups.modify_group('/testgroup', {'foo': None, 'baz': 'foo'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'baz': 'foo'}
@pytest.mark.asyncio
async def test_group_info_by_id(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup/testgroup2', rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
group_id = ret['id']
ret = await groups.group_info_by_id(group_id, rest_client=keycloak_bootstrap)
assert ret['name'] == 'testgroup'
assert ret['path'] == '/testgroup'
assert [g['name'] for g in ret['subGroups']] == ['testgroup2']
@pytest.mark.asyncio
async def test_create_group(keycloak_bootstrap):
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
@pytest.mark.asyncio
async def test_create_subgroup(keycloak_bootstrap):
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup/testgroup2', rest_client=keycloak_bootstrap)
@pytest.mark.asyncio
async def test_delete_group(keycloak_bootstrap):
# first test non-existing group
await groups.delete_group('/testgroup', rest_client=keycloak_bootstrap)
# now test existing group
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.delete_group('/testgroup', rest_client=keycloak_bootstrap)
@pytest.mark.asyncio
async def test_get_user_groups(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.get_user_groups('testuser', rest_client=keycloak_bootstrap)
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
ret = await groups.get_user_groups('testuser', rest_client=keycloak_bootstrap)
assert ret == []
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_user_groups('testuser', rest_client=keycloak_bootstrap)
assert ret == ['/testgroup']
@pytest.mark.asyncio
async def test_add_user_group(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
with pytest.raises(Exception):
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
@pytest.mark.asyncio
async def test_remove_user_group(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.remove_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
with pytest.raises(Exception):
await groups.remove_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
# test for not being a member of the group
await groups.remove_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
# now test for removing the group
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.remove_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_user_groups('testuser', rest_client=keycloak_bootstrap)
assert ret == []
@pytest.mark.asyncio
async def test_get_group_membership(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
with pytest.raises(Exception):
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/testgroup', rest_client=keycloak_bootstrap)
assert ret == []
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/testgroup', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
@pytest.mark.asyncio
async def test_parent_child_group_membership(keycloak_bootstrap):
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
await groups.create_group('/parent', rest_client=keycloak_bootstrap)
await groups.create_group('/parent/child', rest_client=keycloak_bootstrap)
await groups.add_user_group('/parent', 'testuser', rest_client=keycloak_bootstrap)
await groups.add_user_group('/parent/child', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/parent', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
ret = await groups.get_group_membership('/parent/child', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
@pytest.mark.asyncio
async def test_child_parent_group_membership(keycloak_bootstrap):
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
await groups.create_group('/parent', rest_client=keycloak_bootstrap)
await groups.create_group('/parent/child', rest_client=keycloak_bootstrap)
await groups.add_user_group('/parent/child', 'testuser', rest_client=keycloak_bootstrap)
await groups.add_user_group('/parent', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/parent', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
ret = await groups.get_group_membership('/parent/child', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
@pytest.mark.asyncio
async def test_add_user_group_multiple(keycloak_bootstrap):
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
await groups.create_group('/foo', rest_client=keycloak_bootstrap)
await groups.create_group('/foo/bar', rest_client=keycloak_bootstrap)
await groups.create_group('/bar', rest_client=keycloak_bootstrap)
await groups.create_group('/foo/bar/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/bar/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.add_user_group('/foo/bar/testgroup', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/foo/bar/testgroup', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
ret = await groups.get_group_membership('/bar/testgroup', rest_client=keycloak_bootstrap)
assert ret == []
ret = await groups.get_group_membership('/testgroup', rest_client=keycloak_bootstrap)
assert ret == []
| 48.359813
| 104
| 0.757948
|
import pytest
from krs.token import get_token
from krs import groups, users
from ..util import keycloak_bootstrap
@pytest.mark.asyncio
async def test_list_groups_empty(keycloak_bootstrap):
ret = await groups.list_groups(rest_client=keycloak_bootstrap)
assert ret == {}
@pytest.mark.asyncio
async def test_list_groups(keycloak_bootstrap):
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup/testgroup2', rest_client=keycloak_bootstrap)
ret = await groups.list_groups(rest_client=keycloak_bootstrap)
assert list(ret.keys()) == ['/testgroup','/testgroup/testgroup2']
assert ret['/testgroup']['children'] == ['testgroup2']
@pytest.mark.asyncio
async def test_group_info(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup/testgroup2', rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['name'] == 'testgroup'
assert ret['path'] == '/testgroup'
assert [g['name'] for g in ret['subGroups']] == ['testgroup2']
@pytest.mark.asyncio
async def test_group_attrs(keycloak_bootstrap):
await groups.create_group('/testgroup', attrs={'foo':'bar'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['name'] == 'testgroup'
assert ret['path'] == '/testgroup'
assert ret['attributes'] == {'foo': 'bar'}
@pytest.mark.asyncio
async def test_modify_group(keycloak_bootstrap):
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {}
await groups.modify_group('/testgroup', {'baz': 'foo'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'baz': 'foo'}
@pytest.mark.asyncio
async def test_modify_group_with_attrs(keycloak_bootstrap):
await groups.create_group('/testgroup', attrs={'foo':'bar'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'foo': 'bar'}
await groups.modify_group('/testgroup', {'baz': 'foo'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'foo': 'bar', 'baz': 'foo'}
@pytest.mark.asyncio
async def test_modify_group_del_attr(keycloak_bootstrap):
await groups.create_group('/testgroup', attrs={'foo':'bar'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'foo': 'bar'}
await groups.modify_group('/testgroup', {'foo': None, 'baz': 'foo'}, rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
assert ret['attributes'] == {'baz': 'foo'}
@pytest.mark.asyncio
async def test_group_info_by_id(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup/testgroup2', rest_client=keycloak_bootstrap)
ret = await groups.group_info('/testgroup', rest_client=keycloak_bootstrap)
group_id = ret['id']
ret = await groups.group_info_by_id(group_id, rest_client=keycloak_bootstrap)
assert ret['name'] == 'testgroup'
assert ret['path'] == '/testgroup'
assert [g['name'] for g in ret['subGroups']] == ['testgroup2']
@pytest.mark.asyncio
async def test_create_group(keycloak_bootstrap):
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
@pytest.mark.asyncio
async def test_create_subgroup(keycloak_bootstrap):
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup/testgroup2', rest_client=keycloak_bootstrap)
@pytest.mark.asyncio
async def test_delete_group(keycloak_bootstrap):
await groups.delete_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.delete_group('/testgroup', rest_client=keycloak_bootstrap)
@pytest.mark.asyncio
async def test_get_user_groups(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.get_user_groups('testuser', rest_client=keycloak_bootstrap)
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
ret = await groups.get_user_groups('testuser', rest_client=keycloak_bootstrap)
assert ret == []
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_user_groups('testuser', rest_client=keycloak_bootstrap)
assert ret == ['/testgroup']
@pytest.mark.asyncio
async def test_add_user_group(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
with pytest.raises(Exception):
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
@pytest.mark.asyncio
async def test_remove_user_group(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.remove_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
with pytest.raises(Exception):
await groups.remove_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.remove_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.remove_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_user_groups('testuser', rest_client=keycloak_bootstrap)
assert ret == []
@pytest.mark.asyncio
async def test_get_group_membership(keycloak_bootstrap):
with pytest.raises(Exception):
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
with pytest.raises(Exception):
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/testgroup', rest_client=keycloak_bootstrap)
assert ret == []
await groups.add_user_group('/testgroup', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/testgroup', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
@pytest.mark.asyncio
async def test_parent_child_group_membership(keycloak_bootstrap):
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
await groups.create_group('/parent', rest_client=keycloak_bootstrap)
await groups.create_group('/parent/child', rest_client=keycloak_bootstrap)
await groups.add_user_group('/parent', 'testuser', rest_client=keycloak_bootstrap)
await groups.add_user_group('/parent/child', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/parent', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
ret = await groups.get_group_membership('/parent/child', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
@pytest.mark.asyncio
async def test_child_parent_group_membership(keycloak_bootstrap):
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
await groups.create_group('/parent', rest_client=keycloak_bootstrap)
await groups.create_group('/parent/child', rest_client=keycloak_bootstrap)
await groups.add_user_group('/parent/child', 'testuser', rest_client=keycloak_bootstrap)
await groups.add_user_group('/parent', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/parent', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
ret = await groups.get_group_membership('/parent/child', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
@pytest.mark.asyncio
async def test_add_user_group_multiple(keycloak_bootstrap):
await users.create_user('testuser', 'first', 'last', 'email', rest_client=keycloak_bootstrap)
await groups.create_group('/foo', rest_client=keycloak_bootstrap)
await groups.create_group('/foo/bar', rest_client=keycloak_bootstrap)
await groups.create_group('/bar', rest_client=keycloak_bootstrap)
await groups.create_group('/foo/bar/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/bar/testgroup', rest_client=keycloak_bootstrap)
await groups.create_group('/testgroup', rest_client=keycloak_bootstrap)
await groups.add_user_group('/foo/bar/testgroup', 'testuser', rest_client=keycloak_bootstrap)
ret = await groups.get_group_membership('/foo/bar/testgroup', rest_client=keycloak_bootstrap)
assert ret == ['testuser']
ret = await groups.get_group_membership('/bar/testgroup', rest_client=keycloak_bootstrap)
assert ret == []
ret = await groups.get_group_membership('/testgroup', rest_client=keycloak_bootstrap)
assert ret == []
| true
| true
|
f712e55f8833113841675aec904201ea6fc6f2de
| 1,487
|
py
|
Python
|
setup.py
|
jbn/idgraph
|
f610e72bed77de011f2a6610daa3fb45ea4caa8c
|
[
"MIT"
] | 2
|
2019-10-21T15:07:06.000Z
|
2019-10-21T15:21:02.000Z
|
setup.py
|
jbn/idgraph
|
f610e72bed77de011f2a6610daa3fb45ea4caa8c
|
[
"MIT"
] | 1
|
2019-10-20T21:07:11.000Z
|
2019-10-20T21:07:11.000Z
|
setup.py
|
jbn/idgraph
|
f610e72bed77de011f2a6610daa3fb45ea4caa8c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [ ]
setup_requirements = ['pytest-runner', 'requests', 'IPython', 'jmespath']
test_requirements = ['pytest>=3', ]
setup(
author="John Bjorn Nelson",
author_email='jbn@abreka.com',
python_requires='>=3, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Provides IPython cell magic for dgraph queries, mutations, and alteration remote execution.",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='idgraph',
name='idgraph',
packages=find_packages(include=['idgraph', 'idgraph.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/jbn/idgraph',
version='0.0.1',
zip_safe=False,
)
| 30.346939
| 111
| 0.639543
|
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [ ]
setup_requirements = ['pytest-runner', 'requests', 'IPython', 'jmespath']
test_requirements = ['pytest>=3', ]
setup(
author="John Bjorn Nelson",
author_email='jbn@abreka.com',
python_requires='>=3, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Provides IPython cell magic for dgraph queries, mutations, and alteration remote execution.",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='idgraph',
name='idgraph',
packages=find_packages(include=['idgraph', 'idgraph.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/jbn/idgraph',
version='0.0.1',
zip_safe=False,
)
| true
| true
|
f712e80bcddd2868db294705cd39a8d69135bad5
| 3,615
|
py
|
Python
|
xlab/local_settings.py
|
topd333/Xlab
|
28d89b3b18717957229ca52cb2cbbbc20bd31eae
|
[
"Unlicense"
] | null | null | null |
xlab/local_settings.py
|
topd333/Xlab
|
28d89b3b18717957229ca52cb2cbbbc20bd31eae
|
[
"Unlicense"
] | null | null | null |
xlab/local_settings.py
|
topd333/Xlab
|
28d89b3b18717957229ca52cb2cbbbc20bd31eae
|
[
"Unlicense"
] | null | null | null |
#import django.conf.global_settings as DEFAULT_SETTINGS
from .settings import *
import os
from django.utils.translation import ugettext as _
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
ADMINS = (
('Admin User', 'jamesh@linkinulife.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'xlab_dev', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'django',
'PASSWORD': 'xxxxxxxxxxxxx',
'HOST': '127.0.0.1', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '3306', # Set to empty string for default.
'ATOMIC_REQUESTS': True,
},
'oxdata': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'oxdata',
'USER': 'txservice',
        'PASSWORD': 'xxxxxxxxxx',
'HOST': '127.0.0.1',
'PORT': '3306',
'ATOMIC_REQUESTS': True,
},
'grid_space': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'grid_space',
'USER': 'django',
'PASSWORD': 'xxxxxxxxxxxxxx',
'HOST': '127.0.0.1',
'PORT': '3306',
'ATOMIC_REQUESTS': True,
},
'robust_grid': {
'NAME': 'robust_grid',
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'USER': 'opensim',
'PORT': '3306',
'PASSWORD': 'xxxxxxxxxxxxxxxxx',
},
'estates': {
'NAME': 'estates',
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'USER': 'os_estates',
'PORT': '3308',
'PASSWORD': 'xxxxxxxxxxxxxxx',
},
}
DATABASE_ROUTERS = ['grid_db.dbhelper.SpaceRouter',
'grid_db.dbhelper.RobustGridRouter',
'grid_db.dbhelper.EstatesRouter',
#'grid_db.dbhelper.StagingRouter',
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'l$ykc444%f&s8dmqvcpsl29@pj8**3&abg$8+l*%(ad=&z*6jkh'
# Site Application
SITE_NAME = 'Linkinu'
SITE_TITLE = 'Linkinu'
# Welecome Application
WELCOME_HEADING = 'Linkinu'
WELCOME_TITLE = 'Linkinu'
# grid_user application
# Activation link, etc.
ACCOUNT_SERVER_ADDRESS = 'http://linkinulife.com'
LOGIN_URL = "/login"
AUTH_USER_MODEL = 'grid_user.User'
AUTH_SERVER_URL = 'http://linkinulife.com:8003'
ACCOUNT_SERVER_URL = 'http://linkinulife.com:8003'
ACCOUNT_ADMIN_EMAIL = 'jamesh@linkinulife.com'
XMLRPC_GATEWAY_IP = '144.76.18.178'
# Grid-Wide Estate Database Default Settings
ESTATE_DATABASE = 'estates'
ESTATE_DATASOURCE = 'linkinulife.com'
ESTATE_DATABASE_PORT = '3308'
ESTATE_DATABASE_USER = 'os_estates'
ESTATE_DATABASE_PASSWORD = 'xxxxxxxxxxxxxxxx'
# Default Estate Settings
DEFAULT_ESTATE_NAME = "Mainland"
DEFAULT_ESTATE_OWNER_NAME = "Governor LinkniU"
# Settings for grid user database for import.
USER_DATA_HOST = "127.0.0.1"
USER_DATA_USERNAME = "root"
USER_DATA_USERPASS = "xxxxxxxxxxxxxxxxxxxxxxxxx"
USER_DATA_DBNAME = "robust_grid"
# Settings for oxdata database for import.
OXDATA_DATA_HOST = "127.0.0.1"
OXDATA_DATA_USERNAME = "txservice"
OXDATA_DATA_USERPASS = "xxxxxxxxxxxxxxxxxxxx"
OXDATA_DATA_DBNAME = "oxdata"
LANGUAGES = (
    ('en', u'English'),
('ar', u'Arabic'),
('hi', u'Hindi'),
)
LOCALE_PATHS = (
    os.path.join(os.path.dirname(__file__), '../locale'),
)
| 26.195652
| 136
| 0.6213
|
from .settings import *
import os
from django.utils.translation import ugettext as _
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
ADMINS = (
('Admin User', 'jamesh@linkinulife.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'xlab_dev',
'USER': 'django',
'PASSWORD': 'xxxxxxxxxxxxx',
'HOST': '127.0.0.1',
'PORT': '3306',
'ATOMIC_REQUESTS': True,
},
'oxdata': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'oxdata',
'USER': 'txservice',
        'PASSWORD': 'xxxxxxxxxx',
'HOST': '127.0.0.1',
'PORT': '3306',
'ATOMIC_REQUESTS': True,
},
'grid_space': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'grid_space',
'USER': 'django',
'PASSWORD': 'xxxxxxxxxxxxxx',
'HOST': '127.0.0.1',
'PORT': '3306',
'ATOMIC_REQUESTS': True,
},
'robust_grid': {
'NAME': 'robust_grid',
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'USER': 'opensim',
'PORT': '3306',
'PASSWORD': 'xxxxxxxxxxxxxxxxx',
},
'estates': {
'NAME': 'estates',
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'USER': 'os_estates',
'PORT': '3308',
'PASSWORD': 'xxxxxxxxxxxxxxx',
},
}
DATABASE_ROUTERS = ['grid_db.dbhelper.SpaceRouter',
'grid_db.dbhelper.RobustGridRouter',
'grid_db.dbhelper.EstatesRouter',
#'grid_db.dbhelper.StagingRouter',
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'l$ykc444%f&s8dmqvcpsl29@pj8**3&abg$8+l*%(ad=&z*6jkh'
SITE_NAME = 'Linkinu'
SITE_TITLE = 'Linkinu'
WELCOME_HEADING = 'Linkinu'
WELCOME_TITLE = 'Linkinu'
ACCOUNT_SERVER_ADDRESS = 'http://linkinulife.com'
LOGIN_URL = "/login"
AUTH_USER_MODEL = 'grid_user.User'
AUTH_SERVER_URL = 'http://linkinulife.com:8003'
ACCOUNT_SERVER_URL = 'http://linkinulife.com:8003'
ACCOUNT_ADMIN_EMAIL = 'jamesh@linkinulife.com'
XMLRPC_GATEWAY_IP = '144.76.18.178'
ESTATE_DATABASE = 'estates'
ESTATE_DATASOURCE = 'linkinulife.com'
ESTATE_DATABASE_PORT = '3308'
ESTATE_DATABASE_USER = 'os_estates'
ESTATE_DATABASE_PASSWORD = 'xxxxxxxxxxxxxxxx'
DEFAULT_ESTATE_NAME = "Mainland"
DEFAULT_ESTATE_OWNER_NAME = "Governor LinkniU"
USER_DATA_HOST = "127.0.0.1"
USER_DATA_USERNAME = "root"
USER_DATA_USERPASS = "xxxxxxxxxxxxxxxxxxxxxxxxx"
USER_DATA_DBNAME = "robust_grid"
OXDATA_DATA_HOST = "127.0.0.1"
OXDATA_DATA_USERNAME = "txservice"
OXDATA_DATA_USERPASS = "xxxxxxxxxxxxxxxxxxxx"
OXDATA_DATA_DBNAME = "oxdata"
LANGUAGES = (
    ('en', u'English'),
('ar', u'Arabic'),
('hi', u'Hindi'),
)
LOCALE_PATHS = (
    os.path.join(os.path.dirname(__file__), '../locale'),
)
| false
| true
|
f712e81c27b57e8b397111576bc790c2e0fc749e
| 1,567
|
py
|
Python
|
algorithms_and_data_structures/binary-search.py
|
amgad01/algorithms
|
53eecf06e907cde806d4b78dc78fcd70d0271e3e
|
[
"MIT"
] | 1
|
2021-03-05T18:13:02.000Z
|
2021-03-05T18:13:02.000Z
|
algorithms_and_data_structures/binary-search.py
|
amgad01/algorithms
|
53eecf06e907cde806d4b78dc78fcd70d0271e3e
|
[
"MIT"
] | null | null | null |
algorithms_and_data_structures/binary-search.py
|
amgad01/algorithms
|
53eecf06e907cde806d4b78dc78fcd70d0271e3e
|
[
"MIT"
] | 1
|
2021-07-25T01:55:12.000Z
|
2021-07-25T01:55:12.000Z
|
from util import time_it
# Time complexity: O(N)
@time_it
def linear_search(list1: [], element) -> int:
    """Returns the index of a given element in a given list, otherwise returns -1"""
for index, item in enumerate(list1):
if item == element:
return index
return -1
@time_it
def binary_search(list1: [], element) -> int:
    """Returns the index of a given element in a given sorted list, otherwise returns -1"""
list_length = len(list1)
left = 0
right = list_length - 1
while left <= right:
middle = (left + right) // 2
middle_element = list1[middle]
if element == middle_element:
return middle
if element < middle_element:
right = middle - 1
else:
left = middle + 1
return -1
def binary_search_recursive(list1, element, left=0, right=None):
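    # Recursive variant: repeatedly halve the [left, right] range around the target;
    # an empty range means the element is not present.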
if right is None:
right = len(list1) - 1
if right < left:
return -1
mid_index = (left + right) // 2
mid_number = list1[mid_index]
if mid_number == element:
return mid_index
if mid_number < element:
left = mid_index + 1
else:
right = mid_index - 1
return binary_search_recursive(list1, element, left, right)
if __name__ == "__main__":
nums = [10, 23, 35, 87, 120, 940]
linear_search([i for i in range(10000001)], 1000000)
binary_search([i for i in range(10000001)], 1000000)
for i in nums:
print( binary_search_recursive(nums, i))
print( binary_search_recursive(nums, i+1))
| 27.017241
| 92
| 0.611997
|
from util import time_it
@time_it
def linear_search(list1: [], element) -> int:
    """Returns the index of a given element in a given list, otherwise returns -1"""
for index, item in enumerate(list1):
if item == element:
return index
return -1
@time_it
def binary_search(list1: [], element) -> int:
    """Returns the index of a given element in a given sorted list, otherwise returns -1"""
list_length = len(list1)
left = 0
right = list_length - 1
while left <= right:
middle = (left + right) // 2
middle_element = list1[middle]
if element == middle_element:
return middle
if element < middle_element:
right = middle - 1
else:
left = middle + 1
return -1
def binary_search_recursive(list1, element, left=0, right=None):
if right is None:
right = len(list1) - 1
if right < left:
return -1
mid_index = (left + right) // 2
mid_number = list1[mid_index]
if mid_number == element:
return mid_index
if mid_number < element:
left = mid_index + 1
else:
right = mid_index - 1
return binary_search_recursive(list1, element, left, right)
if __name__ == "__main__":
nums = [10, 23, 35, 87, 120, 940]
linear_search([i for i in range(10000001)], 1000000)
binary_search([i for i in range(10000001)], 1000000)
for i in nums:
print( binary_search_recursive(nums, i))
print( binary_search_recursive(nums, i+1))
| false
| true
|
f712e8c8aad123e2fecb19d1455f5c0d1ee29767
| 1,768
|
py
|
Python
|
color_format.py
|
among-us-bot/aque-worker
|
662ac8b762af753f1d184b6e8532ef794b6a7a0a
|
[
"MIT"
] | 1
|
2020-11-10T18:21:59.000Z
|
2020-11-10T18:21:59.000Z
|
color_format.py
|
among-us-bot/aque-worker
|
662ac8b762af753f1d184b6e8532ef794b6a7a0a
|
[
"MIT"
] | null | null | null |
color_format.py
|
among-us-bot/aque-worker
|
662ac8b762af753f1d184b6e8532ef794b6a7a0a
|
[
"MIT"
] | null | null | null |
"""
Created by Epic at 10/13/20
Original script by FireDiscordBot on GitHub
"""
import logging
from copy import copy
from logging import Logger, DEBUG
import sys
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def getcolor(color=None):
return COLOR_SEQ % (30 + (color or WHITE))
def formatter_message(message):
for k, v in COLORS.items():
message = message.replace(k, v)
return message
LEVELS = {
"WARNING": YELLOW,
"INFO": GREEN,
"DEBUG": BLUE,
"CRITICAL": YELLOW,
"ERROR": RED
}
COLORS = {
"$GREEN": getcolor(GREEN),
"$BLUE": getcolor(BLUE),
"$RED": getcolor(RED),
"$YELLOW": getcolor(YELLOW),
"$BLACK": getcolor(BLACK),
"$MAGENTA": getcolor(MAGENTA),
"$CYAN": getcolor(CYAN),
"$WHITE": getcolor(WHITE),
"$RESET": RESET_SEQ,
"$BOLD": BOLD_SEQ
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg):
super().__init__(msg)
def format(self, record):
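        # Format a copy so the color substitutions never mutate the record shared with other handlers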
record = copy(record)
levelname = record.levelname
if levelname in LEVELS:
levelname_color = COLOR_SEQ % (30 + LEVELS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
for k, v in COLORS.items():
record.msg = str(record.msg).replace(k, v)
return super().format(record)
def basicConfig(logger: Logger):
stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(DEBUG)
color_format = formatter_message(
"[$BOLD%(name)s$RESET][%(levelname)s] %(message)s $RESET($BOLD%(filename)s$RESET:%(lineno)d)")
stdout.setFormatter(ColoredFormatter(color_format))
logger.addHandler(stdout)
| 24.219178
| 102
| 0.636312
|
import logging
from copy import copy
from logging import Logger, DEBUG
import sys
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def getcolor(color=None):
return COLOR_SEQ % (30 + (color or WHITE))
def formatter_message(message):
for k, v in COLORS.items():
message = message.replace(k, v)
return message
LEVELS = {
"WARNING": YELLOW,
"INFO": GREEN,
"DEBUG": BLUE,
"CRITICAL": YELLOW,
"ERROR": RED
}
COLORS = {
"$GREEN": getcolor(GREEN),
"$BLUE": getcolor(BLUE),
"$RED": getcolor(RED),
"$YELLOW": getcolor(YELLOW),
"$BLACK": getcolor(BLACK),
"$MAGENTA": getcolor(MAGENTA),
"$CYAN": getcolor(CYAN),
"$WHITE": getcolor(WHITE),
"$RESET": RESET_SEQ,
"$BOLD": BOLD_SEQ
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg):
super().__init__(msg)
def format(self, record):
record = copy(record)
levelname = record.levelname
if levelname in LEVELS:
levelname_color = COLOR_SEQ % (30 + LEVELS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
for k, v in COLORS.items():
record.msg = str(record.msg).replace(k, v)
return super().format(record)
def basicConfig(logger: Logger):
stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(DEBUG)
color_format = formatter_message(
"[$BOLD%(name)s$RESET][%(levelname)s] %(message)s $RESET($BOLD%(filename)s$RESET:%(lineno)d)")
stdout.setFormatter(ColoredFormatter(color_format))
logger.addHandler(stdout)
| true
| true
|
f712e8db6a738c49cf8e7633062bc54f0157a74a
| 713
|
py
|
Python
|
main.py
|
python3f/spectral_clustering
|
bd5900dfa7ada69bd77080b905ef08ea62b420e9
|
[
"MIT"
] | null | null | null |
main.py
|
python3f/spectral_clustering
|
bd5900dfa7ada69bd77080b905ef08ea62b420e9
|
[
"MIT"
] | null | null | null |
main.py
|
python3f/spectral_clustering
|
bd5900dfa7ada69bd77080b905ef08ea62b420e9
|
[
"MIT"
] | null | null | null |
from sklearn.cluster import KMeans
from sklearn.neighbors import kneighbors_graph
from scipy.spatial.distance import pdist, squareform
from scipy.sparse.csgraph import laplacian
import numpy as np
def spectral_clustering(X, n_clusters=2, n_neighbors=10):
    """Cluster samples with spectral clustering on a k-nearest-neighbor graph.

    Args:
        X: input samples, array (num, dim)
        n_clusters: no. of clusters
        n_neighbors: neighborhood size
    Returns:
        Y: labels for samples, array (num,)
    """
n, d = X.shape
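    # Build a k-nearest-neighbor connectivity graph over the samples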
A = kneighbors_graph(X, n_neighbors, mode='connectivity').toarray()
L = laplacian(A, normed=True)
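    # Eigendecompose the normalized Laplacian, keep the real parts,
    # and sort the eigenpairs by ascending eigenvalue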
w, v = np.linalg.eig(L)
w, v = w.real, v.real
i = np.argsort(w)
w, v = w[i], v[:,i]
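    # Run k-means on the coordinates given by the two eigenvectors with the smallest eigenvalues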
Y = KMeans(n_clusters).fit_predict(v[:,:2])
return Y
| 27.423077
| 71
| 0.683029
|
from sklearn.cluster import KMeans
from sklearn.neighbors import kneighbors_graph
from scipy.spatial.distance import pdist, squareform
from scipy.sparse.csgraph import laplacian
import numpy as np
def spectral_clustering(X, n_clusters=2, n_neighbors=10):
n, d = X.shape
A = kneighbors_graph(X, n_neighbors, mode='connectivity').toarray()
L = laplacian(A, normed=True)
w, v = np.linalg.eig(L)
w, v = w.real, v.real
i = np.argsort(w)
w, v = w[i], v[:,i]
Y = KMeans(n_clusters).fit_predict(v[:,:2])
return Y
| true
| true
|
f712e989579dcf78423172da1b70edb9359eb352
| 1,688
|
py
|
Python
|
set_matrix_zeroes.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | 6
|
2021-05-21T01:10:42.000Z
|
2021-12-16T16:12:30.000Z
|
set_matrix_zeroes.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
set_matrix_zeroes.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
from typing import Callable
class Solution:
def setZeroes(self, matrix: list[list[int]]) -> None:
"""Do not return anything, modify matrix in-place instead."""
first_column_zero = False
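        # Reuse the first row and first column as markers for rows/columns that must be
        # zeroed (O(1) extra space); a separate flag covers the first column itself.
        # First pass: record every zero found in those markers.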
for row in matrix:
for j, cell in enumerate(row):
if cell != 0:
continue
row[0] = 0
if j == 0:
first_column_zero = True
else:
matrix[0][j] = 0
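        # Second pass: zero any remaining cell whose row marker or column marker is set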
for i, row in enumerate(matrix[1:], start=1):
for j, cell in enumerate(row[1:], start=1):
if row[0] == 0:
row[j] = 0
if matrix[0][j] == 0:
matrix[i][j] = 0
# first row check
if matrix[0][0] == 0:
first_row = matrix[0]
for i in range(len(first_row)):
first_row[i] = 0
# first column check
if first_column_zero:
for i in range(len(matrix)):
matrix[i][0] = 0
tests = [
(
([[1, 1, 1], [1, 0, 1], [1, 1, 1]],),
[[1, 0, 1], [0, 0, 0], [1, 0, 1]],
),
(
([[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]],),
[[0, 0, 0, 0], [0, 4, 5, 0], [0, 3, 1, 0]],
),
(
([[1, 2, 3, 4], [5, 0, 7, 8], [0, 10, 11, 12], [13, 14, 15, 0]],),
[[0, 0, 3, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
),
]
def validator(
setZeroes: Callable[[list[list[int]]], None],
inputs: tuple[list[list[int]]],
expected: list[list[int]]
) -> None:
matrix, = inputs
setZeroes(matrix)
assert matrix == expected, (matrix, expected)
| 26.375
| 74
| 0.414692
|
from typing import Callable
class Solution:
def setZeroes(self, matrix: list[list[int]]) -> None:
first_column_zero = False
for row in matrix:
for j, cell in enumerate(row):
if cell != 0:
continue
row[0] = 0
if j == 0:
first_column_zero = True
else:
matrix[0][j] = 0
for i, row in enumerate(matrix[1:], start=1):
for j, cell in enumerate(row[1:], start=1):
if row[0] == 0:
row[j] = 0
if matrix[0][j] == 0:
matrix[i][j] = 0
if matrix[0][0] == 0:
first_row = matrix[0]
for i in range(len(first_row)):
first_row[i] = 0
if first_column_zero:
for i in range(len(matrix)):
matrix[i][0] = 0
tests = [
(
([[1, 1, 1], [1, 0, 1], [1, 1, 1]],),
[[1, 0, 1], [0, 0, 0], [1, 0, 1]],
),
(
([[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]],),
[[0, 0, 0, 0], [0, 4, 5, 0], [0, 3, 1, 0]],
),
(
([[1, 2, 3, 4], [5, 0, 7, 8], [0, 10, 11, 12], [13, 14, 15, 0]],),
[[0, 0, 3, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
),
]
def validator(
setZeroes: Callable[[list[list[int]]], None],
inputs: tuple[list[list[int]]],
expected: list[list[int]]
) -> None:
matrix, = inputs
setZeroes(matrix)
assert matrix == expected, (matrix, expected)
| true
| true
|
f712e99b409c6e441711b6025c241579d0555fa2
| 1,611
|
py
|
Python
|
froide_campaign/migrations/0002_auto_20160123_1454.py
|
okfde/froide-campaign
|
2757888c59dcf89b2b33fba5a4fd517d046aa6cd
|
[
"MIT"
] | 5
|
2016-01-27T19:00:50.000Z
|
2021-11-15T12:23:24.000Z
|
froide_campaign/migrations/0002_auto_20160123_1454.py
|
okfde/froide-campaign
|
2757888c59dcf89b2b33fba5a4fd517d046aa6cd
|
[
"MIT"
] | 2
|
2020-11-02T11:48:44.000Z
|
2020-11-03T15:39:46.000Z
|
froide_campaign/migrations/0002_auto_20160123_1454.py
|
okfde/froide-campaign
|
2757888c59dcf89b2b33fba5a4fd517d046aa6cd
|
[
"MIT"
] | 1
|
2020-10-30T09:20:53.000Z
|
2020-10-30T09:20:53.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('froide_campaign', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='informationobject',
options={'ordering': ('ordering',)},
),
migrations.AddField(
model_name='informationobject',
name='ordering',
field=models.CharField(max_length=255, blank=True),
),
migrations.AlterField(
model_name='campaign',
name='template',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='context',
field=models.JSONField(blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='documents',
field=models.ManyToManyField(to='foirequest.FoiAttachment', blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='foirequest',
field=models.ForeignKey(blank=True, to='foirequest.FoiRequest', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
migrations.AlterField(
model_name='informationobject',
name='publicbody',
field=models.ForeignKey(blank=True, to='publicbody.PublicBody', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
| 32.22
| 133
| 0.599628
|
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('froide_campaign', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='informationobject',
options={'ordering': ('ordering',)},
),
migrations.AddField(
model_name='informationobject',
name='ordering',
field=models.CharField(max_length=255, blank=True),
),
migrations.AlterField(
model_name='campaign',
name='template',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='context',
field=models.JSONField(blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='documents',
field=models.ManyToManyField(to='foirequest.FoiAttachment', blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='foirequest',
field=models.ForeignKey(blank=True, to='foirequest.FoiRequest', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
migrations.AlterField(
model_name='informationobject',
name='publicbody',
field=models.ForeignKey(blank=True, to='publicbody.PublicBody', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
| true
| true
|
f712ea1efe173795afb893bc3ccf4c0139b4b0b3
| 3,709
|
py
|
Python
|
orbit_transfer/models/__init__.py
|
sinzlab/orbit_transfer
|
812d89af5c7ab26d9ea26766a4250ae023bb20b8
|
[
"MIT"
] | null | null | null |
orbit_transfer/models/__init__.py
|
sinzlab/orbit_transfer
|
812d89af5c7ab26d9ea26766a4250ae023bb20b8
|
[
"MIT"
] | null | null | null |
orbit_transfer/models/__init__.py
|
sinzlab/orbit_transfer
|
812d89af5c7ab26d9ea26766a4250ae023bb20b8
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from torch.hub import load_state_dict_from_url
from nnfabrik.utility.nn_helpers import load_state_dict
from nntransfer.models.resnet import resnet_builder
from nntransfer.models.utils import get_model_parameters
from nntransfer.models.vgg import vgg_builder
from nntransfer.models.lenet import lenet_builder
from nntransfer.models.wrappers import *
from ..configs.model import (
ClassificationModel,
)
from .cnn import cnn_builder
from .group_cnn import gcnn_builder
from .learned_equiv import equiv_builder
from .mlp import mlp_builder
from .vit import vit_builder
def classification_model_builder(data_loader, seed: int, **config):
config = ClassificationModel.from_dict(config)
torch.manual_seed(seed)
np.random.seed(seed)
if "vgg" in config.type:
model = vgg_builder(seed, config)
from torchvision.models.vgg import model_urls
elif "resnet" in config.type:
model = resnet_builder(seed, config)
from torchvision.models.resnet import model_urls
elif "lenet" in config.type:
model = lenet_builder(seed, config)
elif "mlp" in config.type:
model = mlp_builder(seed, config)
elif "vit" in config.type:
model = vit_builder(seed, config)
elif "gcnn" in config.type:
model = gcnn_builder(seed, config)
elif "cnn" in config.type:
model = cnn_builder(seed, config)
elif "equiv_transfer" in config.type:
model = equiv_builder(seed, config)
else:
raise Exception("Unknown type {}".format(config.type))
if config.pretrained:
print("Downloading pretrained model:", flush=True)
url = (
model_urls[config.type]
if not config.pretrained_url
else config.pretrained_url
)
state_dict = load_state_dict_from_url(url, progress=True)
try:
load_state_dict(model, state_dict)
except:
load_state_dict(model, state_dict["model_state_dict"])
print("Model with {} parameters.".format(get_model_parameters(model)))
if config.add_buffer:
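        # Register a zero-initialized buffer alongside every trainable parameter;
        # tuple entries additionally prepend a leading dimension of size k.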
for n, p in model.named_parameters():
if p.requires_grad:
n = n.replace(".", "__")
for b in config.add_buffer:
if isinstance(b, str):
model.register_buffer(
f"{n}_{b}",
p.detach().clone().zero_(),
)
else:
k = b[1]
b = b[0]
model.register_buffer(
f"{n}_{b}",
torch.zeros(k, *p.data.shape),
)
if config.add_custom_buffer:
for key, size in config.add_custom_buffer.items():
model.register_buffer(
key,
torch.zeros(size),
)
# Add wrappers
if config.get_intermediate_rep:
model = IntermediateLayerGetter(
model, return_layers=config.get_intermediate_rep, keep_output=True
)
if config.noise_adv_regression or config.noise_adv_classification:
assert not config.self_attention
model = NoiseAdvWrapper(
model,
input_size=model.fc.in_features
if "resnet" in config.type
else model.n_features,
hidden_size=model.fc.in_features if "resnet" in config.type else 4096,
classification=config.noise_adv_classification,
num_noise_readout_layers=config.num_noise_readout_layers,
sigmoid_output=config.noise_sigmoid_output,
)
return model
| 35.663462
| 82
| 0.619035
|
import torch
import numpy as np
from torch.hub import load_state_dict_from_url
from nnfabrik.utility.nn_helpers import load_state_dict
from nntransfer.models.resnet import resnet_builder
from nntransfer.models.utils import get_model_parameters
from nntransfer.models.vgg import vgg_builder
from nntransfer.models.lenet import lenet_builder
from nntransfer.models.wrappers import *
from ..configs.model import (
ClassificationModel,
)
from .cnn import cnn_builder
from .group_cnn import gcnn_builder
from .learned_equiv import equiv_builder
from .mlp import mlp_builder
from .vit import vit_builder
def classification_model_builder(data_loader, seed: int, **config):
config = ClassificationModel.from_dict(config)
torch.manual_seed(seed)
np.random.seed(seed)
if "vgg" in config.type:
model = vgg_builder(seed, config)
from torchvision.models.vgg import model_urls
elif "resnet" in config.type:
model = resnet_builder(seed, config)
from torchvision.models.resnet import model_urls
elif "lenet" in config.type:
model = lenet_builder(seed, config)
elif "mlp" in config.type:
model = mlp_builder(seed, config)
elif "vit" in config.type:
model = vit_builder(seed, config)
elif "gcnn" in config.type:
model = gcnn_builder(seed, config)
elif "cnn" in config.type:
model = cnn_builder(seed, config)
elif "equiv_transfer" in config.type:
model = equiv_builder(seed, config)
else:
raise Exception("Unknown type {}".format(config.type))
if config.pretrained:
print("Downloading pretrained model:", flush=True)
url = (
model_urls[config.type]
if not config.pretrained_url
else config.pretrained_url
)
state_dict = load_state_dict_from_url(url, progress=True)
try:
load_state_dict(model, state_dict)
except:
load_state_dict(model, state_dict["model_state_dict"])
print("Model with {} parameters.".format(get_model_parameters(model)))
if config.add_buffer:
for n, p in model.named_parameters():
if p.requires_grad:
n = n.replace(".", "__")
for b in config.add_buffer:
if isinstance(b, str):
model.register_buffer(
f"{n}_{b}",
p.detach().clone().zero_(),
)
else:
k = b[1]
b = b[0]
model.register_buffer(
f"{n}_{b}",
torch.zeros(k, *p.data.shape),
)
if config.add_custom_buffer:
for key, size in config.add_custom_buffer.items():
model.register_buffer(
key,
torch.zeros(size),
)
if config.get_intermediate_rep:
model = IntermediateLayerGetter(
model, return_layers=config.get_intermediate_rep, keep_output=True
)
if config.noise_adv_regression or config.noise_adv_classification:
assert not config.self_attention
model = NoiseAdvWrapper(
model,
input_size=model.fc.in_features
if "resnet" in config.type
else model.n_features,
hidden_size=model.fc.in_features if "resnet" in config.type else 4096,
classification=config.noise_adv_classification,
num_noise_readout_layers=config.num_noise_readout_layers,
sigmoid_output=config.noise_sigmoid_output,
)
return model
| true
| true
|
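The builder above picks an architecture by substring-matching config.type and then optionally loads pretrained weights. A minimal, self-contained sketch of that dispatch pattern, assuming nothing from nntransfer (DummyConfig, the toy builder functions and the BUILDERS registry are illustrative stand-ins, not part of the library):

import torch
import torch.nn as nn


def mlp_builder(seed: int, config) -> nn.Module:
    # Illustrative stand-in for one of the per-architecture builders.
    torch.manual_seed(seed)
    return nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, config.num_classes))


def cnn_builder(seed: int, config) -> nn.Module:
    torch.manual_seed(seed)
    return nn.Sequential(nn.Conv2d(1, 8, 3), nn.AdaptiveAvgPool2d(1),
                         nn.Flatten(), nn.Linear(8, config.num_classes))


BUILDERS = {"mlp": mlp_builder, "cnn": cnn_builder}  # hypothetical registry


class DummyConfig:
    type = "mlp"
    num_classes = 10


def build(seed: int, config) -> nn.Module:
    # Same dispatch-by-substring idea as classification_model_builder above.
    for key, builder in BUILDERS.items():
        if key in config.type:
            return builder(seed, config)
    raise Exception("Unknown type {}".format(config.type))


model = build(42, DummyConfig())
print(sum(p.numel() for p in model.parameters()), "parameters")

The real classification_model_builder additionally registers buffers and applies the wrappers (IntermediateLayerGetter, NoiseAdvWrapper), which this sketch leaves out.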
f712ea94c86b298e1e59b372d543565e614ba7a8
| 18,505
|
py
|
Python
|
letsencrypt/tests/client_test.py
|
Alearranat/letsencrypt
|
e09398c9a8d11d4449ad8639472f6e212291412a
|
[
"Apache-2.0"
] | null | null | null |
letsencrypt/tests/client_test.py
|
Alearranat/letsencrypt
|
e09398c9a8d11d4449ad8639472f6e212291412a
|
[
"Apache-2.0"
] | null | null | null |
letsencrypt/tests/client_test.py
|
Alearranat/letsencrypt
|
e09398c9a8d11d4449ad8639472f6e212291412a
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for letsencrypt.client."""
import os
import shutil
import tempfile
import unittest
import OpenSSL
import mock
from acme import jose
from letsencrypt import account
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt.tests import test_util
KEY = test_util.load_vector("rsa512_key.pem")
CSR_SAN = test_util.load_vector("csr-san.der")
class ConfigHelper(object):
"""Creates a dummy object to imitate a namespace object
Example: cfg = ConfigHelper(redirect=True, hsts=False, uir=False)
will result in: cfg.redirect=True, cfg.hsts=False, etc.
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
class RegisterTest(unittest.TestCase):
"""Tests for letsencrypt.client.register."""
def setUp(self):
self.config = mock.MagicMock(rsa_key_size=1024, register_unsafely_without_email=False)
self.account_storage = account.AccountMemoryStorage()
self.tos_cb = mock.MagicMock()
def _call(self):
from letsencrypt.client import register
return register(self.config, self.account_storage, self.tos_cb)
def test_no_tos(self):
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client.register().terms_of_service = "http://tos"
with mock.patch("letsencrypt.account.report_new_account"):
self.tos_cb.return_value = False
self.assertRaises(errors.Error, self._call)
self.tos_cb.return_value = True
self._call()
self.tos_cb = None
self._call()
def test_it(self):
with mock.patch("letsencrypt.client.acme_client.Client"):
with mock.patch("letsencrypt.account.report_new_account"):
self._call()
@mock.patch("letsencrypt.account.report_new_account")
@mock.patch("letsencrypt.client.display_ops.get_email")
def test_email_retry(self, _rep, mock_get_email):
from acme import messages
msg = "Validation of contact mailto:sousaphone@improbablylongggstring.tld failed"
mx_err = messages.Error(detail=msg, typ="malformed", title="title")
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client().register.side_effect = [mx_err, mock.MagicMock()]
self._call()
self.assertEqual(mock_get_email.call_count, 1)
def test_needs_email(self):
self.config.email = None
self.assertRaises(errors.Error, self._call)
@mock.patch("letsencrypt.client.logger")
def test_without_email(self, mock_logger):
with mock.patch("letsencrypt.client.acme_client.Client"):
with mock.patch("letsencrypt.account.report_new_account"):
self.config.email = None
self.config.register_unsafely_without_email = True
self._call()
mock_logger.warn.assert_called_once_with(mock.ANY)
class ClientTest(unittest.TestCase):
"""Tests for letsencrypt.client.Client."""
def setUp(self):
self.config = mock.MagicMock(
no_verify_ssl=False, config_dir="/etc/letsencrypt")
# pylint: disable=star-args
self.account = mock.MagicMock(**{"key.pem": KEY})
self.eg_domains = ["example.com", "www.example.com"]
from letsencrypt.client import Client
with mock.patch("letsencrypt.client.acme_client.Client") as acme:
self.acme_client = acme
self.acme = acme.return_value = mock.MagicMock()
self.client = Client(
config=self.config, account_=self.account,
dv_auth=None, installer=None)
def test_init_acme_verify_ssl(self):
net = self.acme_client.call_args[1]["net"]
self.assertTrue(net.verify_ssl)
def _mock_obtain_certificate(self):
self.client.auth_handler = mock.MagicMock()
self.acme.request_issuance.return_value = mock.sentinel.certr
self.acme.fetch_chain.return_value = mock.sentinel.chain
def _check_obtain_certificate(self):
self.client.auth_handler.get_authorizations.assert_called_once_with(self.eg_domains)
self.acme.request_issuance.assert_called_once_with(
jose.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, CSR_SAN)),
self.client.auth_handler.get_authorizations())
self.acme.fetch_chain.assert_called_once_with(mock.sentinel.certr)
# FIXME move parts of this to test_cli.py...
@mock.patch("letsencrypt.cli._process_domain")
def test_obtain_certificate_from_csr(self, mock_process_domain):
self._mock_obtain_certificate()
from letsencrypt import cli
test_csr = le_util.CSR(form="der", file=None, data=CSR_SAN)
mock_parsed_args = mock.MagicMock()
with mock.patch("letsencrypt.client.le_util.CSR") as mock_CSR:
mock_CSR.return_value = test_csr
mock_parsed_args.domains = self.eg_domains[:]
mock_parser = mock.MagicMock(cli.HelpfulArgumentParser)
cli.HelpfulArgumentParser.handle_csr(mock_parser, mock_parsed_args)
# make sure cli processing occurred
cli_processed = (call[0][1] for call in mock_process_domain.call_args_list)
self.assertEqual(set(cli_processed), set(("example.com", "www.example.com")))
# Now provoke an inconsistent domains error...
mock_parsed_args.domains.append("hippopotamus.io")
self.assertRaises(errors.ConfigurationError,
cli.HelpfulArgumentParser.handle_csr, mock_parser, mock_parsed_args)
self.assertEqual(
(mock.sentinel.certr, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(self.eg_domains, test_csr))
# and that the cert was obtained correctly
self._check_obtain_certificate()
@mock.patch("letsencrypt.client.crypto_util")
def test_obtain_certificate(self, mock_crypto_util):
self._mock_obtain_certificate()
csr = le_util.CSR(form="der", file=None, data=CSR_SAN)
mock_crypto_util.init_save_csr.return_value = csr
mock_crypto_util.init_save_key.return_value = mock.sentinel.key
domains = ["example.com", "www.example.com"]
self.assertEqual(
self.client.obtain_certificate(domains),
(mock.sentinel.certr, mock.sentinel.chain, mock.sentinel.key, csr))
mock_crypto_util.init_save_key.assert_called_once_with(
self.config.rsa_key_size, self.config.key_dir)
mock_crypto_util.init_save_csr.assert_called_once_with(
mock.sentinel.key, domains, self.config.csr_dir)
self._check_obtain_certificate()
def test_save_certificate(self):
certs = ["matching_cert.pem", "cert.pem", "cert-san.pem"]
tmp_path = tempfile.mkdtemp()
os.chmod(tmp_path, 0o755) # TODO: really??
certr = mock.MagicMock(body=test_util.load_comparable_cert(certs[0]))
chain_cert = [test_util.load_comparable_cert(certs[1]),
test_util.load_comparable_cert(certs[2])]
candidate_cert_path = os.path.join(tmp_path, "certs", "cert.pem")
candidate_chain_path = os.path.join(tmp_path, "chains", "chain.pem")
candidate_fullchain_path = os.path.join(tmp_path, "chains", "fullchain.pem")
cert_path, chain_path, fullchain_path = self.client.save_certificate(
certr, chain_cert, candidate_cert_path, candidate_chain_path,
candidate_fullchain_path)
self.assertEqual(os.path.dirname(cert_path),
os.path.dirname(candidate_cert_path))
self.assertEqual(os.path.dirname(chain_path),
os.path.dirname(candidate_chain_path))
self.assertEqual(os.path.dirname(fullchain_path),
os.path.dirname(candidate_fullchain_path))
with open(cert_path, "r") as cert_file:
cert_contents = cert_file.read()
self.assertEqual(cert_contents, test_util.load_vector(certs[0]))
with open(chain_path, "r") as chain_file:
chain_contents = chain_file.read()
self.assertEqual(chain_contents, test_util.load_vector(certs[1]) +
test_util.load_vector(certs[2]))
shutil.rmtree(tmp_path)
def test_deploy_certificate_success(self):
self.assertRaises(errors.Error, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer = mock.MagicMock()
self.client.installer = installer
self.client.deploy_certificate(
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.deploy_cert.assert_called_once_with(
cert_path=os.path.abspath("cert"),
chain_path=os.path.abspath("chain"),
domain='foo.bar',
fullchain_path='fullchain',
key_path=os.path.abspath("key"))
self.assertEqual(installer.save.call_count, 2)
installer.restart.assert_called_once_with()
def test_deploy_certificate_failure(self):
installer = mock.MagicMock()
self.client.installer = installer
installer.deploy_cert.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
def test_deploy_certificate_save_failure(self):
installer = mock.MagicMock()
self.client.installer = installer
installer.save.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
@mock.patch("letsencrypt.client.zope.component.getUtility")
def test_deploy_certificate_restart_failure(self, mock_get_utility):
installer = mock.MagicMock()
installer.restart.side_effect = [errors.PluginError, None]
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch("letsencrypt.client.zope.component.getUtility")
def test_deploy_certificate_restart_failure2(self, mock_get_utility):
installer = mock.MagicMock()
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config(self, mock_enhancements):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_once_with("foo.bar", "redirect", None)
self.assertEqual(installer.save.call_count, 1)
installer.restart.assert_called_once_with()
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_no_ask(self, mock_enhancements):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect", "ensure-http-header"]
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "redirect", None)
config = ConfigHelper(redirect=False, hsts=True, uir=False)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "ensure-http-header",
"Strict-Transport-Security")
config = ConfigHelper(redirect=False, hsts=False, uir=True)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertEqual(installer.save.call_count, 3)
self.assertEqual(installer.restart.call_count, 3)
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_unsupported(self, mock_enhancements):
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = []
config = ConfigHelper(redirect=None, hsts=True, uir=True)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_not_called()
mock_enhancements.ask.assert_not_called()
def test_enhance_config_no_installer(self):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_enhance_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.enhance.side_effect = errors.PluginError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
installer.recovery_routine.assert_called_once_with()
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_save_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.save.side_effect = errors.PluginError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
installer.recovery_routine.assert_called_once_with()
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_restart_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.restart.side_effect = [errors.PluginError, None]
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_restart_failure2(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
class RollbackTest(unittest.TestCase):
"""Tests for letsencrypt.client.rollback."""
def setUp(self):
self.m_install = mock.MagicMock()
@classmethod
def _call(cls, checkpoints, side_effect):
from letsencrypt.client import rollback
with mock.patch("letsencrypt.client"
".display_ops.pick_installer") as mock_pick_installer:
mock_pick_installer.side_effect = side_effect
rollback(None, checkpoints, {}, mock.MagicMock())
def test_no_problems(self):
self._call(1, self.m_install)
self.assertEqual(self.m_install().rollback_checkpoints.call_count, 1)
self.assertEqual(self.m_install().restart.call_count, 1)
def test_no_installer(self):
self._call(1, None) # Just make sure no exceptions are raised
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 43.954869
| 94
| 0.675277
|
import os
import shutil
import tempfile
import unittest
import OpenSSL
import mock
from acme import jose
from letsencrypt import account
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt.tests import test_util
KEY = test_util.load_vector("rsa512_key.pem")
CSR_SAN = test_util.load_vector("csr-san.der")
class ConfigHelper(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
class RegisterTest(unittest.TestCase):
def setUp(self):
self.config = mock.MagicMock(rsa_key_size=1024, register_unsafely_without_email=False)
self.account_storage = account.AccountMemoryStorage()
self.tos_cb = mock.MagicMock()
def _call(self):
from letsencrypt.client import register
return register(self.config, self.account_storage, self.tos_cb)
def test_no_tos(self):
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client.register().terms_of_service = "http://tos"
with mock.patch("letsencrypt.account.report_new_account"):
self.tos_cb.return_value = False
self.assertRaises(errors.Error, self._call)
self.tos_cb.return_value = True
self._call()
self.tos_cb = None
self._call()
def test_it(self):
with mock.patch("letsencrypt.client.acme_client.Client"):
with mock.patch("letsencrypt.account.report_new_account"):
self._call()
@mock.patch("letsencrypt.account.report_new_account")
@mock.patch("letsencrypt.client.display_ops.get_email")
def test_email_retry(self, _rep, mock_get_email):
from acme import messages
msg = "Validation of contact mailto:sousaphone@improbablylongggstring.tld failed"
mx_err = messages.Error(detail=msg, typ="malformed", title="title")
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client().register.side_effect = [mx_err, mock.MagicMock()]
self._call()
self.assertEqual(mock_get_email.call_count, 1)
def test_needs_email(self):
self.config.email = None
self.assertRaises(errors.Error, self._call)
@mock.patch("letsencrypt.client.logger")
def test_without_email(self, mock_logger):
with mock.patch("letsencrypt.client.acme_client.Client"):
with mock.patch("letsencrypt.account.report_new_account"):
self.config.email = None
self.config.register_unsafely_without_email = True
self._call()
mock_logger.warn.assert_called_once_with(mock.ANY)
class ClientTest(unittest.TestCase):
def setUp(self):
self.config = mock.MagicMock(
no_verify_ssl=False, config_dir="/etc/letsencrypt")
self.account = mock.MagicMock(**{"key.pem": KEY})
self.eg_domains = ["example.com", "www.example.com"]
from letsencrypt.client import Client
with mock.patch("letsencrypt.client.acme_client.Client") as acme:
self.acme_client = acme
self.acme = acme.return_value = mock.MagicMock()
self.client = Client(
config=self.config, account_=self.account,
dv_auth=None, installer=None)
def test_init_acme_verify_ssl(self):
net = self.acme_client.call_args[1]["net"]
self.assertTrue(net.verify_ssl)
def _mock_obtain_certificate(self):
self.client.auth_handler = mock.MagicMock()
self.acme.request_issuance.return_value = mock.sentinel.certr
self.acme.fetch_chain.return_value = mock.sentinel.chain
def _check_obtain_certificate(self):
self.client.auth_handler.get_authorizations.assert_called_once_with(self.eg_domains)
self.acme.request_issuance.assert_called_once_with(
jose.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, CSR_SAN)),
self.client.auth_handler.get_authorizations())
self.acme.fetch_chain.assert_called_once_with(mock.sentinel.certr)
@mock.patch("letsencrypt.cli._process_domain")
def test_obtain_certificate_from_csr(self, mock_process_domain):
self._mock_obtain_certificate()
from letsencrypt import cli
test_csr = le_util.CSR(form="der", file=None, data=CSR_SAN)
mock_parsed_args = mock.MagicMock()
with mock.patch("letsencrypt.client.le_util.CSR") as mock_CSR:
mock_CSR.return_value = test_csr
mock_parsed_args.domains = self.eg_domains[:]
mock_parser = mock.MagicMock(cli.HelpfulArgumentParser)
cli.HelpfulArgumentParser.handle_csr(mock_parser, mock_parsed_args)
cli_processed = (call[0][1] for call in mock_process_domain.call_args_list)
self.assertEqual(set(cli_processed), set(("example.com", "www.example.com")))
mock_parsed_args.domains.append("hippopotamus.io")
self.assertRaises(errors.ConfigurationError,
cli.HelpfulArgumentParser.handle_csr, mock_parser, mock_parsed_args)
self.assertEqual(
(mock.sentinel.certr, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(self.eg_domains, test_csr))
self._check_obtain_certificate()
@mock.patch("letsencrypt.client.crypto_util")
def test_obtain_certificate(self, mock_crypto_util):
self._mock_obtain_certificate()
csr = le_util.CSR(form="der", file=None, data=CSR_SAN)
mock_crypto_util.init_save_csr.return_value = csr
mock_crypto_util.init_save_key.return_value = mock.sentinel.key
domains = ["example.com", "www.example.com"]
self.assertEqual(
self.client.obtain_certificate(domains),
(mock.sentinel.certr, mock.sentinel.chain, mock.sentinel.key, csr))
mock_crypto_util.init_save_key.assert_called_once_with(
self.config.rsa_key_size, self.config.key_dir)
mock_crypto_util.init_save_csr.assert_called_once_with(
mock.sentinel.key, domains, self.config.csr_dir)
self._check_obtain_certificate()
def test_save_certificate(self):
certs = ["matching_cert.pem", "cert.pem", "cert-san.pem"]
tmp_path = tempfile.mkdtemp()
os.chmod(tmp_path, 0o755)
certr = mock.MagicMock(body=test_util.load_comparable_cert(certs[0]))
chain_cert = [test_util.load_comparable_cert(certs[1]),
test_util.load_comparable_cert(certs[2])]
candidate_cert_path = os.path.join(tmp_path, "certs", "cert.pem")
candidate_chain_path = os.path.join(tmp_path, "chains", "chain.pem")
candidate_fullchain_path = os.path.join(tmp_path, "chains", "fullchain.pem")
cert_path, chain_path, fullchain_path = self.client.save_certificate(
certr, chain_cert, candidate_cert_path, candidate_chain_path,
candidate_fullchain_path)
self.assertEqual(os.path.dirname(cert_path),
os.path.dirname(candidate_cert_path))
self.assertEqual(os.path.dirname(chain_path),
os.path.dirname(candidate_chain_path))
self.assertEqual(os.path.dirname(fullchain_path),
os.path.dirname(candidate_fullchain_path))
with open(cert_path, "r") as cert_file:
cert_contents = cert_file.read()
self.assertEqual(cert_contents, test_util.load_vector(certs[0]))
with open(chain_path, "r") as chain_file:
chain_contents = chain_file.read()
self.assertEqual(chain_contents, test_util.load_vector(certs[1]) +
test_util.load_vector(certs[2]))
shutil.rmtree(tmp_path)
def test_deploy_certificate_success(self):
self.assertRaises(errors.Error, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer = mock.MagicMock()
self.client.installer = installer
self.client.deploy_certificate(
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.deploy_cert.assert_called_once_with(
cert_path=os.path.abspath("cert"),
chain_path=os.path.abspath("chain"),
domain='foo.bar',
fullchain_path='fullchain',
key_path=os.path.abspath("key"))
self.assertEqual(installer.save.call_count, 2)
installer.restart.assert_called_once_with()
def test_deploy_certificate_failure(self):
installer = mock.MagicMock()
self.client.installer = installer
installer.deploy_cert.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
def test_deploy_certificate_save_failure(self):
installer = mock.MagicMock()
self.client.installer = installer
installer.save.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
@mock.patch("letsencrypt.client.zope.component.getUtility")
def test_deploy_certificate_restart_failure(self, mock_get_utility):
installer = mock.MagicMock()
installer.restart.side_effect = [errors.PluginError, None]
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch("letsencrypt.client.zope.component.getUtility")
def test_deploy_certificate_restart_failure2(self, mock_get_utility):
installer = mock.MagicMock()
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config(self, mock_enhancements):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_once_with("foo.bar", "redirect", None)
self.assertEqual(installer.save.call_count, 1)
installer.restart.assert_called_once_with()
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_no_ask(self, mock_enhancements):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect", "ensure-http-header"]
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "redirect", None)
config = ConfigHelper(redirect=False, hsts=True, uir=False)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "ensure-http-header",
"Strict-Transport-Security")
config = ConfigHelper(redirect=False, hsts=False, uir=True)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertEqual(installer.save.call_count, 3)
self.assertEqual(installer.restart.call_count, 3)
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_unsupported(self, mock_enhancements):
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = []
config = ConfigHelper(redirect=None, hsts=True, uir=True)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_not_called()
mock_enhancements.ask.assert_not_called()
def test_enhance_config_no_installer(self):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_enhance_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.enhance.side_effect = errors.PluginError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
installer.recovery_routine.assert_called_once_with()
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_save_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.save.side_effect = errors.PluginError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
installer.recovery_routine.assert_called_once_with()
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_restart_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.restart.side_effect = [errors.PluginError, None]
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_restart_failure2(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
class RollbackTest(unittest.TestCase):
def setUp(self):
self.m_install = mock.MagicMock()
@classmethod
def _call(cls, checkpoints, side_effect):
from letsencrypt.client import rollback
with mock.patch("letsencrypt.client"
".display_ops.pick_installer") as mock_pick_installer:
mock_pick_installer.side_effect = side_effect
rollback(None, checkpoints, {}, mock.MagicMock())
def test_no_problems(self):
self._call(1, self.m_install)
self.assertEqual(self.m_install().rollback_checkpoints.call_count, 1)
self.assertEqual(self.m_install().restart.call_count, 1)
def test_no_installer(self):
self._call(1, None)
if __name__ == "__main__":
unittest.main()
| true
| true
|
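Several of the tests above (for example test_deploy_certificate_restart_failure and test_enhance_config_restart_failure) drive a mock with a list-valued side_effect so that the first call raises and the second succeeds. A small self-contained sketch of that pattern using the standard-library unittest.mock; FakeError and restart_with_rollback are made-up stand-ins for errors.PluginError and the client's rollback logic:

import unittest
from unittest import mock


class FakeError(Exception):
    """Stands in for errors.PluginError in the tests above."""


def restart_with_rollback(installer):
    # Hypothetical helper: try to restart, roll back and retry once on failure.
    try:
        installer.restart()
    except FakeError:
        installer.rollback_checkpoints()
        installer.restart()


class SideEffectListTest(unittest.TestCase):
    def test_first_restart_fails_second_succeeds(self):
        installer = mock.MagicMock()
        # First call raises, second call returns normally -- the same trick as
        # installer.restart.side_effect = [errors.PluginError, None] above.
        installer.restart.side_effect = [FakeError, None]
        restart_with_rollback(installer)
        installer.rollback_checkpoints.assert_called_once_with()
        self.assertEqual(installer.restart.call_count, 2)


if __name__ == "__main__":
    unittest.main()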
f712ec4ae2274d14604fa9d454afd175219c4e6f
| 2,981
|
py
|
Python
|
test/language/array_types/python/SubtypedBuiltinAutoArrayTest.py
|
Klebert-Engineering/zserio-1
|
fbb4fc42d9ab6f3afa6c040a36267357399180f4
|
[
"BSD-3-Clause"
] | 2
|
2019-02-06T17:50:24.000Z
|
2019-11-20T16:51:34.000Z
|
test/language/array_types/python/SubtypedBuiltinAutoArrayTest.py
|
Klebert-Engineering/zserio-1
|
fbb4fc42d9ab6f3afa6c040a36267357399180f4
|
[
"BSD-3-Clause"
] | 1
|
2019-11-25T16:25:51.000Z
|
2019-11-25T18:09:39.000Z
|
test/language/array_types/python/SubtypedBuiltinAutoArrayTest.py
|
Klebert-Engineering/zserio-1
|
fbb4fc42d9ab6f3afa6c040a36267357399180f4
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import zserio
from testutils import getZserioApi
class AutoArrayTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "array_types.zs").subtyped_builtin_auto_array
def testBitSizeOfLength1(self):
self._checkBitSizeOf(self.AUTO_ARRAY_LENGTH1)
def testBitSizeOfLength2(self):
self._checkBitSizeOf(self.AUTO_ARRAY_LENGTH2)
def testInitializeOffsetsLength1(self):
self._checkInitializeOffsets(self.AUTO_ARRAY_LENGTH1)
def testInitializeOffsetsLength2(self):
self._checkInitializeOffsets(self.AUTO_ARRAY_LENGTH2)
def testReadLength1(self):
self._checkRead(self.AUTO_ARRAY_LENGTH1)
def testReadLength2(self):
self._checkRead(self.AUTO_ARRAY_LENGTH2)
def testWriteLength1(self):
self._checkWrite(self.AUTO_ARRAY_LENGTH1)
def testWriteLength2(self):
self._checkWrite(self.AUTO_ARRAY_LENGTH2)
def _checkBitSizeOf(self, numElements):
array = list(range(numElements))
subtypedBuiltinAutoArray = self.api.SubtypedBuiltinAutoArray.fromFields(array)
bitPosition = 2
autoArrayBitSize = 8 + numElements * 8
self.assertEqual(autoArrayBitSize, subtypedBuiltinAutoArray.bitSizeOf(bitPosition))
def _checkInitializeOffsets(self, numElements):
array = list(range(numElements))
subtypedBuiltinAutoArray = self.api.SubtypedBuiltinAutoArray.fromFields(array)
bitPosition = 2
expectedEndBitPosition = bitPosition + 8 + numElements * 8
self.assertEqual(expectedEndBitPosition, subtypedBuiltinAutoArray.initializeOffsets(bitPosition))
def _checkRead(self, numElements):
writer = zserio.BitStreamWriter()
AutoArrayTest._writeAutoArrayToStream(writer, numElements)
reader = zserio.BitStreamReader(writer.getByteArray())
subtypedBuiltinAutoArray = self.api.SubtypedBuiltinAutoArray.fromReader(reader)
array = subtypedBuiltinAutoArray.getArray()
self.assertEqual(numElements, len(array))
for i in range(numElements):
self.assertEqual(i, array[i])
def _checkWrite(self, numElements):
array = list(range(numElements))
subtypedBuiltinAutoArray = self.api.SubtypedBuiltinAutoArray.fromFields(array)
writer = zserio.BitStreamWriter()
subtypedBuiltinAutoArray.write(writer)
reader = zserio.BitStreamReader(writer.getByteArray())
readAutoArray = self.api.SubtypedBuiltinAutoArray.fromReader(reader)
readArray = readAutoArray.getArray()
self.assertEqual(numElements, len(readArray))
for i in range(numElements):
self.assertEqual(i, readArray[i])
@staticmethod
def _writeAutoArrayToStream(writer, numElements):
writer.writeVarUInt64(numElements)
for i in range(numElements):
writer.writeBits(i, 8)
AUTO_ARRAY_LENGTH1 = 5
AUTO_ARRAY_LENGTH2 = 10
| 36.802469
| 105
| 0.724925
|
import unittest
import zserio
from testutils import getZserioApi
class AutoArrayTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "array_types.zs").subtyped_builtin_auto_array
def testBitSizeOfLength1(self):
self._checkBitSizeOf(self.AUTO_ARRAY_LENGTH1)
def testBitSizeOfLength2(self):
self._checkBitSizeOf(self.AUTO_ARRAY_LENGTH2)
def testInitializeOffsetsLength1(self):
self._checkInitializeOffsets(self.AUTO_ARRAY_LENGTH1)
def testInitializeOffsetsLength2(self):
self._checkInitializeOffsets(self.AUTO_ARRAY_LENGTH2)
def testReadLength1(self):
self._checkRead(self.AUTO_ARRAY_LENGTH1)
def testReadLength2(self):
self._checkRead(self.AUTO_ARRAY_LENGTH2)
def testWriteLength1(self):
self._checkWrite(self.AUTO_ARRAY_LENGTH1)
def testWriteLength2(self):
self._checkWrite(self.AUTO_ARRAY_LENGTH2)
def _checkBitSizeOf(self, numElements):
array = list(range(numElements))
subtypedBuiltinAutoArray = self.api.SubtypedBuiltinAutoArray.fromFields(array)
bitPosition = 2
autoArrayBitSize = 8 + numElements * 8
self.assertEqual(autoArrayBitSize, subtypedBuiltinAutoArray.bitSizeOf(bitPosition))
def _checkInitializeOffsets(self, numElements):
array = list(range(numElements))
subtypedBuiltinAutoArray = self.api.SubtypedBuiltinAutoArray.fromFields(array)
bitPosition = 2
expectedEndBitPosition = bitPosition + 8 + numElements * 8
self.assertEqual(expectedEndBitPosition, subtypedBuiltinAutoArray.initializeOffsets(bitPosition))
def _checkRead(self, numElements):
writer = zserio.BitStreamWriter()
AutoArrayTest._writeAutoArrayToStream(writer, numElements)
reader = zserio.BitStreamReader(writer.getByteArray())
subtypedBuiltinAutoArray = self.api.SubtypedBuiltinAutoArray.fromReader(reader)
array = subtypedBuiltinAutoArray.getArray()
self.assertEqual(numElements, len(array))
for i in range(numElements):
self.assertEqual(i, array[i])
def _checkWrite(self, numElements):
array = list(range(numElements))
subtypedBuiltinAutoArray = self.api.SubtypedBuiltinAutoArray.fromFields(array)
writer = zserio.BitStreamWriter()
subtypedBuiltinAutoArray.write(writer)
reader = zserio.BitStreamReader(writer.getByteArray())
readAutoArray = self.api.SubtypedBuiltinAutoArray.fromReader(reader)
readArray = readAutoArray.getArray()
self.assertEqual(numElements, len(readArray))
for i in range(numElements):
self.assertEqual(i, readArray[i])
@staticmethod
def _writeAutoArrayToStream(writer, numElements):
writer.writeVarUInt64(numElements)
for i in range(numElements):
writer.writeBits(i, 8)
AUTO_ARRAY_LENGTH1 = 5
AUTO_ARRAY_LENGTH2 = 10
| true
| true
|
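The bit-size assertions above encode the layout assumption that an auto array is a one-byte varuint length prefix followed by one 8-bit element per entry, i.e. 8 + numElements * 8 bits. A pure-Python sketch of that calculation (no zserio required; the helper name is made up):

def subtyped_builtin_auto_array_bit_size(num_elements: int) -> int:
    # The varuint length prefix fits in one byte for the small lengths (5, 10)
    # exercised by the test above.
    length_prefix_bits = 8
    element_bits = 8  # the test writes each element with writer.writeBits(i, 8)
    return length_prefix_bits + num_elements * element_bits


assert subtyped_builtin_auto_array_bit_size(5) == 8 + 5 * 8
assert subtyped_builtin_auto_array_bit_size(10) == 8 + 10 * 8
print(subtyped_builtin_auto_array_bit_size(5), "bits for 5 elements")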
f712edc8a873297e7c4aff9875cda2f07f1f055b
| 1,399
|
py
|
Python
|
Configs/dm.py
|
KoshikKumar17/ilovepdf
|
c0a05787aa9573e3ead2e791b17bd3af4a386d6b
|
[
"Apache-2.0"
] | null | null | null |
Configs/dm.py
|
KoshikKumar17/ilovepdf
|
c0a05787aa9573e3ead2e791b17bd3af4a386d6b
|
[
"Apache-2.0"
] | null | null | null |
Configs/dm.py
|
KoshikKumar17/ilovepdf
|
c0a05787aa9573e3ead2e791b17bd3af4a386d6b
|
[
"Apache-2.0"
] | null | null | null |
# fileName: Configs/dm.py
# copyright ©️ 2021 nabilanavab
import os
#--------------->
#--------> CONFIG VAR.
#------------------->
class Config(object):
# get API_ID, API_HASH values from my.telegram.org (Mandatory)
API_ID = os.environ.get("API_ID")
API_HASH = os.environ.get("API_HASH")
# add API_TOKEN from @botfather (Mandatory)
API_TOKEN = os.environ.get("API_TOKEN")
# channel id for forced Subscription with -100 (Optional)
UPDATE_CHANNEL = os.environ.get("UPDATE_CHANNEL")
# get convertAPI secret (Optional)
CONVERT_API = os.environ.get("CONVERT_API")
# set maximum file size for preventing overload (Optional)
MAX_FILE_SIZE = os.environ.get("MAX_FILE_SIZE")
# add admins Id list by space seperated (Optional)
ADMINS = list(set(int(x) for x in os.environ.get("ADMINS", "0").split()))
if ADMINS:
# Bot only for admins [True/False] (Optional)
ADMIN_ONLY = os.environ.get("ADMIN_ONLY", False)
# banned Users cant use this bot (Optional)
BANNED_USERS = list(set(int(x) for x in os.environ.get("BANNED_USERS", "0").split()))
if not BANNED_USERS:
BANNED_USERS = []
# thumbnail
PDF_THUMBNAIL = "./thumbnail.jpeg"
# Telegram: @nabilanavab
| 26.903846
| 100
| 0.580415
|
import os
class Config(object):
API_ID = os.environ.get("API_ID")
API_HASH = os.environ.get("API_HASH")
API_TOKEN = os.environ.get("API_TOKEN")
UPDATE_CHANNEL = os.environ.get("UPDATE_CHANNEL")
CONVERT_API = os.environ.get("CONVERT_API")
MAX_FILE_SIZE = os.environ.get("MAX_FILE_SIZE")
ADMINS = list(set(int(x) for x in os.environ.get("ADMINS", "0").split()))
if ADMINS:
ADMIN_ONLY = os.environ.get("ADMIN_ONLY", False)
BANNED_USERS = list(set(int(x) for x in os.environ.get("BANNED_USERS", "0").split()))
if not BANNED_USERS:
BANNED_USERS = []
PDF_THUMBNAIL = "./thumbnail.jpeg"
| true
| true
|
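The ADMINS and BANNED_USERS parsing above turns a space-separated environment string into a deduplicated list of ints. A short sketch showing what that expression produces for example values (the IDs are placeholders):

import os

# Example values -- in a real deployment these come from the hosting platform.
os.environ["ADMINS"] = "111 222 222"
os.environ["BANNED_USERS"] = ""

admins = list(set(int(x) for x in os.environ.get("ADMINS", "0").split()))
banned = list(set(int(x) for x in os.environ.get("BANNED_USERS", "0").split()))

print(sorted(admins))  # [111, 222] -- duplicates collapsed by set()
print(banned)          # []         -- an empty string yields no tokens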
f712ee136bc001e5c792d4c74aba2a21258e8d8c
| 788
|
py
|
Python
|
python/quadrotorModel/rb_dynamics.py
|
smallpondtom/quadCaptureSim
|
b0433e353da9bd1f9a6a04a7cf3eeda73bd52019
|
[
"MIT"
] | null | null | null |
python/quadrotorModel/rb_dynamics.py
|
smallpondtom/quadCaptureSim
|
b0433e353da9bd1f9a6a04a7cf3eeda73bd52019
|
[
"MIT"
] | null | null | null |
python/quadrotorModel/rb_dynamics.py
|
smallpondtom/quadCaptureSim
|
b0433e353da9bd1f9a6a04a7cf3eeda73bd52019
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# *****************************************************************************
#
# Copyright (c) 2021
# Georgia Institute of Technology
# Tomoki Koike
# <tkoike3@gatech.edu>
#
# *****************************************************************************
#
# DESCRIPTION:
# Rigid body dynamics model of a quadrotor.
#
# LAST EDITED:
# 10-26-2021
#
# *****************************************************************************
# Modules *********************************************************************
from dataclasses import dataclass
import constants
import numpy as np
import utils
# Class
@dataclass(order=False, frozen=False)
class RBDYNAMICS:
m: float # mass of the quadrotor
I: np.ndarray # constant inertia matrix R[3x3]
small: int
| 22.514286
| 79
| 0.427665
|
from dataclasses import dataclass
import constants
import numpy as np
import utils
@dataclass(order=False, frozen=False)
class RBDYNAMICS:
m: float
I: np.ndarray
small: int
| true
| true
|
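The record above only declares the rigid-body parameter container. A minimal sketch of constructing such a dataclass with NumPy; a local stand-in class is used because constants and utils are project-internal modules not shown here, and the numeric values are examples only:

from dataclasses import dataclass

import numpy as np


@dataclass(order=False, frozen=False)
class RigidBodyParams:
    """Local stand-in mirroring the fields of RBDYNAMICS above."""
    m: float        # mass of the quadrotor [kg]
    I: np.ndarray   # constant 3x3 inertia matrix
    small: int      # small integer parameter (purpose not documented in the record)


params = RigidBodyParams(
    m=0.5,                                # example mass
    I=np.diag([2.3e-3, 2.3e-3, 4.0e-3]),  # example diagonal inertia values
    small=1,
)
print(params.m, params.I.shape, params.small)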
f712ee5300d01cf580380c25fb563ff27ed947d0
| 227
|
py
|
Python
|
pandoc/filter/python/FirstFilter/myfilter.py
|
mikoto2000/MiscellaneousStudy
|
3717edf599fea2cf3a63bd9599ca4e1ddfdd10a6
|
[
"MIT"
] | 3
|
2015-06-23T03:09:54.000Z
|
2019-03-27T09:10:13.000Z
|
pandoc/filter/python/FirstFilter/myfilter.py
|
mikoto2000/MiscellaneousStudy
|
3717edf599fea2cf3a63bd9599ca4e1ddfdd10a6
|
[
"MIT"
] | 10
|
2017-05-18T16:19:45.000Z
|
2022-02-26T06:16:43.000Z
|
pandoc/filter/python/FirstFilter/myfilter.py
|
mikoto2000/MiscellaneousStudy
|
3717edf599fea2cf3a63bd9599ca4e1ddfdd10a6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf8 -*-
from pandocfilters import toJSONFilter, Link, Str
def myfilter(key, value, form, meta):
if key == 'Link':
return Str("replaced_text")
if __name__ == "__main__":
toJSONFilter(myfilter)
| 18.916667
| 49
| 0.651982
|
from pandocfilters import toJSONFilter, Link, Str
def myfilter(key, value, form, meta):
if key == 'Link':
return Str("replaced_text")
if __name__ == "__main__":
toJSONFilter(myfilter)
| true
| true
|
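With pandocfilters, the action function is called as action(key, value, format, meta) for every element of the document AST and may return a replacement node. A slightly fuller sketch in the same style, upper-casing every Str node (the file name caps_filter.py is hypothetical); it would typically be run as pandoc doc.md --filter ./caps_filter.py:

# -*- coding: utf8 -*-
# caps_filter.py (hypothetical file name): upper-case all plain text in the document.
from pandocfilters import toJSONFilter, Str


def caps(key, value, form, meta):
    # 'Str' nodes carry their text directly in `value`.
    if key == 'Str':
        return Str(value.upper())


if __name__ == "__main__":
    toJSONFilter(caps)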
f712ee6bfe30e8b52429864f41f6b4669282abe5
| 1,352
|
wsgi
|
Python
|
trac/web/templates/deploy_trac.wsgi
|
rwbaumg/trac
|
a3b8eb6db4f4999fab421e31615bb8eb8da6fdba
|
[
"BSD-3-Clause"
] | null | null | null |
trac/web/templates/deploy_trac.wsgi
|
rwbaumg/trac
|
a3b8eb6db4f4999fab421e31615bb8eb8da6fdba
|
[
"BSD-3-Clause"
] | null | null | null |
trac/web/templates/deploy_trac.wsgi
|
rwbaumg/trac
|
a3b8eb6db4f4999fab421e31615bb8eb8da6fdba
|
[
"BSD-3-Clause"
] | null | null | null |
{##}#!${executable}
{##}# -*- coding: utf-8 -*-
{##}#
{##}# Copyright (C)2008-2009 Edgewall Software
{##}# Copyright (C) 2008 Noah Kantrowitz <noah@coderanger.net>
{##}# All rights reserved.
{##}#
{##}# This software is licensed as described in the file COPYING, which
{##}# you should have received as part of this distribution. The terms
{##}# are also available at http://trac.edgewall.org/wiki/TracLicense.
{##}#
{##}# This software consists of voluntary contributions made by many
{##}# individuals. For the exact contribution history, see the revision
{##}# history and logs, available at http://trac.edgewall.org/log/.
{##}#
{##}# Author: Noah Kantrowitz <noah@coderanger.net>
import os
def application(environ, start_request):
if not 'trac.env_parent_dir' in environ:
environ.setdefault('trac.env_path', ${repr(env.path)})
if 'PYTHON_EGG_CACHE' in environ:
os.environ['PYTHON_EGG_CACHE'] = environ['PYTHON_EGG_CACHE']
elif 'trac.env_path' in environ:
os.environ['PYTHON_EGG_CACHE'] = \
os.path.join(environ['trac.env_path'], '.egg-cache')
elif 'trac.env_parent_dir' in environ:
os.environ['PYTHON_EGG_CACHE'] = \
os.path.join(environ['trac.env_parent_dir'], '.egg-cache')
from trac.web.main import dispatch_request
return dispatch_request(environ, start_request)
| 42.25
| 71
| 0.681953
|
{
| false
| true
|
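The template above exposes the standard WSGI entry point and hands each request to Trac's dispatch_request. For reference, a minimal self-contained WSGI application with the same signature, runnable without Trac (note the template names the second argument start_request rather than the conventional start_response; the sketch mirrors that):

def application(environ, start_request):
    # WSGI entry point: report status and headers, then return an iterable of bytes.
    status = "200 OK"
    headers = [("Content-Type", "text/plain; charset=utf-8")]
    start_request(status, headers)
    path = environ.get("PATH_INFO", "/")
    return [("Hello from %s\n" % path).encode("utf-8")]


if __name__ == "__main__":
    # Quick local check using the standard library's reference server.
    from wsgiref.simple_server import make_server
    httpd = make_server("127.0.0.1", 8000, application)
    print("Serving one request on http://127.0.0.1:8000 ...")
    httpd.handle_request()  # handle a single request, then exit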
f712eebd0eb39e575762499a0ae3dc87e97345a0
| 20,474
|
py
|
Python
|
lib/parser.py
|
Harshalszz/Pansy
|
07b7072994374fb0c220230fa5e37ba359e7f4b8
|
[
"MIT"
] | null | null | null |
lib/parser.py
|
Harshalszz/Pansy
|
07b7072994374fb0c220230fa5e37ba359e7f4b8
|
[
"MIT"
] | null | null | null |
lib/parser.py
|
Harshalszz/Pansy
|
07b7072994374fb0c220230fa5e37ba359e7f4b8
|
[
"MIT"
] | null | null | null |
from lib.utils import token, nodes
from lib import errors
#######################################
# PARSE RESULT
#######################################
class ParseResult:
def __init__(self):
self.error = None
self.node = None
self.last_registered_advance_count = 0
self.advanced_count = 0
self.to_reverse_count = 0
def register_advancement(self):
self.advanced_count += 1
self.last_registered_advance_count += 1
def register(self, res):
self.last_registered_advance_count = res.advanced_count
self.advanced_count += res.advanced_count
if res.error: self.error = res.error
return res.node
def try_register(self, res):
if res.error:
self.to_reverse_count = res.advanced_count
return None
return self.register(res)
def success(self, node):
self.node = node
return self
def failure(self, error):
if not self.error or self.advanced_count == 0:
self.error = error
return self
#######################################
# PARSER
#######################################
class Parser:
def __init__(self, tokens):
self.tokens = tokens
self.tok_idx = -1
self.advance()
def advance(self):
self.tok_idx += 1
self.update_current_tok()
return self.current_tok
def reverse(self, amount=1):
self.tok_idx -= amount
self.update_current_tok()
return self.current_tok
def update_current_tok(self):
if self.tok_idx >= 0 and self.tok_idx < len(self.tokens):
self.current_tok = self.tokens[self.tok_idx]
def parse(self):
res = self.statements()
if not res.error and self.current_tok.type != token.T_EOF:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '+', '-', '*' or '/'"
))
return res
###################################
def statements(self):
res = ParseResult()
statements = []
pos_start = self.current_tok.pos_start.copy()
while self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
statement = res.register(self.statement())
if res.error: return res
statements.append(statement)
more_statements = True
while True:
newline_count = 0
while self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
newline_count += 1
if newline_count == 0:
more_statements = False
if not more_statements: break
statement = res.try_register(self.statement())
if not statement:
self.reverse(res.to_reverse_count)
more_statements = False
continue
statements.append(statement)
return res.success(nodes.ListNode(
statements, pos_start, self.current_tok.pos_end.copy()
))
def statement(self):
res = ParseResult()
pos_start = self.current_tok.pos_start.copy()
if self.current_tok.matches(token.T_KEYWORD, 'return'):
res.register_advancement()
self.advance()
expr = res.try_register(self.expr())
if not expr:
self.reverse(res.to_reverse_count)
return res.success(nodes.ReturnNode(expr, pos_start, self.current_tok.pos_end.copy()))
if self.current_tok.matches(token.T_KEYWORD, 'continue'):
res.register_advancement()
self.advance()
return res.success(nodes.ContinueNode(pos_start, self.current_tok.pos_end.copy()))
if self.current_tok.matches(token.T_KEYWORD, 'break'):
res.register_advancement()
self.advance()
return res.success(nodes.BreakNode(pos_start, self.current_tok.pos_end.copy()))
expr = res.register(self.expr())
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected 'break', 'continue', 'return', 'var', 'if', 'for', 'while', 'func', int, float, identifier, '+', '-', '(', ')' '[' or 'not'"
))
return res.success(expr)
def call(self):
res = ParseResult()
atom = res.register(self.atom())
if res.error: return res
if self.current_tok.type == token.T_LPAREN:
res.register_advancement()
self.advance()
arg_nodes = []
if self.current_tok.type == token.T_RPAREN:
res.register_advancement()
self.advance()
else:
arg_nodes.append(res.register(self.expr()))
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')', 'var', 'if', 'for', 'while', 'func', int, float, identifier, '+', '-', '(', '[' or 'not'"
))
while self.current_tok.type == token.T_COMMA:
res.register_advancement()
self.advance()
arg_nodes.append(res.register(self.expr()))
if res.error: return res
if self.current_tok.type != token.T_RPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ',' or ')'"
))
res.register_advancement()
self.advance()
return res.success(nodes.CallNode(atom, arg_nodes))
return res.success(atom)
def atom(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (token.T_INT, token.T_FLOAT):
res.register_advancement()
self.advance()
return res.success(nodes.NumberNode(tok))
        elif tok.type == token.T_STRING:
res.register_advancement()
self.advance()
return res.success(nodes.StringNode(tok))
elif tok.type == token.T_IDENTIFIER:
res.register_advancement()
self.advance()
return res.success(nodes.VarAccessNode(tok))
elif tok.type == token.T_LPAREN:
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
if self.current_tok.type == token.T_RPAREN:
res.register_advancement()
self.advance()
return res.success(expr)
else:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')'"
))
elif tok.type == token.T_LSQUARE:
list_expr = res.register(self.list_expr())
if res.error: return res
return res.success(list_expr)
elif tok.matches(token.T_KEYWORD, 'if'):
if_expr = res.register(self.if_expr())
if res.error: return res
return res.success(if_expr)
elif tok.matches(token.T_KEYWORD, 'for'):
for_expr = res.register(self.for_expr())
if res.error: return res
return res.success(for_expr)
elif tok.matches(token.T_KEYWORD, 'while'):
while_expr = res.register(self.while_expr())
if res.error: return res
return res.success(while_expr)
elif tok.matches(token.T_KEYWORD, 'func'):
func_def = res.register(self.func_def())
if res.error: return res
return res.success(func_def)
return res.failure(errors.InvalidSyntaxError(
tok.pos_start, tok.pos_end,
"Expected int or float, identifier, '+', '-' or '(', , '[', 'if', 'for', 'while' or 'func'"
))
def power(self):
return self.bin_op(self.call, (token.T_POW, ), self.factor)
def list_expr(self):
res = ParseResult()
element_nodes = []
pos_start = self.current_tok.pos_start.copy()
if self.current_tok.type != token.T_LSQUARE:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '['"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_RSQUARE:
res.register_advancement()
self.advance()
else:
element_nodes.append(res.register(self.expr()))
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ']', 'var', 'if', 'for', 'while', 'func', int, float, identifier, '+', '-', '(', '[' or 'not'"
))
while self.current_tok.type == token.T_COMMA:
res.register_advancement()
self.advance()
element_nodes.append(res.register(self.expr()))
if res.error: return res
if self.current_tok.type != token.T_RSQUARE:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ',' or ']'"
))
res.register_advancement()
self.advance()
return res.success(nodes.ListNode(
element_nodes, pos_start, self.current_tok.pos_end.copy()
))
def factor(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (token.T_PLUS, token.T_MINUS):
res.register_advancement()
self.advance()
factor = res.register(self.factor())
if res.error: return res
return res.success(nodes.UnaryOpNode(tok, factor))
return self.power()
def term(self):
return self.bin_op(self.factor, (token.T_MUL, token.T_DIV, token.T_INT_DIV, token.T_REMAINDER))
def arithm_expr(self):
return self.bin_op(self.term, (token.T_PLUS, token.T_MINUS))
def comp_expr(self):
res = ParseResult()
if self.current_tok.matches(token.T_KEYWORD, 'not'):
op_tok = self.current_tok
res.register_advancement()
self.advance()
node = res.register(self.comp_expr())
if res.error: return res
return res.success(nodes.UnaryOpNode(op_tok, node))
node = res.register(self.bin_op(self.arithm_expr, (token.T_EE, token.T_NE, token.T_LT, token.T_GT, token.T_LTE, token.T_GTE)))
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected int or float, identifier, '+', '-', '(', '[' or 'not'"
))
return res.success(node)
def expr(self):
res = ParseResult()
if self.current_tok.matches(token.T_KEYWORD, 'var'):
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_IDENTIFIER:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_EQ:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '='"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
return res.success(nodes.VarAssignNode(var_name, expr))
node = res.register(self.bin_op(self.comp_expr, ((token.T_KEYWORD, 'and'), (token.T_KEYWORD, 'or'))))
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected int or float, identifier, 'var', 'if', 'for', 'while', 'func', '+', '-', '(' or '['"
))
return res.success(node)
def if_expr(self):
res = ParseResult()
all_cases = res.register(self.if_expr_cases('if'))
if res.error: return res
cases, else_case = all_cases
return res.success(nodes.IfNode(cases, else_case))
def if_expr_cases(self, case_keyword):
res = ParseResult()
cases = []
else_case = None
if not self.current_tok.matches(token.T_KEYWORD, case_keyword):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '{case_keyword}'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.type == token.T_COLON:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ':'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
statements = res.register(self.statements())
if res.error: return res
cases.append((condition, statements, True))
if self.current_tok.matches(token.T_KEYWORD, 'end'):
res.register_advancement()
self.advance()
else:
all_cases = res.register(self.if_expr_b_or_c())
if res.error: return res
new_cases, else_case = all_cases
cases.extend(new_cases)
else:
expr = res.register(self.statement())
if res.error: return res
cases.append((condition, expr, False))
all_cases = res.register(self.if_expr_b_or_c())
if res.error: return res
new_cases, else_case = all_cases
cases.extend(new_cases)
return res.success((cases, else_case))
def if_expr_b(self):
return self.if_expr_cases('elif')
def if_expr_c(self):
res = ParseResult()
else_case = None
if self.current_tok.matches(token.T_KEYWORD, 'else'):
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_COLON:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ':'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
statements = res.register(self.statements())
if res.error: return res
else_case = (statements, True)
if self.current_tok.matches(token.T_KEYWORD, 'end'):
res.register_advancement()
self.advance()
else:
                return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected 'end'"
))
else:
expr = res.register(self.statement())
if res.error: return res
else_case = (expr, False)
return res.success(else_case)
def if_expr_b_or_c(self):
res = ParseResult()
cases, else_case = [], None
if self.current_tok.matches(token.T_KEYWORD, 'elif'):
all_cases = res.register(self.if_expr_b())
if res.error: return res
cases, else_case = all_cases
else:
else_case = res.register(self.if_expr_c())
if res.error: return res
return res.success((cases, else_case))
def for_expr(self):
res = ParseResult()
if not self.current_tok.matches(token.T_KEYWORD, 'for'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'for'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_IDENTIFIER:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_EQ:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '='"
))
res.register_advancement()
self.advance()
start_value = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(token.T_KEYWORD, 'to'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'to'"
))
res.register_advancement()
self.advance()
end_value = res.register(self.expr())
if res.error: return res
if self.current_tok.matches(token.T_KEYWORD, 'step'):
res.register_advancement()
self.advance()
step_value = res.register(self.expr())
if res.error: return res
else:
step_value = None
if not self.current_tok.type == token.T_COLON:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ':'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
body = res.register(self.statements())
if res.error: return res
if not self.current_tok.matches(token.T_KEYWORD, 'end'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'end'"
))
res.register_advancement()
self.advance()
return res.success(nodes.ForNode(var_name, start_value, end_value, step_value, body, True))
body = res.register(self.statement())
if res.error: return res
return res.success(nodes.ForNode(var_name, start_value, end_value, step_value, body, False))
def while_expr(self):
res = ParseResult()
if not self.current_tok.matches(token.T_KEYWORD, 'while'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'while'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.type == token.T_COLON:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ':'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
body = res.register(self.statements())
if res.error: return res
if not self.current_tok.matches(token.T_KEYWORD, 'end'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'end'"
))
res.register_advancement()
self.advance()
return res.success(nodes.WhileNode(condition, body, True))
body = res.register(self.statement())
if res.error: return res
return res.success(nodes.WhileNode(condition, body, False))
def func_def(self):
res = ParseResult()
if not self.current_tok.matches(token.T_KEYWORD, 'func'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'func'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_IDENTIFIER:
var_name_tok = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_LPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '('"
))
else:
var_name_tok = None
if self.current_tok.type != token.T_LPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or '('"
))
res.register_advancement()
self.advance()
arg_name_toks = []
if self.current_tok.type == token.T_IDENTIFIER:
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
while self.current_tok.type == token.T_COMMA:
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_IDENTIFIER:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_RPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected ',' or ')'"
))
else:
if self.current_tok.type != token.T_RPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or ')'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_ARROW:
res.register_advancement()
self.advance()
body = res.register(self.expr())
if res.error: return res
return res.success(nodes.FunctionDefNode(
var_name_tok,
arg_name_toks,
body,
True
))
if self.current_tok.type != token.T_LCURLY:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '{'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_NEWLINE:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '->' or a new line"
))
res.register_advancement()
self.advance()
body = res.register(self.statements())
if res.error: return res
if self.current_tok.type != token.T_RCURLY:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '}'"
))
res.register_advancement()
self.advance()
return res.success(nodes.FunctionDefNode(
var_name_tok,
arg_name_toks,
body,
False
))
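# Added illustration (hedged, not from the original source): judging from the
# tokens consumed above, the surface syntax this parser accepts appears to look
# roughly like the following toy-language snippets. Treat them as an inference
# from the grammar rules, not an official specification.
#
#   func add(a, b) -> a + b
#
#   func shout(msg) {
#       var loud = msg + "!"
#       loud
#   }
#
#   for i = 1 to 10 step 2:
#       i
#   end
#
#   while x < 5:
#       x
#   end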
###################################
def bin_op(self, func_a, ops, func_b=None):
if func_b == None:
func_b = func_a
res = ParseResult()
left = res.register(func_a())
if res.error: return res
while self.current_tok.type in ops or (self.current_tok.type, self.current_tok.value) in ops:
op_tok = self.current_tok
res.register_advancement()
self.advance()
right = res.register(func_b())
if res.error: return res
left = nodes.BinOpNode(left, op_tok, right)
return res.success(left)
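# Added note (hedged): bin_op is the generic left-associative driver behind the
# precedence ladder defined elsewhere in this parser, e.g.
#   term        = bin_op(factor, (T_MUL, T_DIV, T_INT_DIV, T_REMAINDER))
#   arithm_expr = bin_op(term,   (T_PLUS, T_MINUS))
# so an input like "1 + 2 * 3" parses as BinOpNode(1, '+', BinOpNode(2, '*', 3)):
# multiplication binds tighter because it sits lower in the ladder.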
| 26.452196
| 138
| 0.69283
|
from lib.utils import token, nodes
from lib import errors
es.error: return res
if self.current_tok.type == token.T_LPAREN:
res.register_advancement()
self.advance()
arg_nodes = []
if self.current_tok.type == token.T_RPAREN:
res.register_advancement()
self.advance()
else:
arg_nodes.append(res.register(self.expr()))
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')', 'var', 'if', 'for', 'while', 'func', int, float, identifier, '+', '-', '(', '[' or 'not'"
))
while self.current_tok.type == token.T_COMMA:
res.register_advancement()
self.advance()
arg_nodes.append(res.register(self.expr()))
if res.error: return res
if self.current_tok.type != token.T_RPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ',' or ')'"
))
res.register_advancement()
self.advance()
return res.success(nodes.CallNode(atom, arg_nodes))
return res.success(atom)
def atom(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (token.T_INT, token.T_FLOAT):
res.register_advancement()
self.advance()
return res.success(nodes.NumberNode(tok))
elif tok.type in (token.T_STRING):
res.register_advancement()
self.advance()
return res.success(nodes.StringNode(tok))
elif tok.type == token.T_IDENTIFIER:
res.register_advancement()
self.advance()
return res.success(nodes.VarAccessNode(tok))
elif tok.type == token.T_LPAREN:
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
if self.current_tok.type == token.T_RPAREN:
res.register_advancement()
self.advance()
return res.success(expr)
else:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')'"
))
elif tok.type == token.T_LSQUARE:
list_expr = res.register(self.list_expr())
if res.error: return res
return res.success(list_expr)
elif tok.matches(token.T_KEYWORD, 'if'):
if_expr = res.register(self.if_expr())
if res.error: return res
return res.success(if_expr)
elif tok.matches(token.T_KEYWORD, 'for'):
for_expr = res.register(self.for_expr())
if res.error: return res
return res.success(for_expr)
elif tok.matches(token.T_KEYWORD, 'while'):
while_expr = res.register(self.while_expr())
if res.error: return res
return res.success(while_expr)
elif tok.matches(token.T_KEYWORD, 'func'):
func_def = res.register(self.func_def())
if res.error: return res
return res.success(func_def)
return res.failure(errors.InvalidSyntaxError(
tok.pos_start, tok.pos_end,
"Expected int or float, identifier, '+', '-' or '(', , '[', 'if', 'for', 'while' or 'func'"
))
def power(self):
return self.bin_op(self.call, (token.T_POW, ), self.factor)
def list_expr(self):
res = ParseResult()
element_nodes = []
pos_start = self.current_tok.pos_start.copy()
if self.current_tok.type != token.T_LSQUARE:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '['"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_RSQUARE:
res.register_advancement()
self.advance()
else:
element_nodes.append(res.register(self.expr()))
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ']', 'var', 'if', 'for', 'while', 'func', int, float, identifier, '+', '-', '(', '[' or 'not'"
))
while self.current_tok.type == token.T_COMMA:
res.register_advancement()
self.advance()
element_nodes.append(res.register(self.expr()))
if res.error: return res
if self.current_tok.type != token.T_RSQUARE:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ',' or ']'"
))
res.register_advancement()
self.advance()
return res.success(nodes.ListNode(
element_nodes, pos_start, self.current_tok.pos_end.copy()
))
def factor(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (token.T_PLUS, token.T_MINUS):
res.register_advancement()
self.advance()
factor = res.register(self.factor())
if res.error: return res
return res.success(nodes.UnaryOpNode(tok, factor))
return self.power()
def term(self):
return self.bin_op(self.factor, (token.T_MUL, token.T_DIV, token.T_INT_DIV, token.T_REMAINDER))
def arithm_expr(self):
return self.bin_op(self.term, (token.T_PLUS, token.T_MINUS))
def comp_expr(self):
res = ParseResult()
if self.current_tok.matches(token.T_KEYWORD, 'not'):
op_tok = self.current_tok
res.register_advancement()
self.advance()
node = res.register(self.comp_expr())
if res.error: return res
return res.success(nodes.UnaryOpNode(op_tok, node))
node = res.register(self.bin_op(self.arithm_expr, (token.T_EE, token.T_NE, token.T_LT, token.T_GT, token.T_LTE, token.T_GTE)))
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected int or float, identifier, '+', '-', '(', '[' or 'not'"
))
return res.success(node)
def expr(self):
res = ParseResult()
if self.current_tok.matches(token.T_KEYWORD, 'var'):
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_IDENTIFIER:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_EQ:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '='"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
return res.success(nodes.VarAssignNode(var_name, expr))
node = res.register(self.bin_op(self.comp_expr, ((token.T_KEYWORD, 'and'), (token.T_KEYWORD, 'or'))))
if res.error:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected int or float, identifier, 'var', 'if', 'for', 'while', 'func', '+', '-', '(' or '['"
))
return res.success(node)
def if_expr(self):
res = ParseResult()
all_cases = res.register(self.if_expr_cases('if'))
if res.error: return res
cases, else_case = all_cases
return res.success(nodes.IfNode(cases, else_case))
def if_expr_cases(self, case_keyword):
res = ParseResult()
cases = []
else_case = None
if not self.current_tok.matches(token.T_KEYWORD, case_keyword):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '{case_keyword}'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.type == token.T_COLON:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ':'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
statements = res.register(self.statements())
if res.error: return res
cases.append((condition, statements, True))
if self.current_tok.matches(token.T_KEYWORD, 'end'):
res.register_advancement()
self.advance()
else:
all_cases = res.register(self.if_expr_b_or_c())
if res.error: return res
new_cases, else_case = all_cases
cases.extend(new_cases)
else:
expr = res.register(self.statement())
if res.error: return res
cases.append((condition, expr, False))
all_cases = res.register(self.if_expr_b_or_c())
if res.error: return res
new_cases, else_case = all_cases
cases.extend(new_cases)
return res.success((cases, else_case))
def if_expr_b(self):
return self.if_expr_cases('elif')
def if_expr_c(self):
res = ParseResult()
else_case = None
if self.current_tok.matches(token.T_KEYWORD, 'else'):
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_COLON:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ':'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
statements = res.register(self.statements())
if res.error: return res
else_case = (statements, True)
if self.current_tok.matches(token.T_KEYWORD, 'end'):
res.register_advancement()
self.advance()
else:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected 'end'"
))
else:
expr = res.register(self.statement())
if res.error: return res
else_case = (expr, False)
return res.success(else_case)
def if_expr_b_or_c(self):
res = ParseResult()
cases, else_case = [], None
if self.current_tok.matches(token.T_KEYWORD, 'elif'):
all_cases = res.register(self.if_expr_b())
if res.error: return res
cases, else_case = all_cases
else:
else_case = res.register(self.if_expr_c())
if res.error: return res
return res.success((cases, else_case))
def for_expr(self):
res = ParseResult()
if not self.current_tok.matches(token.T_KEYWORD, 'for'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'for'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_IDENTIFIER:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_EQ:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '='"
))
res.register_advancement()
self.advance()
start_value = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(token.T_KEYWORD, 'to'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'to'"
))
res.register_advancement()
self.advance()
end_value = res.register(self.expr())
if res.error: return res
if self.current_tok.matches(token.T_KEYWORD, 'step'):
res.register_advancement()
self.advance()
step_value = res.register(self.expr())
if res.error: return res
else:
step_value = None
if not self.current_tok.type == token.T_COLON:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ':'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
body = res.register(self.statements())
if res.error: return res
if not self.current_tok.matches(token.T_KEYWORD, 'end'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'end'"
))
res.register_advancement()
self.advance()
return res.success(nodes.ForNode(var_name, start_value, end_value, step_value, body, True))
body = res.register(self.statement())
if res.error: return res
return res.success(nodes.ForNode(var_name, start_value, end_value, step_value, body, False))
def while_expr(self):
res = ParseResult()
if not self.current_tok.matches(token.T_KEYWORD, 'while'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'while'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.type == token.T_COLON:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ':'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_NEWLINE:
res.register_advancement()
self.advance()
body = res.register(self.statements())
if res.error: return res
if not self.current_tok.matches(token.T_KEYWORD, 'end'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'end'"
))
res.register_advancement()
self.advance()
return res.success(nodes.WhileNode(condition, body, True))
body = res.register(self.statement())
if res.error: return res
return res.success(nodes.WhileNode(condition, body, False))
def func_def(self):
res = ParseResult()
if not self.current_tok.matches(token.T_KEYWORD, 'func'):
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'func'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_IDENTIFIER:
var_name_tok = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_LPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '('"
))
else:
var_name_tok = None
if self.current_tok.type != token.T_LPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or '('"
))
res.register_advancement()
self.advance()
arg_name_toks = []
if self.current_tok.type == token.T_IDENTIFIER:
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
while self.current_tok.type == token.T_COMMA:
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_IDENTIFIER:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_RPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected ',' or ')'"
))
else:
if self.current_tok.type != token.T_RPAREN:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or ')'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == token.T_ARROW:
res.register_advancement()
self.advance()
body = res.register(self.expr())
if res.error: return res
return res.success(nodes.FunctionDefNode(
var_name_tok,
arg_name_toks,
body,
True
))
if self.current_tok.type != token.T_LCURLY:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '{'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != token.T_NEWLINE:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '->' or a new line"
))
res.register_advancement()
self.advance()
body = res.register(self.statements())
if res.error: return res
if self.current_tok.type != token.T_RCURLY:
return res.failure(errors.InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '}'"
))
res.register_advancement()
self.advance()
return res.success(nodes.FunctionDefNode(
var_name_tok,
arg_name_toks,
body,
False
))
| true
| true
|
f712efa6c8e65b144dbf8de914a5873c5e8bb92d
| 6,135
|
py
|
Python
|
sanic_wtf/__init__.py
|
omarryhan/sanic-wtf
|
41c24f061fa16652a82d83753c3bee56f746e23a
|
[
"BSD-3-Clause"
] | 3
|
2019-04-11T11:01:54.000Z
|
2020-03-09T12:19:26.000Z
|
sanic_wtf/__init__.py
|
omarryhan/sanic-wtf
|
41c24f061fa16652a82d83753c3bee56f746e23a
|
[
"BSD-3-Clause"
] | null | null | null |
sanic_wtf/__init__.py
|
omarryhan/sanic-wtf
|
41c24f061fa16652a82d83753c3bee56f746e23a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from collections import ChainMap
from datetime import timedelta
from itertools import chain
from wtforms import Form
from wtforms.csrf.session import SessionCSRF
from wtforms.meta import DefaultMeta
from wtforms.validators import DataRequired, StopValidation
from wtforms.fields.core import Field
from ._patch import patch
from .recaptcha import RecaptchaField
__version__ = '1.0.3.dev0'
__all__ = [
'SanicForm',
'FileAllowed', 'file_allowed', 'FileRequired', 'file_required', 'RecaptchaField'
]
def to_bytes(text, encoding='utf8'):
if isinstance(text, str):
return text.encode(encoding)
return bytes(text)
def meta_for_request(request):
"""Create a meta dict object with settings from request.app"""
meta = {'csrf': False}
if not request:
return meta
config = request.app.config
csrf = meta['csrf'] = config.get('WTF_CSRF_ENABLED', True)
if not csrf:
return meta
meta['csrf_field_name'] = config.get('WTF_CSRF_FIELD_NAME', 'csrf_token')
secret = config.get('WTF_CSRF_SECRET_KEY')
if secret is None:
secret = config.get('SECRET_KEY')
if not secret:
raise ValueError(
'CSRF protection needs either WTF_CSRF_SECRET_KEY or SECRET_KEY')
meta['csrf_secret'] = to_bytes(secret)
seconds = config.get('WTF_CSRF_TIME_LIMIT', 1800)
meta['csrf_time_limit'] = timedelta(seconds=seconds)
name = config.get('WTF_CSRF_CONTEXT_NAME', 'session')
meta['csrf_context'] = request[name]
return meta
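# Added usage sketch (hedged): meta_for_request only reads request.app.config,
# so a minimal configuration that keeps CSRF protection enabled could look like
# the following (these are exactly the keys read above; providing the session
# object under request['session'] is left to separate middleware):
#
#   app.config.SECRET_KEY = 'change-me'              # or WTF_CSRF_SECRET_KEY
#   app.config.WTF_CSRF_TIME_LIMIT = 1800            # seconds
#   app.config.WTF_CSRF_CONTEXT_NAME = 'session'     # where the CSRF context lives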
SUBMIT_VERBS = frozenset({'DELETE', 'PATCH', 'POST', 'PUT'})
sentinel = object()
class FileRequired(DataRequired):
"""Validate that the data is a non-empty `sanic.request.File` object"""
def __call__(self, form, field):
# type sanic.request.File as of v 0.5.4 is:
# File = namedtuple('File', ['type', 'body', 'name'])
# here, we check whether the name contains anything
if not getattr(field.data, 'name', ''):
msg = self.message or field.gettext('This field is required.')
raise StopValidation(msg)
file_required = FileRequired
class FileAllowed:
"""Validate that the file (by extention) is one of the listed types"""
def __init__(self, extensions, message=None):
extensions = (ext.lower() for ext in extensions)
extensions = (
ext if ext.startswith('.') else '.' + ext for ext in extensions)
self.extensions = frozenset(extensions)
self.message = message
def __call__(self, form, field):
filename = getattr(field.data, 'name', '')
if not filename:
return
filename = filename.lower()
# testing with .endswith instead of the fastest `in` test, because
# there may be extensions with more than one dot (.), e.g. ".tar.gz"
if any(filename.endswith(ext) for ext in self.extensions):
return
raise StopValidation(self.message or field.gettext(
'File type is not allowed.'))
file_allowed = FileAllowed
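# Added example (hedged): both validators are meant to be attached to a wtforms
# FileField in the usual way; the form and field names here are illustrative.
#
#   from wtforms import FileField
#
#   class UploadForm(SanicForm):
#       document = FileField('document', [FileRequired(), FileAllowed(['pdf', 'txt'])])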
class ChainRequestParameters(ChainMap):
"""ChainMap with sanic.RequestParameters style API"""
def get(self, name, default=None):
"""Return the first element with key `name`"""
return super().get(name, [default])[0]
def getlist(self, name, default=None):
"""Return all elements with key `name`
Only elements of the first chained map with such a key are returned.
"""
return super().get(name, default)
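# Added illustration (hedged): ChainMap semantics mean the first mapping that
# contains a key wins. If request.form == {'tag': ['a']} and
# request.files == {'tag': [f1], 'doc': [f2]}, then for
# ChainRequestParameters(request.form, request.files):
#   .get('tag')     -> 'a'     (first element of the first map that has 'tag')
#   .getlist('doc') -> [f2]    (full list from the first map that has 'doc')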
class SanicForm(Form):
"""Form with session-based CSRF Protection.
Upon initialization, the form instance will set up CSRF protection with
settings fetched from the provided Sanic-style request object. If no
request object is provided, CSRF protection is disabled.
"""
class Meta(DefaultMeta):
csrf = True
csrf_class = SessionCSRF
def __init__(self, request=None, *args, meta=None, **kwargs):
# Patching status
self.patched = False
# Meta
form_meta = meta_for_request(request)
form_meta.update(meta or {})
kwargs['meta'] = form_meta
# Formdata
self.request = request
if request is not None:
formdata = kwargs.pop('formdata', sentinel)
if formdata is sentinel:
if request.files:
formdata = ChainRequestParameters(
request.form, request.files)
else:
formdata = request.form
# signature of wtforms.Form (formdata, obj, prefix, ...)
args = chain([formdata], args)
super().__init__(*args, **kwargs)
# Pass app to fields that need it
if self.request is not None:
for name, field in self._fields.items():
if hasattr(field, '_get_app'):
field._get_app(self.request.app)
# @unpatch ??
def validate_on_submit(self):
''' For async validators: use self.validate_on_submit_async.
This method is kept only for backward compatibility.
'''
if self.patched is not False:
raise RuntimeError('Once you go async, you can never go back. :)\
Continue using validate_on_submit_async \
instead of validate_on_submit')
"""Return `True` if this form is submited and all fields verified"""
return self.request and (self.request.method in SUBMIT_VERBS) and \
self.validate()
@patch
async def validate_on_submit_async(self):
''' Adds support for async validators and Sanic-WTF Recaptcha
.. note::
As a side effect of patching wtforms to support async,
there's a restriction you must be aware of:
Don't use SanicForm.validate_on_submit() (the sync version) after running this method.
Doing so will most likely cause an error.
'''
return self.request and (self.request.method in SUBMIT_VERBS) and \
await self.validate()
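# Added usage sketch (hedged, not part of the original module). It assumes a
# Sanic app whose session middleware exposes request['session'] and whose
# config defines SECRET_KEY; route, form and field names are illustrative.
#
#   from sanic import Sanic, response
#   from wtforms import StringField
#   from wtforms.validators import DataRequired
#
#   app = Sanic('demo')
#   app.config.SECRET_KEY = 'change-me'
#
#   class FeedbackForm(SanicForm):
#       note = StringField('note', validators=[DataRequired()])
#
#   @app.route('/feedback', methods=['GET', 'POST'])
#   async def feedback(request):
#       form = FeedbackForm(request)
#       if await form.validate_on_submit_async():
#           return response.text(form.note.data)
#       return response.text('invalid form', status=400)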
| 33.342391
| 98
| 0.631622
|
from collections import ChainMap
from datetime import timedelta
from itertools import chain
from wtforms import Form
from wtforms.csrf.session import SessionCSRF
from wtforms.meta import DefaultMeta
from wtforms.validators import DataRequired, StopValidation
from wtforms.fields.core import Field
from ._patch import patch
from .recaptcha import RecaptchaField
__version__ = '1.0.3.dev0'
__all__ = [
'SanicForm',
'FileAllowed', 'file_allowed', 'FileRequired', 'file_required', 'RecaptchaField'
]
def to_bytes(text, encoding='utf8'):
if isinstance(text, str):
return text.encode(encoding)
return bytes(text)
def meta_for_request(request):
meta = {'csrf': False}
if not request:
return meta
config = request.app.config
csrf = meta['csrf'] = config.get('WTF_CSRF_ENABLED', True)
if not csrf:
return meta
meta['csrf_field_name'] = config.get('WTF_CSRF_FIELD_NAME', 'csrf_token')
secret = config.get('WTF_CSRF_SECRET_KEY')
if secret is None:
secret = config.get('SECRET_KEY')
if not secret:
raise ValueError(
'CSRF protection needs either WTF_CSRF_SECRET_KEY or SECRET_KEY')
meta['csrf_secret'] = to_bytes(secret)
seconds = config.get('WTF_CSRF_TIME_LIMIT', 1800)
meta['csrf_time_limit'] = timedelta(seconds=seconds)
name = config.get('WTF_CSRF_CONTEXT_NAME', 'session')
meta['csrf_context'] = request[name]
return meta
SUBMIT_VERBS = frozenset({'DELETE', 'PATCH', 'POST', 'PUT'})
sentinel = object()
class FileRequired(DataRequired):
def __call__(self, form, field):
if not getattr(field.data, 'name', ''):
msg = self.message or field.gettext('This field is required.')
raise StopValidation(msg)
file_required = FileRequired
class FileAllowed:
def __init__(self, extensions, message=None):
extensions = (ext.lower() for ext in extensions)
extensions = (
ext if ext.startswith('.') else '.' + ext for ext in extensions)
self.extensions = frozenset(extensions)
self.message = message
def __call__(self, form, field):
filename = getattr(field.data, 'name', '')
if not filename:
return
filename = filename.lower()
if any(filename.endswith(ext) for ext in self.extensions):
return
raise StopValidation(self.message or field.gettext(
'File type is not allowed.'))
file_allowed = FileAllowed
class ChainRequestParameters(ChainMap):
def get(self, name, default=None):
return super().get(name, [default])[0]
def getlist(self, name, default=None):
return super().get(name, default)
class SanicForm(Form):
class Meta(DefaultMeta):
csrf = True
csrf_class = SessionCSRF
def __init__(self, request=None, *args, meta=None, **kwargs):
self.patched = False
form_meta = meta_for_request(request)
form_meta.update(meta or {})
kwargs['meta'] = form_meta
self.request = request
if request is not None:
formdata = kwargs.pop('formdata', sentinel)
if formdata is sentinel:
if request.files:
formdata = ChainRequestParameters(
request.form, request.files)
else:
formdata = request.form
args = chain([formdata], args)
super().__init__(*args, **kwargs)
if self.request is not None:
for name, field in self._fields.items():
if hasattr(field, '_get_app'):
field._get_app(self.request.app)
def validate_on_submit(self):
if self.patched is not False:
raise RuntimeError('Once you go async, you can never go back. :)\
Continue using validate_on_submit_async \
instead of validate_on_submit')
return self.request and (self.request.method in SUBMIT_VERBS) and \
self.validate()
@patch
async def validate_on_submit_async(self):
return self.request and (self.request.method in SUBMIT_VERBS) and \
await self.validate()
| true
| true
|
f712efe144ad625bebe21fb5d3a4946f8efaa3af
| 2,203
|
py
|
Python
|
GAN_tf/src/model/flags.py
|
inamori/DeepLearningImplementations
|
8bbd3c5a4a7d24b2c098ba47cfd45fe2c152771d
|
[
"MIT"
] | 2,010
|
2016-09-22T22:22:05.000Z
|
2022-03-21T02:43:21.000Z
|
GAN_tf/src/model/flags.py
|
inamori/DeepLearningImplementations
|
8bbd3c5a4a7d24b2c098ba47cfd45fe2c152771d
|
[
"MIT"
] | 81
|
2016-09-13T01:00:06.000Z
|
2021-08-17T15:10:28.000Z
|
GAN_tf/src/model/flags.py
|
inamori/DeepLearningImplementations
|
8bbd3c5a4a7d24b2c098ba47cfd45fe2c152771d
|
[
"MIT"
] | 777
|
2016-09-10T12:44:23.000Z
|
2022-01-16T04:19:55.000Z
|
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def define_flags():
############
# Run mode
############
tf.app.flags.DEFINE_string('run', None, "Which operation to run. [train|inference]")
##########################
# Training parameters
###########################
tf.app.flags.DEFINE_integer('nb_epoch', 400, "Number of epochs")
tf.app.flags.DEFINE_integer('batch_size', 64, "Number of samples per batch.")
tf.app.flags.DEFINE_integer('nb_batch_per_epoch', 500, "Number of batches per epoch")
tf.app.flags.DEFINE_float('learning_rate', 2E-4, "Learning rate used for AdamOptimizer")
tf.app.flags.DEFINE_integer('noise_dim', 100, "Noise dimension for GAN generation")
tf.app.flags.DEFINE_integer('random_seed', 0, "Seed used to initialize rng.")
############################################
# General tensorflow parameters
#############################################
tf.app.flags.DEFINE_bool('use_XLA', False, "Whether to use XLA compiler.")
tf.app.flags.DEFINE_integer('num_threads', 2, "Number of threads to fetch the data")
tf.app.flags.DEFINE_float('capacity_factor', 32, "Number of batches to store in queue")
##########
# Datasets
##########
tf.app.flags.DEFINE_string('data_format', "NCHW", "Tensorflow image data format.")
tf.app.flags.DEFINE_string('celebA_path', "../../data/raw/img_align_celeba", "Path to celebA images")
tf.app.flags.DEFINE_integer('channels', 3, "Number of channels")
tf.app.flags.DEFINE_float('central_fraction', 0.8, "Central crop as a fraction of total image")
tf.app.flags.DEFINE_integer('img_size', 64, "Image size")
##############
# Directories
##############
tf.app.flags.DEFINE_string('model_dir', '../../models', "Output folder where checkpoints are dumped.")
tf.app.flags.DEFINE_string('log_dir', '../../logs', "Logs for tensorboard.")
tf.app.flags.DEFINE_string('fig_dir', '../../figures', "Where to save figures.")
tf.app.flags.DEFINE_string('raw_dir', '../../data/raw', "Where raw data is saved")
tf.app.flags.DEFINE_string('data_dir', '../../data/processed', "Where processed data is saved")
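# Added usage sketch (hedged): with the TF1-style tf.app.flags API used above, a
# training script would typically call define_flags() at import time and read
# the parsed values from FLAGS inside main(), e.g.
#
#   define_flags()
#
#   def main(_):
#       assert FLAGS.run in ('train', 'inference')
#       print(FLAGS.batch_size, FLAGS.learning_rate, FLAGS.model_dir)
#
#   if __name__ == '__main__':
#       tf.app.run()    # parses sys.argv, then calls main()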
| 45.895833
| 106
| 0.622333
|
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def define_flags():
| true
| true
|
f712f03d95082cb78501e5b7a4c3b0f6128a8166
| 266
|
py
|
Python
|
accounting/accounting/doctype/account/account.py
|
athul/accounting-app
|
cde48fde0ee96be212281b3a6a28c82e90d39d8a
|
[
"MIT"
] | 1
|
2021-05-17T09:48:38.000Z
|
2021-05-17T09:48:38.000Z
|
accounting/accounting/doctype/account/account.py
|
athul/accounting-app
|
cde48fde0ee96be212281b3a6a28c82e90d39d8a
|
[
"MIT"
] | null | null | null |
accounting/accounting/doctype/account/account.py
|
athul/accounting-app
|
cde48fde0ee96be212281b3a6a28c82e90d39d8a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Athul Cyriac Ajay and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.utils.nestedset import NestedSet
class Account(NestedSet):
pass
| 24.181818
| 56
| 0.774436
|
from __future__ import unicode_literals
from frappe.utils.nestedset import NestedSet
class Account(NestedSet):
pass
| true
| true
|
f712f0e14d8999105f1c03a1a69665fda1b7c3be
| 2,465
|
py
|
Python
|
matrixrecovery/matrixrecovery.py
|
kwangsungjun/lrbandit
|
2f1f7ca4bbefe2bfd3e0bc50c4423a9791bfcde8
|
[
"Apache-2.0"
] | 2
|
2019-08-11T22:50:49.000Z
|
2021-04-21T17:48:20.000Z
|
matrixrecovery/matrixrecovery.py
|
kwangsungjun/lrbandit
|
2f1f7ca4bbefe2bfd3e0bc50c4423a9791bfcde8
|
[
"Apache-2.0"
] | null | null | null |
matrixrecovery/matrixrecovery.py
|
kwangsungjun/lrbandit
|
2f1f7ca4bbefe2bfd3e0bc50c4423a9791bfcde8
|
[
"Apache-2.0"
] | 2
|
2019-08-11T22:52:57.000Z
|
2021-09-18T05:31:32.000Z
|
import myutils_cython
import numpy as np, numpy.random as ra, scipy.linalg as sla
from tqdm import tqdm
def rankone(X,Z,y,r,R=.1, C=.1, tolPred=0.01, tolTh=0.01, maxIter=400, verbose=False):
"""
matrix recovery with rank-one measurements using Burer-Monteiro approach
measurement model: (X[i,:] @ Theta) @ Z[i,:] == y[i]
(IN)
X, Z: N by d matrix
y: N-dim vector
r: the deemed rank of Theta
R: noise level (subgaussian parameter)
C: regularization parameter (larger => more regularization)
tolPred, tolTh: stopping tolerances (relative prediction error / change in the estimate of Theta)
maxIter: maximum number of iterations
(OUT)
(U,V,out_nIter,stat) so that U@V.T ≈ Theta;
stat['objs'] has the objective values over time
stat['stoppingPredList'], stat['stoppingThetaList'] has stopping conditions over time
"""
N,d = X.shape
initU = ra.randn(d,r)
U = initU
V = initU # just a placeholder
M = np.zeros( (d*r,d*r) )
hatTh = initU @ initU.T # very bad initial hatTh
if (verbose):
my_tqdm = tqdm
else:
my_tqdm = lambda x: x
objs = []; stoppingPredList = []; stoppingThetaList = []
myeye = R*C*np.eye(d*r)
for iIter in my_tqdm(range(1,1+maxIter)):
D = np.zeros((N,d*r))
if iIter % 2 == 0: # update U
ZV = Z @ V
myutils_cython.calcRowwiseKron(D, X, ZV) #- note D will be written!
else: # update V
XU = X @ U
myutils_cython.calcRowwiseKron(D, Z, XU)
M[:,:] = myeye + D.T@D
b = D.T @ y
sol = sla.solve(M,b, assume_a='pos', overwrite_a=True).reshape(d,r)
if iIter % 2 == 0:
prevU = U
U = sol
else:
prevV = V
V = sol
prev_hatTh = hatTh
hatTh = U@V.T
#- compute residual
predy = ((X@hatTh)*Z).sum(1)
obj = sla.norm(predy - y, 2)**2 + R*C*(sla.norm(U, 'fro')**2 + sla.norm(V, 'fro')**2)
objs.append( obj )
stoppingPred = sla.norm(predy - y, 2) / sla.norm(y,2)
stoppingPredList.append( stoppingPred )
stoppingTheta = sla.norm(hatTh - prev_hatTh, 'fro')
stoppingThetaList.append( stoppingTheta )
if (stoppingPred < tolPred):
break
if (stoppingTheta < tolTh):
break
out_nIter = iIter
stat = {'objs': objs, 'stoppingPredList': stoppingPredList, 'stoppingThetaList': stoppingThetaList}
return U,V,out_nIter,stat
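if __name__ == '__main__':
    # Added smoke test (hedged, not part of the original module); it assumes the
    # compiled myutils_cython extension imported above is available.
    d, r_true, N = 20, 2, 4000
    Theta = ra.randn(d, r_true) @ ra.randn(r_true, d)    # rank-2 ground truth
    X, Z = ra.randn(N, d), ra.randn(N, d)
    y = ((X @ Theta) * Z).sum(1) + 0.01 * ra.randn(N)    # noisy rank-one measurements
    U, V, nIter, stat = rankone(X, Z, y, r=r_true)
    print(nIter, sla.norm(U @ V.T - Theta, 'fro') / sla.norm(Theta, 'fro'))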
| 34.71831
| 103
| 0.571197
|
import myutils_cython
import numpy as np, numpy.random as ra, scipy.linalg as sla
from tqdm import tqdm
def rankone(X,Z,y,r,R=.1, C=.1, tolPred=0.01, tolTh=0.01, maxIter=400, verbose=False):
N,d = X.shape
initU = ra.randn(d,r)
U = initU
V = initU
M = np.zeros( (d*r,d*r) )
hatTh = initU @ initU.T
if (verbose):
my_tqdm = tqdm
else:
my_tqdm = lambda x: x
objs = []; stoppingPredList = []; stoppingThetaList = []
myeye = R*C*np.eye(d*r)
for iIter in my_tqdm(range(1,1+maxIter)):
D = np.zeros((N,d*r))
if iIter % 2 == 0:
ZV = Z @ V
myutils_cython.calcRowwiseKron(D, X, ZV)
else:
XU = X @ U
myutils_cython.calcRowwiseKron(D, Z, XU)
M[:,:] = myeye + D.T@D
b = D.T @ y
sol = sla.solve(M,b, assume_a='pos', overwrite_a=True).reshape(d,r)
if iIter % 2 == 0:
prevU = U
U = sol
else:
prevV = V
V = sol
prev_hatTh = hatTh
hatTh = U@V.T
predy = ((X@hatTh)*Z).sum(1)
obj = sla.norm(predy - y, 2)**2 + R*C*(sla.norm(U, 'fro')**2 + sla.norm(V, 'fro')**2)
objs.append( obj )
stoppingPred = sla.norm(predy - y, 2) / sla.norm(y,2)
stoppingPredList.append( stoppingPred )
stoppingTheta = sla.norm(hatTh - prev_hatTh, 'fro')
stoppingThetaList.append( stoppingTheta )
if (stoppingPred < tolPred):
break
if (stoppingTheta < tolTh):
break
out_nIter = iIter
stat = {'objs': objs, 'stoppingPredList': stoppingPredList, 'stoppingThetaList': stoppingThetaList}
return U,V,out_nIter,stat
| true
| true
|
f712f1594cbc629a2413df1610f21e67bb57e812
| 20,937
|
py
|
Python
|
tests/tests_complexity.py
|
kassyray/NeuroKit
|
b84d110a71d5d17c0d1efde0d60d00446fda16cb
|
[
"MIT"
] | null | null | null |
tests/tests_complexity.py
|
kassyray/NeuroKit
|
b84d110a71d5d17c0d1efde0d60d00446fda16cb
|
[
"MIT"
] | null | null | null |
tests/tests_complexity.py
|
kassyray/NeuroKit
|
b84d110a71d5d17c0d1efde0d60d00446fda16cb
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import neurokit2 as nk
import nolds
from pyentrp import entropy as pyentrp
"""
For the testing of complexity, we test our implementations against existing and established ones.
However, some of these other implementations are not packaged in a way that
lets us import them easily. Thus, we directly copied their content into this file
(below the tests).
"""
# =============================================================================
# Some sanity checks
# =============================================================================
def test_complexity_sanity():
signal = np.cos(np.linspace(start=0, stop=30, num=1000))
# Entropy
assert np.allclose(nk.entropy_fuzzy(signal), nk.entropy_sample(signal, fuzzy=True), atol=0.000001)
# Fractal
assert np.allclose(nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20])), 2.1009048365682133, atol=0.000001)
assert np.allclose(nk.fractal_dfa(signal), 1.957966586191164, atol=0.000001)
assert np.allclose(nk.fractal_dfa(signal, multifractal=True), 1.957966586191164, atol=0.000001)
assert np.allclose(nk.fractal_correlation(signal), 0.7884473170763334, atol=0.000001)
assert np.allclose(nk.fractal_correlation(signal, r="nolds"), nolds.corr_dim(signal, 2), atol=0.0001)
# =============================================================================
# Comparison against R
# =============================================================================
"""
R code:
library(TSEntropies)
library(pracma)
signal <- read.csv("https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv")$RSP
r <- 0.2 * sd(signal)
# ApEn --------------------------------------------------------------------
TSEntropies::ApEn(signal, dim=2, lag=1, r=r)
0.04383386
TSEntropies::ApEn(signal, dim=3, lag=2, r=1)
0.0004269369
pracma::approx_entropy(signal[1:200], edim=2, r=r, elag=1)
0.03632554
# SampEn ------------------------------------------------------------------
TSEntropies::SampEn(signal[1:300], dim=2, lag=1, r=r)
0.04777648
TSEntropies::FastSampEn(signal[1:300], dim=2, lag=1, r=r)
0.003490405
pracma::sample_entropy(signal[1:300], edim=2, tau=1, r=r)
0.03784376
pracma::sample_entropy(signal[1:300], edim=3, tau=2, r=r)
0.09185509
"""
def test_complexity_vs_R():
signal = pd.read_csv("https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv")["RSP"].values
r = 0.2 * np.std(signal, ddof=1)
# ApEn
apen = nk.entropy_approximate(signal, dimension=2, r=r)
assert np.allclose(apen, 0.04383386, atol=0.0001)
apen = nk.entropy_approximate(signal, dimension=3, delay=2, r=1)
assert np.allclose(apen, 0.0004269369, atol=0.0001)
apen = nk.entropy_approximate(signal[0:200], dimension=2, delay=1, r=r)
assert np.allclose(apen, 0.03632554, atol=0.0001)
# SampEn
sampen = nk.entropy_sample(signal[0:300], dimension=2, r=r)
assert np.allclose(sampen, nk.entropy_sample(signal[0:300], dimension=2, r=r, distance="infinity"), atol=0.001)
assert np.allclose(sampen, 0.03784376, atol=0.001)
sampen = nk.entropy_sample(signal[0:300], dimension=3, delay=2, r=r)
assert np.allclose(sampen, 0.09185509, atol=0.01)
# =============================================================================
# Comparison against Python implementations
# =============================================================================
def test_complexity_vs_Python():
signal = np.cos(np.linspace(start=0, stop=30, num=100))
# Shannon
shannon = nk.entropy_shannon(signal)
# assert scipy.stats.entropy(shannon, pd.Series(signal).value_counts())
assert np.allclose(shannon - pyentrp.shannon_entropy(signal), 0)
# Approximate
assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
assert np.allclose(nk.entropy_approximate(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0)
assert nk.entropy_approximate(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) != pyeeg_ap_entropy(signal, 2, 0.2*np.std(signal, ddof=1))
# Sample
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - nolds.sampen(signal, 2, 0.2), 0)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0)
# import sampen
# sampen.sampen2(signal[0:300], mm=2, r=r)
assert nk.entropy_sample(signal, dimension=2, r=0.2) != pyentrp.sample_entropy(signal, 2, 0.2)[1]
assert nk.entropy_sample(signal, dimension=2, r=0.2*np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy(signal, 2, 0.2)[0.2][2]
# MSE
# assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list"))
# assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10))
# Fuzzy
assert np.allclose(nk.entropy_fuzzy(signal, dimension=2, r=0.2, delay=1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)
# DFA
assert nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20])) != nolds.dfa(signal, nvals=[4, 8, 12, 20], fit_exp="poly")
# =============================================================================
# Wikipedia
# =============================================================================
def wikipedia_sampen(signal, m=2, r=1):
N = len(signal)
B = 0.0
A = 0.0
# Split time series and save all templates of length m
xmi = np.array([signal[i : i + m] for i in range(N - m)])
xmj = np.array([signal[i : i + m] for i in range(N - m + 1)])
# Save all matches minus the self-match, compute B
B = np.sum([np.sum(np.abs(xmii - xmj).max(axis=1) <= r) - 1 for xmii in xmi])
# Similar for computing A
m += 1
xm = np.array([signal[i : i + m] for i in range(N - m + 1)])
A = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= r) - 1 for xmi in xm])
# Return SampEn
return -np.log(A / B)
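# Added note (hedged): this reference implementation can be sanity-checked
# against the packaged estimator at the same absolute tolerance, e.g.
#   x = np.random.RandomState(0).randn(200)
#   r = 0.2 * np.std(x, ddof=1)
#   wikipedia_sampen(x, m=2, r=r)             # compare with
#   nk.entropy_sample(x, dimension=2, r=r)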
# =============================================================================
# Pyeeg
# =============================================================================
def pyeeg_embed_seq(time_series, tau, embedding_dimension):
if not type(time_series) == np.ndarray:
typed_time_series = np.asarray(time_series)
else:
typed_time_series = time_series
shape = (
typed_time_series.size - tau * (embedding_dimension - 1),
embedding_dimension
)
strides = (typed_time_series.itemsize, tau * typed_time_series.itemsize)
return np.lib.stride_tricks.as_strided(
typed_time_series,
shape=shape,
strides=strides
)
def pyeeg_bin_power(X, Band, Fs):
C = np.fft.fft(X)
C = abs(C)
Power = np.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
Freq = float(Band[Freq_Index])
Next_Freq = float(Band[Freq_Index + 1])
Power[Freq_Index] = sum(
C[int(np.floor(Freq / Fs * len(X))):
int(np.floor(Next_Freq / Fs * len(X)))]
)
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio
def pyeeg_ap_entropy(X, M, R):
N = len(X)
Em = pyeeg_embed_seq(X, 1, M)
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = np.max(D, axis=2) <= R
# Probability that random M-sequences are in range
Cm = InRange.mean(axis=0)
# M+1-sequences in range if M-sequences are in range & last values are close
Dp = np.abs(
np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T
)
Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))
Ap_En = (Phi_m - Phi_mp) / (N - M)
return Ap_En
def pyeeg_samp_entropy(X, M, R):
N = len(X)
Em = pyeeg_embed_seq(X, 1, M)[:-1]
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = np.max(D, axis=2) <= R
np.fill_diagonal(InRange, 0) # Don't count self-matches
Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
Dp = np.abs(
np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T
)
Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)
# Avoid taking log(0)
Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))
return Samp_En
# =============================================================================
# Entropy
# =============================================================================
from sklearn.neighbors import KDTree
def entropy_embed(x, order=3, delay=1):
N = len(x)
if order * delay > N:
raise ValueError("Error: order * delay should be lower than x.size")
if delay < 1:
raise ValueError("Delay has to be at least 1.")
if order < 2:
raise ValueError("Order has to be at least 2.")
Y = np.zeros((order, N - (order - 1) * delay))
for i in range(order):
Y[i] = x[i * delay:i * delay + Y.shape[1]]
return Y.T
def entropy_app_samp_entropy(x, order, metric='chebyshev', approximate=True):
_all_metrics = KDTree.valid_metrics
if metric not in _all_metrics:
raise ValueError('The given metric (%s) is not valid. The valid '
'metric names are: %s' % (metric, _all_metrics))
phi = np.zeros(2)
r = 0.2 * np.std(x, axis=-1, ddof=1)
# compute phi(order, r)
_emb_data1 = entropy_embed(x, order, 1)
if approximate:
emb_data1 = _emb_data1
else:
emb_data1 = _emb_data1[:-1]
count1 = KDTree(emb_data1, metric=metric).query_radius(emb_data1, r,
count_only=True
).astype(np.float64)
# compute phi(order + 1, r)
emb_data2 = entropy_embed(x, order + 1, 1)
count2 = KDTree(emb_data2, metric=metric).query_radius(emb_data2, r,
count_only=True
).astype(np.float64)
if approximate:
phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
else:
phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
return phi
def entropy_app_entropy(x, order=2, metric='chebyshev'):
phi = entropy_app_samp_entropy(x, order=order, metric=metric, approximate=True)
return np.subtract(phi[0], phi[1])
def entropy_sample_entropy(x, order=2, metric='chebyshev'):
x = np.asarray(x, dtype=np.float64)
phi = entropy_app_samp_entropy(x, order=order, metric=metric,
approximate=False)
return -np.log(np.divide(phi[1], phi[0]))
# =============================================================================
# entro-py
# =============================================================================
def entro_py_sampen(x, dim, r, scale=True):
return entro_py_entropy(x, dim, r, scale=scale)
def entro_py_cross_sampen(x1, x2, dim, r, scale=True):
return entro_py_entropy([x1, x2], dim, r, scale)
def entro_py_fuzzyen(x, dim, r, n, scale=True):
return entro_py_entropy(x, dim, r, n=n, scale=scale, remove_baseline=True)
def entro_py_cross_fuzzyen(x1, x2, dim, r, n, scale=True):
return entro_py_entropy([x1, x2], dim, r, n, scale=scale, remove_baseline=True)
def entro_py_pattern_mat(x, m):
x = np.asarray(x).ravel()
if m == 1:
return x
else:
N = len(x)
patterns = np.zeros((m, N-m+1))
for i in range(m):
patterns[i, :] = x[i:N-m+i+1]
return patterns
def entro_py_entropy(x, dim, r, n=1, scale=True, remove_baseline=False):
fuzzy = True if remove_baseline else False
cross = True if type(x) == list else False
N = len(x[0]) if cross else len(x)
if scale:
if cross:
x = [entro_py_scale(np.copy(x[0])), entro_py_scale(np.copy(x[1]))]
else:
x = entro_py_scale(np.copy(x))
phi = [0, 0] # phi(m), phi(m+1)
for j in [0, 1]:
m = dim + j
npat = N-dim # https://github.com/ixjlyons/entro-py/pull/2/files
if cross:
# patterns = [entro_py_pattern_mat(x[0], m), entro_py_pattern_mat(x[1], m)]
patterns = [entro_py_pattern_mat(x[0], m)[:, :npat], entro_py_pattern_mat(x[1], m)[:, :npat]] # https://github.com/ixjlyons/entro-py/pull/2/files
else:
# patterns = entro_py_pattern_mat(x, m)
patterns = entro_py_pattern_mat(x, m)[:, :npat]
if remove_baseline:
if cross:
patterns[0] = entro_py_remove_baseline(patterns[0], axis=0)
patterns[1] = entro_py_remove_baseline(patterns[1], axis=0)
else:
patterns = entro_py_remove_baseline(patterns, axis=0)
# count = np.zeros(N-m) # https://github.com/ixjlyons/entro-py/pull/2/files
# for i in range(N-m): # https://github.com/ixjlyons/entro-py/pull/2/files
count = np.zeros(npat)
for i in range(npat):
if cross:
if m == 1:
sub = patterns[1][i]
else:
sub = patterns[1][:, [i]]
dist = np.max(np.abs(patterns[0] - sub), axis=0)
else:
if m == 1:
sub = patterns[i]
else:
sub = patterns[:, [i]]
dist = np.max(np.abs(patterns - sub), axis=0)
if fuzzy:
sim = np.exp(-np.power(dist, n) / r)
else:
sim = dist < r
count[i] = np.sum(sim) - 1
# phi[j] = np.mean(count) / (N-m-1)
phi[j] = np.mean(count) / (N-dim-1) # https://github.com/ixjlyons/entro-py/pull/2/files
return np.log(phi[0] / phi[1])
def entro_py_scale(x, axis=None):
x = entro_py_remove_baseline(x, axis=axis)
x /= np.std(x, ddof=1, axis=axis, keepdims=True)
return x
def entro_py_remove_baseline(x, axis=None):
x -= np.mean(x, axis=axis, keepdims=True)
return x
# =============================================================================
# MultiscaleEntropy https://github.com/reatank/MultiscaleEntropy/blob/master/MultiscaleEntropy/mse.py
# =============================================================================
import math
from collections.abc import Iterable
def MultiscaleEntropy_init_return_type(return_type):
if return_type == 'dict':
return {}
else:
return []
def MultiscaleEntropy_check_type(x, num_type, name):
if isinstance(x, num_type):
tmp = [x]
elif not isinstance(x, Iterable):
raise ValueError(name + ' should be a ' + num_type.__name__ + ' or an iterator of ' + num_type.__name__)
else:
tmp = []
for i in x:
tmp.append(i)
if not isinstance(i, num_type):
raise ValueError(name + ' should be a ' + num_type.__name__ + ' or an iterator of ' + num_type.__name__)
return tmp
# mean of separate, non-overlapping intervals of x (coarse-graining)
def MultiscaleEntropy_coarse_grain(x, scale_factor):
x = np.array(x)
x_len = len(x)
if x_len % scale_factor:
padded_len = (1+int(x_len/scale_factor))*scale_factor
else:
padded_len = x_len
tmp_x = np.zeros(padded_len)
tmp_x[:x_len] = x
tmp_x = np.reshape(tmp_x, (int(padded_len/scale_factor), scale_factor))
ans = np.reshape(np.sum(tmp_x, axis=1), (-1))/scale_factor
return ans
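# Added worked example (hedged): coarse-graining averages consecutive blocks,
#   MultiscaleEntropy_coarse_grain([1, 2, 3, 4, 5, 6], 2) -> [1.5, 3.5, 5.5]
# and a trailing partial block is zero-padded before averaging, so
#   MultiscaleEntropy_coarse_grain([1, 2, 3], 2) -> [1.5, 1.5]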
def MultiscaleEntropy_sample_entropy(x, m=[2], r=[0.15], sd=None, return_type='dict', safe_mode=False):
'''[Sample Entropy, the threshold will be r*sd]
Arguments:
x {[input signal]} -- [an iterator of numbers]
Keyword Arguments:
m {list} -- [m in sample entropy] (default: {[2]})
r {list} -- [r in sample entropy] (default: {[0.15]})
sd {number} -- [standard deviation of x, if None, will be calculated] (default: {None})
return_type {str} -- [can be dict or list] (default: {'dict'})
safe_mode {bool} -- [if set True, type checking will be skipped] (default: {False})
Raises:
ValueError -- [some values too big]
Returns:
[dict or list as return_type indicates] -- [if dict, nested as [r][m] for each value of r, m; if list, nested as [i][j] for the lengths of r, m]
'''
# type checking
if not safe_mode:
m = MultiscaleEntropy_check_type(m, int, 'm')
r = MultiscaleEntropy_check_type(r, float, 'r')
if not (sd == None) and not (isinstance(sd, float) or isinstance(sd, int)):
raise ValueError('sd should be a number')
try:
x = np.array(x)
except:
raise ValueError('x should be a sequence of numbers')
# value checking
if len(x) < max(m):
raise ValueError('the max m is bigger than x\'s length')
# initialization
if sd == None:
sd = np.sqrt(np.var(x))
ans = MultiscaleEntropy_init_return_type(return_type)
# calculation
for i, rr in enumerate(r):
threshold = rr * sd
if return_type == 'dict':
ans[rr] = MultiscaleEntropy_init_return_type(return_type)
else:
ans.append(MultiscaleEntropy_init_return_type(return_type))
count = {}
tmp_m = []
for mm in m:
tmp_m.append(mm)
tmp_m.append(mm+1)
tmp_m = list(set(tmp_m))
for mm in tmp_m:
count[mm] = 0
for j in range(1, len(x)-min(m)+1):
cont = 0
for inc in range(0, len(x)-j):
if abs(x[inc]-x[j+inc]) < threshold:
cont += 1
elif cont > 0:
for mm in tmp_m:
tmp = cont - mm + 1
count[mm] += tmp if tmp > 0 else 0
cont = 0
if cont > 0:
for mm in tmp_m:
tmp = cont - mm + 1
count[mm] += tmp if tmp > 0 else 0
for mm in m:
if count[mm+1] == 0 or count[mm] == 0:
t = len(x)-mm+1
tmp = -math.log(1/(t*(t-1)))
else:
tmp = -math.log(count[mm+1]/count[mm])
if return_type == 'dict':
ans[rr][mm] = tmp
else:
ans[i].append(tmp)
return ans
def MultiscaleEntropy_mse(x, scale_factor=[i for i in range(1,21)], m=[2], r=[0.15], return_type='dict', safe_mode=False):
'''[Multiscale Entropy]
Arguments:
x {[input signal]} -- [an iterator of numbers]
Keyword Arguments:
scale_factor {list} -- [scale factors of coarse graining] (default: {[i for i in range(1,21)]})
m {list} -- [m in sample entropy] (default: {[2]})
r {list} -- [r in sample entropy] (default: {[0.15]})
return_type {str} -- [can be dict or list] (default: {'dict'})
safe_mode {bool} -- [if set True, type checking will be skipped] (default: {False})
Raises:
ValueError -- [some values too big]
Returns:
[dict or list as return_type indicates] -- [if dict, nested as [scale_factor][r][m]; if list, a flat list with one value per scale_factor computed with the first m and first r]
'''
# type checking
if not safe_mode:
m = MultiscaleEntropy_check_type(m, int, 'm')
r = MultiscaleEntropy_check_type(r, float, 'r')
scale_factor = MultiscaleEntropy_check_type(scale_factor, int, 'scale_factor')
try:
x = np.array(x)
except:
print('x should be a sequence of numbers')
# value checking
if max(scale_factor) > len(x):
raise ValueError('the max scale_factor is bigger than x\'s length')
# calculation
sd = np.sqrt(np.var(x))
ms_en = MultiscaleEntropy_init_return_type(return_type)
for s_f in scale_factor:
y = MultiscaleEntropy_coarse_grain(x, s_f)
if return_type == 'dict':
ms_en[s_f] = MultiscaleEntropy_sample_entropy(y, m, r, sd, 'dict', True)
else:
ms_en.append(MultiscaleEntropy_sample_entropy(y, m, r, sd, 'list', True))
if return_type == "list":
ms_en = [i[0] for i in ms_en]
ms_en = [i[0] for i in ms_en]
return ms_en
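# Added usage sketch (hedged): with the nesting produced above, a call such as
#   mse = MultiscaleEntropy_mse(signal, scale_factor=[1, 2, 3], m=[2], r=[0.15])
# returns a dict indexed as mse[scale_factor][r][m], e.g. mse[2][0.15][2], while
# return_type='list' yields one value per scale factor (first m, first r).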
| 34.77907
| 188
| 0.561542
|
import numpy as np
import pandas as pd
import neurokit2 as nk
import nolds
from pyentrp import entropy as pyentrp
def test_complexity_sanity():
signal = np.cos(np.linspace(start=0, stop=30, num=1000))
assert np.allclose(nk.entropy_fuzzy(signal), nk.entropy_sample(signal, fuzzy=True), atol=0.000001)
assert np.allclose(nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20])), 2.1009048365682133, atol=0.000001)
assert np.allclose(nk.fractal_dfa(signal), 1.957966586191164, atol=0.000001)
assert np.allclose(nk.fractal_dfa(signal, multifractal=True), 1.957966586191164, atol=0.000001)
assert np.allclose(nk.fractal_correlation(signal), 0.7884473170763334, atol=0.000001)
assert np.allclose(nk.fractal_correlation(signal, r="nolds"), nolds.corr_dim(signal, 2), atol=0.0001)
def test_complexity_vs_R():
signal = pd.read_csv("https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv")["RSP"].values
r = 0.2 * np.std(signal, ddof=1)
apen = nk.entropy_approximate(signal, dimension=2, r=r)
assert np.allclose(apen, 0.04383386, atol=0.0001)
apen = nk.entropy_approximate(signal, dimension=3, delay=2, r=1)
assert np.allclose(apen, 0.0004269369, atol=0.0001)
apen = nk.entropy_approximate(signal[0:200], dimension=2, delay=1, r=r)
assert np.allclose(apen, 0.03632554, atol=0.0001)
sampen = nk.entropy_sample(signal[0:300], dimension=2, r=r)
assert np.allclose(sampen, nk.entropy_sample(signal[0:300], dimension=2, r=r, distance="infinity"), atol=0.001)
assert np.allclose(sampen, 0.03784376, atol=0.001)
sampen = nk.entropy_sample(signal[0:300], dimension=3, delay=2, r=r)
assert np.allclose(sampen, 0.09185509, atol=0.01)
def test_complexity_vs_Python():
signal = np.cos(np.linspace(start=0, stop=30, num=100))
shannon = nk.entropy_shannon(signal)
assert np.allclose(shannon - pyentrp.shannon_entropy(signal), 0)
assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
assert np.allclose(nk.entropy_approximate(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0)
assert nk.entropy_approximate(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) != pyeeg_ap_entropy(signal, 2, 0.2*np.std(signal, ddof=1))
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - nolds.sampen(signal, 2, 0.2), 0)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0)
assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0)
assert nk.entropy_sample(signal, dimension=2, r=0.2) != pyentrp.sample_entropy(signal, 2, 0.2)[1]
assert nk.entropy_sample(signal, dimension=2, r=0.2*np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy(signal, 2, 0.2)[0.2][2]
assert np.allclose(nk.entropy_fuzzy(signal, dimension=2, r=0.2, delay=1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)
assert nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20])) != nolds.dfa(signal, nvals=[4, 8, 12, 20], fit_exp="poly")
def wikipedia_sampen(signal, m=2, r=1):
N = len(signal)
B = 0.0
A = 0.0
xmi = np.array([signal[i : i + m] for i in range(N - m)])
xmj = np.array([signal[i : i + m] for i in range(N - m + 1)])
B = np.sum([np.sum(np.abs(xmii - xmj).max(axis=1) <= r) - 1 for xmii in xmi])
m += 1
xm = np.array([signal[i : i + m] for i in range(N - m + 1)])
A = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= r) - 1 for xmi in xm])
return -np.log(A / B)
def pyeeg_embed_seq(time_series, tau, embedding_dimension):
if not type(time_series) == np.ndarray:
typed_time_series = np.asarray(time_series)
else:
typed_time_series = time_series
shape = (
typed_time_series.size - tau * (embedding_dimension - 1),
embedding_dimension
)
strides = (typed_time_series.itemsize, tau * typed_time_series.itemsize)
return np.lib.stride_tricks.as_strided(
typed_time_series,
shape=shape,
strides=strides
)
def pyeeg_bin_power(X, Band, Fs):
C = np.fft.fft(X)
C = abs(C)
Power = np.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
Freq = float(Band[Freq_Index])
Next_Freq = float(Band[Freq_Index + 1])
Power[Freq_Index] = sum(
C[int(np.floor(Freq / Fs * len(X))):
int(np.floor(Next_Freq / Fs * len(X)))]
)
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio
def pyeeg_ap_entropy(X, M, R):
N = len(X)
Em = pyeeg_embed_seq(X, 1, M)
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B)
InRange = np.max(D, axis=2) <= R
Cm = InRange.mean(axis=0)
Dp = np.abs(
np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T
)
Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))
Ap_En = (Phi_m - Phi_mp) / (N - M)
return Ap_En
def pyeeg_samp_entropy(X, M, R):
N = len(X)
Em = pyeeg_embed_seq(X, 1, M)[:-1]
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B)
InRange = np.max(D, axis=2) <= R
np.fill_diagonal(InRange, 0)
    Cm = InRange.sum(axis=0)  # Number of matching M-sequences within tolerance R (counts, not probabilities)
Dp = np.abs(
np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T
)
Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)
# Avoid taking log(0)
Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))
return Samp_En
# =============================================================================
# Entropy
# =============================================================================
from sklearn.neighbors import KDTree
def entropy_embed(x, order=3, delay=1):
N = len(x)
if order * delay > N:
raise ValueError("Error: order * delay should be lower than x.size")
if delay < 1:
raise ValueError("Delay has to be at least 1.")
if order < 2:
raise ValueError("Order has to be at least 2.")
Y = np.zeros((order, N - (order - 1) * delay))
for i in range(order):
Y[i] = x[i * delay:i * delay + Y.shape[1]]
return Y.T
def entropy_app_samp_entropy(x, order, metric='chebyshev', approximate=True):
_all_metrics = KDTree.valid_metrics
if metric not in _all_metrics:
raise ValueError('The given metric (%s) is not valid. The valid '
'metric names are: %s' % (metric, _all_metrics))
phi = np.zeros(2)
r = 0.2 * np.std(x, axis=-1, ddof=1)
# compute phi(order, r)
_emb_data1 = entropy_embed(x, order, 1)
if approximate:
emb_data1 = _emb_data1
else:
emb_data1 = _emb_data1[:-1]
count1 = KDTree(emb_data1, metric=metric).query_radius(emb_data1, r,
count_only=True
).astype(np.float64)
# compute phi(order + 1, r)
emb_data2 = entropy_embed(x, order + 1, 1)
count2 = KDTree(emb_data2, metric=metric).query_radius(emb_data2, r,
count_only=True
).astype(np.float64)
if approximate:
phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
else:
phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
return phi
def entropy_app_entropy(x, order=2, metric='chebyshev'):
phi = entropy_app_samp_entropy(x, order=order, metric=metric, approximate=True)
return np.subtract(phi[0], phi[1])
def entropy_sample_entropy(x, order=2, metric='chebyshev'):
x = np.asarray(x, dtype=np.float64)
phi = entropy_app_samp_entropy(x, order=order, metric=metric,
approximate=False)
return -np.log(np.divide(phi[1], phi[0]))
# =============================================================================
# entro-py
# =============================================================================
def entro_py_sampen(x, dim, r, scale=True):
return entro_py_entropy(x, dim, r, scale=scale)
def entro_py_cross_sampen(x1, x2, dim, r, scale=True):
return entro_py_entropy([x1, x2], dim, r, scale)
def entro_py_fuzzyen(x, dim, r, n, scale=True):
return entro_py_entropy(x, dim, r, n=n, scale=scale, remove_baseline=True)
def entro_py_cross_fuzzyen(x1, x2, dim, r, n, scale=True):
return entro_py_entropy([x1, x2], dim, r, n, scale=scale, remove_baseline=True)
def entro_py_pattern_mat(x, m):
x = np.asarray(x).ravel()
if m == 1:
return x
else:
N = len(x)
patterns = np.zeros((m, N-m+1))
for i in range(m):
patterns[i, :] = x[i:N-m+i+1]
return patterns
def entro_py_entropy(x, dim, r, n=1, scale=True, remove_baseline=False):
fuzzy = True if remove_baseline else False
cross = True if type(x) == list else False
N = len(x[0]) if cross else len(x)
if scale:
if cross:
x = [entro_py_scale(np.copy(x[0])), entro_py_scale(np.copy(x[1]))]
else:
x = entro_py_scale(np.copy(x))
phi = [0, 0] # phi(m), phi(m+1)
for j in [0, 1]:
m = dim + j
npat = N-dim # https://github.com/ixjlyons/entro-py/pull/2/files
if cross:
# patterns = [entro_py_pattern_mat(x[0], m), entro_py_pattern_mat(x[1], m)]
patterns = [entro_py_pattern_mat(x[0], m)[:, :npat], entro_py_pattern_mat(x[1], m)[:, :npat]] # https://github.com/ixjlyons/entro-py/pull/2/files
else:
# patterns = entro_py_pattern_mat(x, m)
patterns = entro_py_pattern_mat(x, m)[:, :npat]
if remove_baseline:
if cross:
patterns[0] = entro_py_remove_baseline(patterns[0], axis=0)
patterns[1] = entro_py_remove_baseline(patterns[1], axis=0)
else:
patterns = entro_py_remove_baseline(patterns, axis=0)
# count = np.zeros(N-m) # https://github.com/ixjlyons/entro-py/pull/2/files
# for i in range(N-m): # https://github.com/ixjlyons/entro-py/pull/2/files
count = np.zeros(npat)
for i in range(npat):
if cross:
if m == 1:
sub = patterns[1][i]
else:
sub = patterns[1][:, [i]]
dist = np.max(np.abs(patterns[0] - sub), axis=0)
else:
if m == 1:
sub = patterns[i]
else:
sub = patterns[:, [i]]
dist = np.max(np.abs(patterns - sub), axis=0)
if fuzzy:
sim = np.exp(-np.power(dist, n) / r)
else:
sim = dist < r
count[i] = np.sum(sim) - 1
# phi[j] = np.mean(count) / (N-m-1)
phi[j] = np.mean(count) / (N-dim-1) # https://github.com/ixjlyons/entro-py/pull/2/files
return np.log(phi[0] / phi[1])
def entro_py_scale(x, axis=None):
x = entro_py_remove_baseline(x, axis=axis)
x /= np.std(x, ddof=1, axis=axis, keepdims=True)
return x
def entro_py_remove_baseline(x, axis=None):
x -= np.mean(x, axis=axis, keepdims=True)
return x
# =============================================================================
# MultiscaleEntropy https://github.com/reatank/MultiscaleEntropy/blob/master/MultiscaleEntropy/mse.py
# =============================================================================
import math
from collections.abc import Iterable
def MultiscaleEntropy_init_return_type(return_type):
if return_type == 'dict':
return {}
else:
return []
def MultiscaleEntropy_check_type(x, num_type, name):
if isinstance(x, num_type):
tmp = [x]
elif not isinstance(x, Iterable):
raise ValueError(name + ' should be a ' + num_type.__name__ + ' or an iterator of ' + num_type.__name__)
else:
tmp = []
for i in x:
tmp.append(i)
if not isinstance(i, num_type):
raise ValueError(name + ' should be a ' + num_type.__name__ + ' or an iterator of ' + num_type.__name__)
return tmp
# mean of separate, non-overlapping intervals of x (coarse-graining)
def MultiscaleEntropy_coarse_grain(x, scale_factor):
x = np.array(x)
x_len = len(x)
if x_len % scale_factor:
padded_len = (1+int(x_len/scale_factor))*scale_factor
else:
padded_len = x_len
tmp_x = np.zeros(padded_len)
tmp_x[:x_len] = x
tmp_x = np.reshape(tmp_x, (int(padded_len/scale_factor), scale_factor))
ans = np.reshape(np.sum(tmp_x, axis=1), (-1))/scale_factor
return ans
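# Editor-added sketch (not part of the original test helpers): demonstrates that
# MultiscaleEntropy_coarse_grain averages non-overlapping blocks of length
# `scale_factor`, zero-padding the tail when len(x) is not a multiple of it.
def _editor_coarse_grain_example():
    assert np.allclose(MultiscaleEntropy_coarse_grain([1, 2, 3, 4, 5, 6], 2), [1.5, 3.5, 5.5])
    # 5 elements are padded to 6 with a trailing 0, so the last average is biased
    assert np.allclose(MultiscaleEntropy_coarse_grain([1, 2, 3, 4, 5], 2), [1.5, 3.5, 2.5])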
def MultiscaleEntropy_sample_entropy(x, m=[2], r=[0.15], sd=None, return_type='dict', safe_mode=False):
# type checking
if not safe_mode:
m = MultiscaleEntropy_check_type(m, int, 'm')
r = MultiscaleEntropy_check_type(r, float, 'r')
        if sd is not None and not isinstance(sd, (float, int)):
raise ValueError('sd should be a number')
try:
x = np.array(x)
except:
raise ValueError('x should be a sequence of numbers')
# value checking
if len(x) < max(m):
raise ValueError('the max m is bigger than x\'s length')
    if sd is None:
sd = np.sqrt(np.var(x))
ans = MultiscaleEntropy_init_return_type(return_type)
for i, rr in enumerate(r):
threshold = rr * sd
if return_type == 'dict':
ans[rr] = MultiscaleEntropy_init_return_type(return_type)
else:
ans.append(MultiscaleEntropy_init_return_type(return_type))
count = {}
tmp_m = []
for mm in m:
tmp_m.append(mm)
tmp_m.append(mm+1)
tmp_m = list(set(tmp_m))
for mm in tmp_m:
count[mm] = 0
for j in range(1, len(x)-min(m)+1):
cont = 0
for inc in range(0, len(x)-j):
if abs(x[inc]-x[j+inc]) < threshold:
cont += 1
elif cont > 0:
for mm in tmp_m:
tmp = cont - mm + 1
count[mm] += tmp if tmp > 0 else 0
cont = 0
if cont > 0:
for mm in tmp_m:
tmp = cont - mm + 1
count[mm] += tmp if tmp > 0 else 0
for mm in m:
if count[mm+1] == 0 or count[mm] == 0:
t = len(x)-mm+1
tmp = -math.log(1/(t*(t-1)))
else:
tmp = -math.log(count[mm+1]/count[mm])
if return_type == 'dict':
ans[rr][mm] = tmp
else:
ans[i].append(tmp)
return ans
def MultiscaleEntropy_mse(x, scale_factor=[i for i in range(1,21)], m=[2], r=[0.15], return_type='dict', safe_mode=False):
if not safe_mode:
m = MultiscaleEntropy_check_type(m, int, 'm')
r = MultiscaleEntropy_check_type(r, float, 'r')
scale_factor = MultiscaleEntropy_check_type(scale_factor, int, 'scale_factor')
try:
x = np.array(x)
except:
        raise ValueError('x should be a sequence of numbers')
if max(scale_factor) > len(x):
raise ValueError('the max scale_factor is bigger than x\'s length')
# calculation
sd = np.sqrt(np.var(x))
ms_en = MultiscaleEntropy_init_return_type(return_type)
for s_f in scale_factor:
y = MultiscaleEntropy_coarse_grain(x, s_f)
if return_type == 'dict':
ms_en[s_f] = MultiscaleEntropy_sample_entropy(y, m, r, sd, 'dict', True)
else:
ms_en.append(MultiscaleEntropy_sample_entropy(y, m, r, sd, 'list', True))
if return_type == "list":
ms_en = [i[0] for i in ms_en]
ms_en = [i[0] for i in ms_en]
return ms_en
| true
| true
|
f712f324f95489601d591ac5a91a4fc13b4cd06d
| 63,499
|
py
|
Python
|
tests/models/tapas/test_tokenization_tapas.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | 5
|
2020-09-01T09:15:48.000Z
|
2020-09-15T03:25:05.000Z
|
tests/models/tapas/test_tokenization_tapas.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | null | null | null |
tests/models/tapas/test_tokenization_tapas.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | 3
|
2020-08-20T04:46:25.000Z
|
2020-10-14T08:39:13.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import shutil
import tempfile
import unittest
from typing import List
import numpy as np
import pandas as pd
from transformers import AddedToken
from transformers.models.tapas.tokenization_tapas import (
VOCAB_FILES_NAMES,
BasicTokenizer,
TapasTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_pandas,
require_scatter,
require_tensorflow_probability,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english, merge_model_tokenizer_mappings
@require_tokenizers
@require_pandas
class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = TapasTokenizer
test_rust_tokenizer = False
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
test_seq2seq = False
def get_table(
self,
tokenizer: TapasTokenizer,
length=5,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
if length == 0:
data = {}
else:
data = {toks[0]: [toks[tok] for tok in range(1, length)]}
table = pd.DataFrame.from_dict(data)
return table
def get_table_and_query(
self,
tokenizer: TapasTokenizer,
length=5,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
table = self.get_table(tokenizer, length=length - 3)
query = " ".join(toks[:3])
return table, query
def get_clean_sequence(
self,
tokenizer: TapasTokenizer,
with_prefix_space=False,
max_length=20,
min_length=5,
empty_table: bool = False,
add_special_tokens: bool = True,
return_table_and_query: bool = False,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
if empty_table:
table = pd.DataFrame.from_dict({})
query = " ".join(toks[:min_length])
else:
data = {toks[0]: [toks[tok] for tok in range(1, min_length - 3)]}
table = pd.DataFrame.from_dict(data)
query = " ".join(toks[:3])
output_ids = tokenizer.encode(table, query, add_special_tokens=add_special_tokens)
output_txt = tokenizer.decode(output_ids)
assert len(output_ids) >= min_length, "Update the code to generate the sequences so that they are larger"
assert len(output_ids) <= max_length, "Update the code to generate the sequences so that they are smaller"
if return_table_and_query:
return output_txt, output_ids, table, query
return output_txt, output_ids
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
@require_tensorflow_probability
def test_tf_encode_plus_sent_to_model(self):
super().test_tf_encode_plus_sent_to_model()
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
# With lower casing
tokenizer = self.get_tokenizer(do_lower_case=True)
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual(
[tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], ["[EMPTY]"], ["[UNK]"]]
)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("google/tapas-base-finetuned-wtq")
empty_table = self.get_table(tokenizer, length=0)
table = self.get_table(tokenizer, length=10)
text = tokenizer.encode(table, add_special_tokens=False)
text_2 = tokenizer.encode(empty_table, "multi-sequence build", add_special_tokens=False)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_pair == [101] + text + [102] + text_2
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_add_special_tokens(self):
tokenizers: List[TapasTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
input_table = self.get_table(tokenizer, length=0)
special_token = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(input_table, special_token, add_special_tokens=False)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_add_tokens_tokenizer(self):
tokenizers: List[TapasTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode(table, "aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
table,
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l",
add_special_tokens=False,
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(table, input, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
def test_encode_plus_with_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence = "Sequence"
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_size = 10
padding_idx = tokenizer.pad_token_id
token_type_padding_idx = tokenizer.pad_token_type_id
encoded_sequence = tokenizer.encode_plus(table, sequence, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus(
table,
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
assert sequence_length == not_padded_sequence_length
assert input_ids == not_padded_input_ids
assert special_tokens_mask == not_padded_special_tokens_mask
not_padded_sequence = tokenizer.encode_plus(
table,
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
assert sequence_length == not_padded_sequence_length
assert input_ids == not_padded_input_ids
assert special_tokens_mask == not_padded_special_tokens_mask
# Test right padding
tokenizer.padding_side = "right"
right_padded_sequence = tokenizer.encode_plus(
table,
sequence,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
assert sequence_length + padding_size == right_padded_sequence_length
assert input_ids + [padding_idx] * padding_size == right_padded_input_ids
assert special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask
# Test left padding
tokenizer.padding_side = "left"
left_padded_sequence = tokenizer.encode_plus(
table,
sequence,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
assert sequence_length + padding_size == left_padded_sequence_length
assert [padding_idx] * padding_size + input_ids == left_padded_input_ids
assert [1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert (
token_type_ids + [[token_type_padding_idx] * 7] * padding_size == right_padded_token_type_ids
)
assert [[token_type_padding_idx] * 7] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
assert attention_mask + [0] * padding_size == right_padded_attention_mask
assert [0] * padding_size + attention_mask == left_padded_attention_mask
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
input_text, output_text = self.get_input_output_texts(tokenizer)
tokens = tokenizer.tokenize(input_text)
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(table, input_text, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
self.assertEqual(text_2, output_text)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table, query = self.get_table_and_query(tokenizer)
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus(table, query, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
@unittest.skip("TAPAS tokenizer only handles two sequences.")
def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip("TAPAS tokenizer only handles two sequences.")
def test_maximum_encoding_length_single_input(self):
pass
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table, query = self.get_table_and_query(tokenizer)
sequences = tokenizer.encode(table, query, add_special_tokens=False)
attached_sequences = tokenizer.encode(table, query, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_padding_to_max_length(self):
"""We keep this test for backward compatibility but it should be removed when `pad_to_max_length` will be deprecated"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer)
sequence = "Sequence"
padding_size = 10
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
# FIXME: the next line should be padding(max_length) to avoid warning
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding=True
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# Check that nothing is done when a maximum length is not specified
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence, pad_to_max_length=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
def test_call(self):
        # Tests that __call__ wraps encode_plus and batch_encode_plus
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
# Test not batched
table = self.get_table(tokenizer, length=0)
encoded_sequences_1 = tokenizer.encode_plus(table, sequences[0])
encoded_sequences_2 = tokenizer(table, sequences[0])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test not batched pairs
table = self.get_table(tokenizer, length=10)
encoded_sequences_1 = tokenizer.encode_plus(table, sequences[1])
encoded_sequences_2 = tokenizer(table, sequences[1])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
table = self.get_table(tokenizer, length=0)
encoded_sequences_1 = tokenizer.batch_encode_plus(table, sequences)
encoded_sequences_2 = tokenizer(table, sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
# Tests that all encoded values have the correct size
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
encoded_sequences = [tokenizer.encode_plus(table, sequence) for sequence in sequences]
encoded_sequences_batch = tokenizer.batch_encode_plus(table, sequences, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences_padded = [
tokenizer.encode_plus(table, sequence, max_length=maximum_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(table, sequences, padding=True)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
                # check 'longest' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(table, sequences, padding=True)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
table, sequences, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
                # check 'no_padding' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(table, sequences, padding=False)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
table, sequences, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@unittest.skip("batch_encode_plus does not handle overflowing tokens.")
def test_batch_encode_plus_overflowing_tokens(self):
pass
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch_encode_plus and encode_plus
# Right padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(table, sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
table, sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
# Left padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(table, sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
table, sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
if tokenizer.pad_token is None:
self.skipTest("No padding token.")
else:
empty_tokens = tokenizer(table, padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer(table, "This is a sample input", padding=True, pad_to_multiple_of=8)
for key, value in empty_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer(table, "This", pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# Should also work with truncation
normal_tokens = tokenizer(table, "This", padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
@unittest.skip("TAPAS cannot handle `prepare_for_model` without passing by `encode_plus` or `batch_encode_plus`")
def test_prepare_for_model(self):
pass
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence_0 = "Encode this."
empty_table = self.get_table(tokenizer, length=0)
table = self.get_table(tokenizer, length=10)
encoded_sequence = tokenizer.encode(empty_table, sequence_0, add_special_tokens=False)
encoded_sequence += tokenizer.encode(table, "", add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
table,
sequence_0,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence_0 = "Encode this."
# Testing single inputs
encoded_sequence = tokenizer.encode(table, sequence_0, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
table, sequence_0, add_special_tokens=True, return_special_tokens_mask=True
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_save_and_load_tokenizer(self):
# safety check on max_len default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
table = self.get_table(tokenizer, length=0)
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00E9d,running"
before_tokens = tokenizer.encode(table, sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(table, sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
@unittest.skip("Not implemented")
def test_right_and_left_truncation(self):
pass
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence = "Sequence"
padding_size = 10
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(table, sequence, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(table, sequence, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
empty_table = self.get_table(tokenizer, length=0)
seq_0 = "Test this method."
                # We want sequence 0 and sequence 1 to be tagged with
                # token type ids 0 and 1 respectively
                # (regardless of whether the model uses token type ids)
                # We rely on this assumption in the QA pipeline among other places
output = tokenizer(empty_table, seq_0, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that each token type ID has 7 values
self.assertTrue(all(len(token_type_ids) == 7 for token_type_ids in output["token_type_ids"]))
# Do the same test as modeling common.
self.assertIn(0, output["token_type_ids"][0])
@require_torch
@slow
@require_scatter
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
assert (
(model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
if is_using_common_embeddings
else True
)
# Build sequence
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
table = self.get_table(tokenizer, length=0)
encoded_sequence = tokenizer.encode_plus(table, sequence, return_tensors="pt")
batch_encoded_sequence = tokenizer.batch_encode_plus(table, [sequence, sequence], return_tensors="pt")
# This should not fail
with torch.no_grad(): # saves some time
model(**encoded_sequence)
model(**batch_encoded_sequence)
@unittest.skip("TAPAS doesn't handle pre-tokenized inputs.")
def test_pretokenized_inputs(self):
pass
@slow
def test_tapas_truncation_integration_test(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = [
"When was Brad Pitt born?",
"Which actor appeared in the least number of movies?",
"What is the average number of movies?",
]
table = pd.DataFrame.from_dict(data)
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", model_max_length=512)
for i in range(12):
            # Even the table headers do not fit within max_length, so an error is raised
with self.assertRaises(ValueError):
tokenizer.encode(table=table, query=queries[0], max_length=i, truncation="drop_rows_to_fit")
for i in range(12, 512):
new_encoded_inputs = tokenizer.encode(
table=table, query=queries[0], max_length=i, truncation="drop_rows_to_fit"
)
            # Ensure that the number of input IDs does not exceed the max length defined.
self.assertLessEqual(len(new_encoded_inputs), i)
tokenizer.model_max_length = 20
new_encoded_inputs = tokenizer.encode(table=table, query=queries[0], truncation=True)
dropped_encoded_inputs = tokenizer.encode(table=table, query=queries[0], truncation="drop_rows_to_fit")
# Ensure that the input IDs are still truncated when no max_length is specified
self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
self.assertLessEqual(len(new_encoded_inputs), 20)
@slow
def test_min_max_question_length(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = "When was Brad Pitt born?"
table = pd.DataFrame.from_dict(data)
# test max_question_length
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", max_question_length=2)
encoding = tokenizer(table=table, queries=queries)
# query should not be tokenized as it's longer than the specified max_question_length
expected_results = [101, 102]
self.assertListEqual(encoding.input_ids[:2], expected_results)
# test min_question_length
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", min_question_length=30)
encoding = tokenizer(table=table, queries=queries)
# query should not be tokenized as it's shorter than the specified min_question_length
expected_results = [101, 102]
self.assertListEqual(encoding.input_ids[:2], expected_results)
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
table = self.get_table(tokenizer, length=0)
                # A Tensor cannot be built from sequences which are not the same size
self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors="pt")
self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors="tf")
if tokenizer.pad_token_id is None:
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
table,
sequences,
padding=True,
return_tensors="pt",
)
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
table,
sequences,
padding="longest",
return_tensors="tf",
)
else:
pytorch_tensor = tokenizer.batch_encode_plus(table, sequences, padding=True, return_tensors="pt")
tensorflow_tensor = tokenizer.batch_encode_plus(
table, sequences, padding="longest", return_tensors="tf"
)
encoded_sequences = tokenizer.batch_encode_plus(table, sequences, padding=True)
for key in encoded_sequences.keys():
pytorch_value = pytorch_tensor[key].tolist()
tensorflow_value = tensorflow_tensor[key].numpy().tolist()
encoded_value = encoded_sequences[key]
self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
@slow
def test_tapas_integration_test(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = [
"When was Brad Pitt born?",
"Which actor appeared in the least number of movies?",
"What is the average number of movies?",
]
table = pd.DataFrame.from_dict(data)
tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq", model_max_length=512)
# fmt: off
expected_results = {'input_ids':[101,2043,2001,8226,15091,2141,1029,102,5889,2287,2193,1997,5691,3058,1997,4182,8226,15091,5179,6584,2324,2285,3699,14720,4487,6178,9488,3429,5187,2340,2281,3326,2577,18856,7828,3240,5354,6353,1020,2089,3777],'attention_mask':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],'token_type_ids':[[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[1,1,0,0,0,0,0],[1,2,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,1,1,0,0,0,0],[1,1,1,0,0,0,0],[1,2,1,0,2,2,0],[1,3,1,0,3,1,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,2,2,0,1,3,0],[1,3,2,0,1,3,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,2,3,0,3,1,0],[1,3,3,0,2,2,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0]]} # noqa: E231
# fmt: on
new_encoded_inputs = tokenizer.encode_plus(table=table, query=queries[0])
self.assertDictEqual(dict(new_encoded_inputs), expected_results)
@slow
def test_full_tokenizer(self):
data = [
["Pos", "No", "Driver", "Team", "Laps", "Time/Retired", "Grid", "Points"],
["1", "32", "Patrick Carpentier", "Team Player's", "87", "1:48:11.023", "1", "22"],
["2", "1", "Bruno Junqueira", "Newman/Haas Racing", "87", "+0.8 secs", "2", "17"],
["3", "3", "Paul Tracy", "Team Player's", "87", "+28.6 secs", "3", "14"],
["4", "9", "Michel Jourdain, Jr.", "Team Rahal", "87", "+40.8 secs", "13", "12"],
["5", "34", "Mario Haberfeld", "Mi-Jack Conquest Racing", "87", "+42.1 secs", "6", "10"],
["6", "20", "Oriol Servia", "Patrick Racing", "87", "+1:00.2", "10", "8"],
["7", "51", "Adrian Fernandez", "Fernandez Racing", "87", "+1:01.4", "5", "6"],
["8", "12", "Jimmy Vasser", "American Spirit Team Johansson", "87", "+1:01.8", "8", "5"],
["9", "7", "Tiago Monteiro", "Fittipaldi-Dingman Racing", "86", "+ 1 Lap", "15", "4"],
["10", "55", "Mario Dominguez", "Herdez Competition", "86", "+ 1 Lap", "11", "3"],
["11", "27", "Bryan Herta", "PK Racing", "86", "+ 1 Lap", "12", "2"],
["12", "31", "Ryan Hunter-Reay", "American Spirit Team Johansson", "86", "+ 1 Lap", "17", "1"],
["13", "19", "Joel Camathias", "Dale Coyne Racing", "85", "+ 2 Laps", "18", "0"],
["14", "33", "Alex Tagliani", "Rocketsports Racing", "85", "+ 2 Laps", "14", "0"],
["15", "4", "Roberto Moreno", "Herdez Competition", "85", "+ 2 Laps", "9", "0"],
["16", "11", "Geoff Boss", "Dale Coyne Racing", "83", "Mechanical", "19", "0"],
["17", "2", "Sebastien Bourdais", "Newman/Haas Racing", "77", "Mechanical", "4", "0"],
["18", "15", "Darren Manning", "Walker Racing", "12", "Mechanical", "7", "0"],
["19", "5", "Rodolfo Lavin", "Walker Racing", "10", "Mechanical", "16", "0"],
]
query = "what were the drivers names?"
table = pd.DataFrame.from_records(data[1:], columns=data[0])
tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq", model_max_length=512)
model_inputs = tokenizer(table, query, padding="max_length")
input_ids = model_inputs["input_ids"]
token_type_ids = np.array(model_inputs["token_type_ids"])
segment_ids = token_type_ids[:, 0]
column_ids = token_type_ids[:, 1]
row_ids = token_type_ids[:, 2]
# fmt: off
expected_results = {'input_ids':[101,2054,2020,1996,6853,3415,1029,102,13433,2015,2053,4062,2136,10876,2051,1013,3394,8370,2685,1015,3590,4754,29267,4765,3771,2136,2447,1005,1055,6584,1015,1024,4466,1024,2340,1012,6185,2509,1015,2570,1016,1015,10391,12022,4226,7895,10625,1013,22996,3868,6584,1009,1014,1012,1022,10819,2015,1016,2459,1017,1017,2703,10555,2136,2447,1005,1055,6584,1009,2654,1012,1020,10819,2015,1017,2403,1018,1023,8709,8183,3126,21351,2078,1010,3781,1012,2136,10958,8865,6584,1009,2871,1012,1022,10819,2015,2410,2260,1019,4090,7986,5292,5677,8151,2771,1011,2990,9187,3868,6584,1009,4413,1012,1015,10819,2015,1020,2184,1020,2322,2030,20282,14262,9035,4754,3868,6584,1009,1015,1024,4002,1012,1016,2184,1022,1021,4868,7918,12023,12023,3868,6584,1009,1015,1024,5890,1012,1018,1019,1020,1022,2260,5261,12436,18116,2137,4382,2136,26447,6584,1009,1015,1024,5890,1012,1022,1022,1019,1023,1021,27339,3995,10125,9711,4906,25101,24657,1011,22033,2386,3868,6564,1009,1015,5001,2321,1018,2184,4583,7986,14383,2075,29488,14906,9351,2971,6564,1009,1015,5001,2340,1017,2340,2676,8527,2014,2696,1052,2243,3868,6564,1009,1015,5001,2260,1016,2260,2861,4575,4477,1011,2128,4710,2137,4382,2136,26447,6564,1009,1015,5001,2459,1015,2410,2539,8963,11503,25457,3022,8512,2522,9654,3868,5594,1009,1016,10876,2324,1014,2403,3943,4074,6415,15204,2072,12496,25378,3868,5594,1009,1016,10876,2403,1014,2321,1018,10704,17921,14906,9351,2971,5594,1009,1016,10876,1023,1014,2385,2340,14915,5795,8512,2522,9654,3868,6640,6228,2539,1014,2459,1016,28328,8945,3126,21351,2015,10625,1013,22996,3868,6255,6228,1018,1014,2324,2321,12270,11956,5232,3868,2260,6228,1021,1014,2539,1019,8473,28027,2080,2474,6371,5232,3868,2184,6228,2385,1014,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'column_ids':[0,0,0,0,0,0,0,0,1,1,2,3,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,3,3,3,3,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,7,8,1,2,3,3,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,5,6,7,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'row_ids':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,
11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,19,19,19,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'segment_ids':[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]} # noqa: E231
# fmt: on
self.assertListEqual(input_ids, expected_results["input_ids"])
self.assertListEqual(segment_ids.tolist(), expected_results["segment_ids"])
self.assertListEqual(column_ids.tolist(), expected_results["column_ids"])
self.assertListEqual(row_ids.tolist(), expected_results["row_ids"])
@unittest.skip("Skip this test while all models are still to be uploaded.")
def test_pretrained_model_lists(self):
pass
@unittest.skip("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
pass
| 51.044212
| 5363
| 0.605632
|
import inspect
import os
import shutil
import tempfile
import unittest
from typing import List
import numpy as np
import pandas as pd
from transformers import AddedToken
from transformers.models.tapas.tokenization_tapas import (
VOCAB_FILES_NAMES,
BasicTokenizer,
TapasTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_pandas,
require_scatter,
require_tensorflow_probability,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english, merge_model_tokenizer_mappings
@require_tokenizers
@require_pandas
class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = TapasTokenizer
test_rust_tokenizer = False
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
test_seq2seq = False
def get_table(
self,
tokenizer: TapasTokenizer,
length=5,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
if length == 0:
data = {}
else:
data = {toks[0]: [toks[tok] for tok in range(1, length)]}
table = pd.DataFrame.from_dict(data)
return table
def get_table_and_query(
self,
tokenizer: TapasTokenizer,
length=5,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
table = self.get_table(tokenizer, length=length - 3)
query = " ".join(toks[:3])
return table, query
def get_clean_sequence(
self,
tokenizer: TapasTokenizer,
with_prefix_space=False,
max_length=20,
min_length=5,
empty_table: bool = False,
add_special_tokens: bool = True,
return_table_and_query: bool = False,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
if empty_table:
table = pd.DataFrame.from_dict({})
query = " ".join(toks[:min_length])
else:
data = {toks[0]: [toks[tok] for tok in range(1, min_length - 3)]}
table = pd.DataFrame.from_dict(data)
query = " ".join(toks[:3])
output_ids = tokenizer.encode(table, query, add_special_tokens=add_special_tokens)
output_txt = tokenizer.decode(output_ids)
assert len(output_ids) >= min_length, "Update the code to generate the sequences so that they are larger"
assert len(output_ids) <= max_length, "Update the code to generate the sequences so that they are smaller"
if return_table_and_query:
return output_txt, output_ids, table, query
return output_txt, output_ids
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
@require_tensorflow_probability
def test_tf_encode_plus_sent_to_model(self):
super().test_tf_encode_plus_sent_to_model()
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
tokenizer = self.get_tokenizer(do_lower_case=True)
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
self.assertListEqual(
[tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], ["[EMPTY]"], ["[UNK]"]]
)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("google/tapas-base-finetuned-wtq")
empty_table = self.get_table(tokenizer, length=0)
table = self.get_table(tokenizer, length=10)
text = tokenizer.encode(table, add_special_tokens=False)
text_2 = tokenizer.encode(empty_table, "multi-sequence build", add_special_tokens=False)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_pair == [101] + text + [102] + text_2
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_add_special_tokens(self):
tokenizers: List[TapasTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
input_table = self.get_table(tokenizer, length=0)
special_token = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(input_table, special_token, add_special_tokens=False)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_add_tokens_tokenizer(self):
tokenizers: List[TapasTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode(table, "aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
table,
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l",
add_special_tokens=False,
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(table, input, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
def test_encode_plus_with_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence = "Sequence"
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequence)
padding_size = 10
padding_idx = tokenizer.pad_token_id
token_type_padding_idx = tokenizer.pad_token_type_id
encoded_sequence = tokenizer.encode_plus(table, sequence, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus(
table,
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
assert sequence_length == not_padded_sequence_length
assert input_ids == not_padded_input_ids
assert special_tokens_mask == not_padded_special_tokens_mask
not_padded_sequence = tokenizer.encode_plus(
table,
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
assert sequence_length == not_padded_sequence_length
assert input_ids == not_padded_input_ids
assert special_tokens_mask == not_padded_special_tokens_mask
tokenizer.padding_side = "right"
right_padded_sequence = tokenizer.encode_plus(
table,
sequence,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
assert sequence_length + padding_size == right_padded_sequence_length
assert input_ids + [padding_idx] * padding_size == right_padded_input_ids
assert special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask
tokenizer.padding_side = "left"
left_padded_sequence = tokenizer.encode_plus(
table,
sequence,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
assert sequence_length + padding_size == left_padded_sequence_length
assert [padding_idx] * padding_size + input_ids == left_padded_input_ids
assert [1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert (
token_type_ids + [[token_type_padding_idx] * 7] * padding_size == right_padded_token_type_ids
)
assert [[token_type_padding_idx] * 7] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
assert attention_mask + [0] * padding_size == right_padded_attention_mask
assert [0] * padding_size + attention_mask == left_padded_attention_mask
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
input_text, output_text = self.get_input_output_texts(tokenizer)
tokens = tokenizer.tokenize(input_text)
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(table, input_text, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
self.assertEqual(text_2, output_text)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table, query = self.get_table_and_query(tokenizer)
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus(table, query, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
@unittest.skip("TAPAS tokenizer only handles two sequences.")
def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip("TAPAS tokenizer only handles two sequences.")
def test_maximum_encoding_length_single_input(self):
pass
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table, query = self.get_table_and_query(tokenizer)
sequences = tokenizer.encode(table, query, add_special_tokens=False)
attached_sequences = tokenizer.encode(table, query, add_special_tokens=True)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_padding_to_max_length(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer)
sequence = "Sequence"
padding_size = 10
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding=True
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence, pad_to_max_length=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
def test_call(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
table = self.get_table(tokenizer, length=0)
encoded_sequences_1 = tokenizer.encode_plus(table, sequences[0])
encoded_sequences_2 = tokenizer(table, sequences[0])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
table = self.get_table(tokenizer, length=10)
encoded_sequences_1 = tokenizer.encode_plus(table, sequences[1])
encoded_sequences_2 = tokenizer(table, sequences[1])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
table = self.get_table(tokenizer, length=0)
encoded_sequences_1 = tokenizer.batch_encode_plus(table, sequences)
encoded_sequences_2 = tokenizer(table, sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
encoded_sequences = [tokenizer.encode_plus(table, sequence) for sequence in sequences]
encoded_sequences_batch = tokenizer.batch_encode_plus(table, sequences, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences_padded = [
tokenizer.encode_plus(table, sequence, max_length=maximum_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(table, sequences, padding=True)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(table, sequences, padding=True)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
table, sequences, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(table, sequences, padding=False)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
table, sequences, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@unittest.skip("batch_encode_plus does not handle overflowing tokens.")
def test_batch_encode_plus_overflowing_tokens(self):
pass
def test_batch_encode_plus_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(table, sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
table, sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(table, sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
table, sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
if tokenizer.pad_token is None:
self.skipTest("No padding token.")
else:
empty_tokens = tokenizer(table, padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer(table, "This is a sample input", padding=True, pad_to_multiple_of=8)
for key, value in empty_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer(table, "This", pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer(table, "This", padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
@unittest.skip("TAPAS cannot handle `prepare_for_model` without passing by `encode_plus` or `batch_encode_plus`")
def test_prepare_for_model(self):
pass
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence_0 = "Encode this."
empty_table = self.get_table(tokenizer, length=0)
table = self.get_table(tokenizer, length=10)
encoded_sequence = tokenizer.encode(empty_table, sequence_0, add_special_tokens=False)
encoded_sequence += tokenizer.encode(table, "", add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
table,
sequence_0,
add_special_tokens=True,
return_special_tokens_mask=True,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence_0 = "Encode this."
encoded_sequence = tokenizer.encode(table, sequence_0, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
table, sequence_0, add_special_tokens=True, return_special_tokens_mask=True
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_save_and_load_tokenizer(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
table = self.get_table(tokenizer, length=0)
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00E9d,running"
before_tokens = tokenizer.encode(table, sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(table, sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
@unittest.skip("Not implemented")
def test_right_and_left_truncation(self):
pass
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(table, sequence, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(table, sequence, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
empty_table = self.get_table(tokenizer, length=0)
seq_0 = "Test this method."
# We want to have sequence 0 and sequence 1 are tagged
# respectively with 0 and 1 token_ids
# (regardless of whether the model use token type ids)
# We use this assumption in the QA pipeline among other place
output = tokenizer(empty_table, seq_0, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that each token type ID has 7 values
self.assertTrue(all(len(token_type_ids) == 7 for token_type_ids in output["token_type_ids"]))
# Do the same test as modeling common.
self.assertIn(0, output["token_type_ids"][0])
@require_torch
@slow
@require_scatter
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
assert (
(model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
if is_using_common_embeddings
else True
)
# Build sequence
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
table = self.get_table(tokenizer, length=0)
encoded_sequence = tokenizer.encode_plus(table, sequence, return_tensors="pt")
batch_encoded_sequence = tokenizer.batch_encode_plus(table, [sequence, sequence], return_tensors="pt")
# This should not fail
with torch.no_grad(): # saves some time
model(**encoded_sequence)
model(**batch_encoded_sequence)
@unittest.skip("TAPAS doesn't handle pre-tokenized inputs.")
def test_pretokenized_inputs(self):
pass
@slow
def test_tapas_truncation_integration_test(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = [
"When was Brad Pitt born?",
"Which actor appeared in the least number of movies?",
"What is the average number of movies?",
]
table = pd.DataFrame.from_dict(data)
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", model_max_length=512)
for i in range(12):
with self.assertRaises(ValueError):
tokenizer.encode(table=table, query=queries[0], max_length=i, truncation="drop_rows_to_fit")
for i in range(12, 512):
new_encoded_inputs = tokenizer.encode(
table=table, query=queries[0], max_length=i, truncation="drop_rows_to_fit"
)
self.assertLessEqual(len(new_encoded_inputs), i)
tokenizer.model_max_length = 20
new_encoded_inputs = tokenizer.encode(table=table, query=queries[0], truncation=True)
dropped_encoded_inputs = tokenizer.encode(table=table, query=queries[0], truncation="drop_rows_to_fit")
self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
self.assertLessEqual(len(new_encoded_inputs), 20)
@slow
def test_min_max_question_length(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = "When was Brad Pitt born?"
table = pd.DataFrame.from_dict(data)
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", max_question_length=2)
encoding = tokenizer(table=table, queries=queries)
expected_results = [101, 102]
self.assertListEqual(encoding.input_ids[:2], expected_results)
# test min_question_length
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", min_question_length=30)
encoding = tokenizer(table=table, queries=queries)
# query should not be tokenized as it's shorter than the specified min_question_length
expected_results = [101, 102]
self.assertListEqual(encoding.input_ids[:2], expected_results)
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
table = self.get_table(tokenizer, length=0)
self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors="pt")
self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors="tf")
if tokenizer.pad_token_id is None:
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
table,
sequences,
padding=True,
return_tensors="pt",
)
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
table,
sequences,
padding="longest",
return_tensors="tf",
)
else:
pytorch_tensor = tokenizer.batch_encode_plus(table, sequences, padding=True, return_tensors="pt")
tensorflow_tensor = tokenizer.batch_encode_plus(
table, sequences, padding="longest", return_tensors="tf"
)
encoded_sequences = tokenizer.batch_encode_plus(table, sequences, padding=True)
for key in encoded_sequences.keys():
pytorch_value = pytorch_tensor[key].tolist()
tensorflow_value = tensorflow_tensor[key].numpy().tolist()
encoded_value = encoded_sequences[key]
self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
@slow
def test_tapas_integration_test(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = [
"When was Brad Pitt born?",
"Which actor appeared in the least number of movies?",
"What is the average number of movies?",
]
table = pd.DataFrame.from_dict(data)
tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq", model_max_length=512)
expected_results = {'input_ids':[101,2043,2001,8226,15091,2141,1029,102,5889,2287,2193,1997,5691,3058,1997,4182,8226,15091,5179,6584,2324,2285,3699,14720,4487,6178,9488,3429,5187,2340,2281,3326,2577,18856,7828,3240,5354,6353,1020,2089,3777],'attention_mask':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],'token_type_ids':[[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[1,1,0,0,0,0,0],[1,2,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,1,1,0,0,0,0],[1,1,1,0,0,0,0],[1,2,1,0,2,2,0],[1,3,1,0,3,1,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,2,2,0,1,3,0],[1,3,2,0,1,3,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,2,3,0,3,1,0],[1,3,3,0,2,2,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0]]}
new_encoded_inputs = tokenizer.encode_plus(table=table, query=queries[0])
self.assertDictEqual(dict(new_encoded_inputs), expected_results)
@slow
def test_full_tokenizer(self):
data = [
["Pos", "No", "Driver", "Team", "Laps", "Time/Retired", "Grid", "Points"],
["1", "32", "Patrick Carpentier", "Team Player's", "87", "1:48:11.023", "1", "22"],
["2", "1", "Bruno Junqueira", "Newman/Haas Racing", "87", "+0.8 secs", "2", "17"],
["3", "3", "Paul Tracy", "Team Player's", "87", "+28.6 secs", "3", "14"],
["4", "9", "Michel Jourdain, Jr.", "Team Rahal", "87", "+40.8 secs", "13", "12"],
["5", "34", "Mario Haberfeld", "Mi-Jack Conquest Racing", "87", "+42.1 secs", "6", "10"],
["6", "20", "Oriol Servia", "Patrick Racing", "87", "+1:00.2", "10", "8"],
["7", "51", "Adrian Fernandez", "Fernandez Racing", "87", "+1:01.4", "5", "6"],
["8", "12", "Jimmy Vasser", "American Spirit Team Johansson", "87", "+1:01.8", "8", "5"],
["9", "7", "Tiago Monteiro", "Fittipaldi-Dingman Racing", "86", "+ 1 Lap", "15", "4"],
["10", "55", "Mario Dominguez", "Herdez Competition", "86", "+ 1 Lap", "11", "3"],
["11", "27", "Bryan Herta", "PK Racing", "86", "+ 1 Lap", "12", "2"],
["12", "31", "Ryan Hunter-Reay", "American Spirit Team Johansson", "86", "+ 1 Lap", "17", "1"],
["13", "19", "Joel Camathias", "Dale Coyne Racing", "85", "+ 2 Laps", "18", "0"],
["14", "33", "Alex Tagliani", "Rocketsports Racing", "85", "+ 2 Laps", "14", "0"],
["15", "4", "Roberto Moreno", "Herdez Competition", "85", "+ 2 Laps", "9", "0"],
["16", "11", "Geoff Boss", "Dale Coyne Racing", "83", "Mechanical", "19", "0"],
["17", "2", "Sebastien Bourdais", "Newman/Haas Racing", "77", "Mechanical", "4", "0"],
["18", "15", "Darren Manning", "Walker Racing", "12", "Mechanical", "7", "0"],
["19", "5", "Rodolfo Lavin", "Walker Racing", "10", "Mechanical", "16", "0"],
]
query = "what were the drivers names?"
table = pd.DataFrame.from_records(data[1:], columns=data[0])
tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq", model_max_length=512)
model_inputs = tokenizer(table, query, padding="max_length")
input_ids = model_inputs["input_ids"]
token_type_ids = np.array(model_inputs["token_type_ids"])
segment_ids = token_type_ids[:, 0]
column_ids = token_type_ids[:, 1]
row_ids = token_type_ids[:, 2]
expected_results = {'input_ids':[101,2054,2020,1996,6853,3415,1029,102,13433,2015,2053,4062,2136,10876,2051,1013,3394,8370,2685,1015,3590,4754,29267,4765,3771,2136,2447,1005,1055,6584,1015,1024,4466,1024,2340,1012,6185,2509,1015,2570,1016,1015,10391,12022,4226,7895,10625,1013,22996,3868,6584,1009,1014,1012,1022,10819,2015,1016,2459,1017,1017,2703,10555,2136,2447,1005,1055,6584,1009,2654,1012,1020,10819,2015,1017,2403,1018,1023,8709,8183,3126,21351,2078,1010,3781,1012,2136,10958,8865,6584,1009,2871,1012,1022,10819,2015,2410,2260,1019,4090,7986,5292,5677,8151,2771,1011,2990,9187,3868,6584,1009,4413,1012,1015,10819,2015,1020,2184,1020,2322,2030,20282,14262,9035,4754,3868,6584,1009,1015,1024,4002,1012,1016,2184,1022,1021,4868,7918,12023,12023,3868,6584,1009,1015,1024,5890,1012,1018,1019,1020,1022,2260,5261,12436,18116,2137,4382,2136,26447,6584,1009,1015,1024,5890,1012,1022,1022,1019,1023,1021,27339,3995,10125,9711,4906,25101,24657,1011,22033,2386,3868,6564,1009,1015,5001,2321,1018,2184,4583,7986,14383,2075,29488,14906,9351,2971,6564,1009,1015,5001,2340,1017,2340,2676,8527,2014,2696,1052,2243,3868,6564,1009,1015,5001,2260,1016,2260,2861,4575,4477,1011,2128,4710,2137,4382,2136,26447,6564,1009,1015,5001,2459,1015,2410,2539,8963,11503,25457,3022,8512,2522,9654,3868,5594,1009,1016,10876,2324,1014,2403,3943,4074,6415,15204,2072,12496,25378,3868,5594,1009,1016,10876,2403,1014,2321,1018,10704,17921,14906,9351,2971,5594,1009,1016,10876,1023,1014,2385,2340,14915,5795,8512,2522,9654,3868,6640,6228,2539,1014,2459,1016,28328,8945,3126,21351,2015,10625,1013,22996,3868,6255,6228,1018,1014,2324,2321,12270,11956,5232,3868,2260,6228,1021,1014,2539,1019,8473,28027,2080,2474,6371,5232,3868,2184,6228,2385,1014,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'column_ids':[0,0,0,0,0,0,0,0,1,1,2,3,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,3,3,3,3,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,7,8,1,2,3,3,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,5,6,7,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'row_ids':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,
11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,19,19,19,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'segment_ids':[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}
self.assertListEqual(input_ids, expected_results["input_ids"])
self.assertListEqual(segment_ids.tolist(), expected_results["segment_ids"])
self.assertListEqual(column_ids.tolist(), expected_results["column_ids"])
self.assertListEqual(row_ids.tolist(), expected_results["row_ids"])
@unittest.skip("Skip this test while all models are still to be uploaded.")
def test_pretrained_model_lists(self):
pass
@unittest.skip("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
pass
| true
| true
|
f712f35d11e9baf743377f65259f9f175c99f59d
| 9052
|
py
|
Python
|
challenges/multiview_matting/worker.py
|
Haiper-ai/kubric
|
d096ba178b8a78ea2c840ae121646d36281d31d9
|
[
"Apache-2.0"
] | null | null | null |
challenges/multiview_matting/worker.py
|
Haiper-ai/kubric
|
d096ba178b8a78ea2c840ae121646d36281d31d9
|
[
"Apache-2.0"
] | null | null | null |
challenges/multiview_matting/worker.py
|
Haiper-ai/kubric
|
d096ba178b8a78ea2c840ae121646d36281d31d9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Kubric Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Worker file for the Multi-View Background removal dataset.
This dataset creates a scene where a foreground object is to be distinguished
from the background. Foreground objects are borrowed from shapnet. Backgrounds
are from indoor scenes of polyhaven. All foreground objects are situated on top
of a "table" which is gernated to be random in color. Instead of background
removal with a single image. This dataset is special in that multiple images of
the foreground object (taken from different camera poses) are given. This
"multi-view" persepctive should be very helpful for background removal but is
currently underexplored in the literature.
"""
import logging
import numpy as np
import kubric as kb
from kubric.renderer import Blender as KubricRenderer
# --- WARNING: this path is not yet public
source_path = (
"gs://tensorflow-graphics/public/60c9de9c410be30098c297ac/ShapeNetCore.v2")
# --- CLI arguments (and modified defaults)
parser = kb.ArgumentParser()
parser.set_defaults(
seed=1,
frame_start=1,
frame_end=10,
width=128,
height=128,
)
parser.add_argument("--backgrounds_split",
choices=["train", "test"], default="train")
parser.add_argument("--dataset_mode",
choices=["easy", "hard"], default="hard")
parser.add_argument("--hdri_dir",
type=str, default="gs://mv_bckgr_removal/hdri_haven/4k/")
# "/mnt/mydata/images/"
FLAGS = parser.parse_args()
add_distractors = True  # assumed default for "easy" mode (the original only set the flag for "hard")
if FLAGS.dataset_mode == "hard":
  add_distractors = False
def add_hdri_dome(hdri_source, scene, background_hdri=None):
  dome_path = hdri_source.fetch("dome.blend")
  dome = kb.FileBasedObject(
      name="BackgroundDome",
      position=(0, 0, 0),
      static=True, background=True,
      simulation_filename=None,
      render_filename=str(dome_path),
      render_import_kwargs={
          "filepath": str(dome_path / "Object" / "Dome"),
          "directory": str(dome_path / "Object"),
          "filename": "Dome",
      })
  scene.add(dome)
  # pylint: disable=import-outside-toplevel
  from kubric.renderer import Blender
  import bpy
  blender_renderer = [v for v in scene.views if isinstance(v, Blender)]
  if blender_renderer:
    dome_blender = dome.linked_objects[blender_renderer[0]]
    dome_blender.cycles_visibility.shadow = False
    if background_hdri is not None:
      dome_mat = dome_blender.data.materials[0]
      texture_node = dome_mat.node_tree.nodes["Image Texture"]
      texture_node.image = bpy.data.images.load(background_hdri.filename)
  return dome
# --- Common setups
kb.utils.setup_logging(FLAGS.logging_level)
kb.utils.log_my_flags(FLAGS)
job_dir = kb.as_path(FLAGS.job_dir)
rng = np.random.RandomState(FLAGS.seed)
scene = kb.Scene.from_flags(FLAGS)
# --- Add a renderer
renderer = KubricRenderer(scene,
use_denoising=True,
adaptive_sampling=False,
background_transparency=True)
# --- Fetch a random asset
asset_source = kb.AssetSource(source_path)
all_ids = list(asset_source.db['id'])
fraction = 0.1
held_out_obj_ids = list(asset_source.db.sample(
frac=fraction, replace=False, random_state=42)["id"])
train_obj_ids = [i for i in asset_source.db["id"] if
i not in held_out_obj_ids]
if FLAGS.backgrounds_split == "train":
asset_id = rng.choice(train_obj_ids)
else:
asset_id = rng.choice(held_out_obj_ids)
obj = asset_source.create(asset_id=asset_id)
logging.info(f"selected '{asset_id}'")
# --- make object flat on X/Y and not penetrate floor
obj.quaternion = kb.Quaternion(axis=[1,0,0], degrees=90)
obj.position = obj.position - (0, 0, obj.aabbox[0][2])
obj_size = np.linalg.norm(obj.aabbox[1] - obj.aabbox[0])
if add_distractors:
obj_radius = np.linalg.norm(obj.aabbox[1][:2] - obj.aabbox[0][:2])
obj_height = obj.aabbox[1][2] - obj.aabbox[0][2]
obj.metadata = {
"asset_id": obj.asset_id,
"category": asset_source.db[
asset_source.db["id"] == obj.asset_id].iloc[0]["category_name"],
}
scene.add(obj)
size_multiple = 1.
if add_distractors:
distractor_locs = []
for i in range(4):
asset_id_2 = rng.choice(train_obj_ids)
obj2 = asset_source.create(asset_id=asset_id_2)
    logging.info(f"selected '{asset_id_2}'")
# --- make object flat on X/Y and not penetrate floor
obj2.quaternion = kb.Quaternion(axis=[1,0,0], degrees=90)
obj_2_radius = np.linalg.norm(obj2.aabbox[1][:2] - obj2.aabbox[0][:2])
position = rng.rand((2)) * 2 - 1
position /= np.linalg.norm(position)
position *= (obj_radius + obj_2_radius) / 2.
distractor_locs.append(-position)
obj2.position = obj2.position - (position[0], position[1], obj2.aabbox[0][2])
obj_size_2 = np.linalg.norm(obj2.aabbox[1] - obj2.aabbox[0])
obj_height_2 = obj2.aabbox[1][2] - obj2.aabbox[0][2]
    obj2.metadata = {
      "asset_id": obj2.asset_id,
"category": asset_source.db[
asset_source.db["id"] == obj2.asset_id].iloc[0]["category_name"],
}
scene.add(obj2)
distractor_dir = np.vstack(distractor_locs)
distractor_dir /= np.linalg.norm(distractor_dir, axis=-1, keepdims=True)
size_multiple = 1.5
material = kb.PrincipledBSDFMaterial(
color=kb.Color.from_hsv(rng.uniform(), 1, 1),
metallic=1.0, roughness=0.2, ior=2.5)
table = kb.Cube(name="floor", scale=(obj_size*size_multiple, obj_size*size_multiple, 0.02),
position=(0, 0, -0.02), material=material)
scene += table
logging.info("Loading background HDRIs from %s", FLAGS.hdri_dir)
hdri_source = kb.TextureSource(FLAGS.hdri_dir)
train_backgrounds, held_out_backgrounds = hdri_source.get_test_split(
fraction=0.1)
if FLAGS.backgrounds_split == "train":
logging.info("Choosing one of the %d training backgrounds...",
len(train_backgrounds))
background_hdri = hdri_source.create(texture_name=rng.choice(train_backgrounds))
else:
logging.info("Choosing one of the %d held-out backgrounds...",
len(held_out_backgrounds))
background_hdri = hdri_source.create(
texture_name=rng.choice(held_out_backgrounds))
dome = kb.assets.utils.add_hdri_dome(hdri_source, scene, background_hdri)
dome = add_hdri_dome(hdri_source, scene, background_hdri)
renderer._set_ambient_light_hdri(background_hdri.filename)
# table = add_table(hdri_source, scene, background_hdri)
# --- Add Klevr-like lights to the scene
scene += kb.assets.utils.get_clevr_lights(rng=rng)
# scene.ambient_illumination = kb.Color.from_hsv(np.random.uniform(), 1, 1)
# scene.ambient_illumination = kb.Color(0.05, 0.05, 0.05)
def sample_point_in_half_sphere_shell(
inner_radius: float,
outer_radius: float,
rng: np.random.RandomState
):
"""Uniformly sample points that are in a given distance
range from the origin and with z >= 0."""
while True:
v = rng.uniform((-outer_radius, -outer_radius, obj_height/1.2),
(outer_radius, outer_radius, obj_height))
len_v = np.linalg.norm(v)
correct_angle = True
if add_distractors:
cam_dir = v[:2] / np.linalg.norm(v[:2])
correct_angle = np.all(np.dot(distractor_dir, cam_dir) < np.cos(np.pi / 9.))
if inner_radius <= len_v <= outer_radius and correct_angle:
return tuple(v)
# --- Keyframe the camera
scene.camera = kb.PerspectiveCamera()
for frame in range(FLAGS.frame_start, FLAGS.frame_end + 1):
# scene.camera.position = (1, 1, 1) #< frozen camera
scene.camera.position = sample_point_in_half_sphere_shell(
obj_size*1.7, obj_size*2, rng)
scene.camera.look_at((0, 0, obj_height/2))
scene.camera.keyframe_insert("position", frame)
scene.camera.keyframe_insert("quaternion", frame)
# --- Rendering
logging.info("Rendering the scene ...")
renderer.save_state(job_dir / "scene.blend")
data_stack = renderer.render()
# --- Postprocessing
kb.compute_visibility(data_stack["segmentation"], scene.assets)
data_stack["segmentation"] = kb.adjust_segmentation_idxs(
data_stack["segmentation"],
scene.assets,
[obj]).astype(np.uint8)
# --- Discard non-used information
del data_stack["uv"]
del data_stack["forward_flow"]
del data_stack["backward_flow"]
del data_stack["depth"]
del data_stack["normal"]
# --- Save to image files
kb.file_io.write_image_dict(data_stack, job_dir)
# --- Collect metadata
logging.info("Collecting and storing metadata for each object.")
data = {
"metadata": kb.get_scene_metadata(scene),
"camera": kb.get_camera_info(scene.camera),
}
kb.file_io.write_json(filename=job_dir / "metadata.json", data=data)
kb.done()
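# --- Usage sketch: the flags added above plus the standard kb.ArgumentParser
# --- flags referenced in this file (--job_dir, --logging_level and the frame /
# --- resolution defaults) are assumed to be settable from the command line,
# --- e.g.:
#
#   python worker.py --dataset_mode=hard --backgrounds_split=train \
#     --hdri_dir=gs://mv_bckgr_removal/hdri_haven/4k/ --job_dir=output/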
| 35.085271
| 91
| 0.715091
|
import logging
import numpy as np
import kubric as kb
from kubric.renderer import Blender as KubricRenderer
source_path = (
"gs://tensorflow-graphics/public/60c9de9c410be30098c297ac/ShapeNetCore.v2")
parser = kb.ArgumentParser()
parser.set_defaults(
seed=1,
frame_start=1,
frame_end=10,
width=128,
height=128,
)
parser.add_argument("--backgrounds_split",
choices=["train", "test"], default="train")
parser.add_argument("--dataset_mode",
choices=["easy", "hard"], default="hard")
parser.add_argument("--hdri_dir",
type=str, default="gs://mv_bckgr_removal/hdri_haven/4k/")
FLAGS = parser.parse_args()
if FLAGS.dataset_mode == "hard":
add_distractors = False
def add_hdri_dome(hdri_source, scene, background_hdri=None):
dome_path = hdri_source.fetch("dome.blend")
dome = kb.FileBasedObject(
name="BackgroundDome",
position=(0, 0, 0),
static=True, background=True,
simulation_filename=None,
render_filename=str(dome_path),
render_import_kwargs={
"filepath": str(dome_path / "Object" / "Dome"),
"directory": str(dome_path / "Object"),
"filename": "Dome",
})
scene.add(dome)
from kubric.renderer import Blender
import bpy
blender_renderer = [v for v in scene.views if isinstance(v, Blender)]
if blender_renderer:
dome_blender = dome.linked_objects[blender_renderer[0]]
dome_blender.cycles_visibility.shadow = False
if background_hdri is not None:
dome_mat = dome_blender.data.materials[0]
texture_node = dome_mat.node_tree.nodes["Image Texture"]
texture_node.image = bpy.data.images.load(background_hdri.filename)
return dome
kb.utils.setup_logging(FLAGS.logging_level)
kb.utils.log_my_flags(FLAGS)
job_dir = kb.as_path(FLAGS.job_dir)
rng = np.random.RandomState(FLAGS.seed)
scene = kb.Scene.from_flags(FLAGS)
renderer = KubricRenderer(scene,
use_denoising=True,
adaptive_sampling=False,
background_transparency=True)
asset_source = kb.AssetSource(source_path)
all_ids = list(asset_source.db['id'])
fraction = 0.1
held_out_obj_ids = list(asset_source.db.sample(
frac=fraction, replace=False, random_state=42)["id"])
train_obj_ids = [i for i in asset_source.db["id"] if
i not in held_out_obj_ids]
if FLAGS.backgrounds_split == "train":
asset_id = rng.choice(train_obj_ids)
else:
asset_id = rng.choice(held_out_obj_ids)
obj = asset_source.create(asset_id=asset_id)
logging.info(f"selected '{asset_id}'")
obj.quaternion = kb.Quaternion(axis=[1,0,0], degrees=90)
obj.position = obj.position - (0, 0, obj.aabbox[0][2])
obj_size = np.linalg.norm(obj.aabbox[1] - obj.aabbox[0])
if add_distractors:
obj_radius = np.linalg.norm(obj.aabbox[1][:2] - obj.aabbox[0][:2])
obj_height = obj.aabbox[1][2] - obj.aabbox[0][2]
obj.metadata = {
"asset_id": obj.asset_id,
"category": asset_source.db[
asset_source.db["id"] == obj.asset_id].iloc[0]["category_name"],
}
scene.add(obj)
size_multiple = 1.
if add_distractors:
distractor_locs = []
for i in range(4):
asset_id_2 = rng.choice(train_obj_ids)
obj2 = asset_source.create(asset_id=asset_id_2)
logging.info(f"selected '{asset_id}'")
obj2.quaternion = kb.Quaternion(axis=[1,0,0], degrees=90)
obj_2_radius = np.linalg.norm(obj2.aabbox[1][:2] - obj2.aabbox[0][:2])
position = rng.rand((2)) * 2 - 1
position /= np.linalg.norm(position)
position *= (obj_radius + obj_2_radius) / 2.
distractor_locs.append(-position)
obj2.position = obj2.position - (position[0], position[1], obj2.aabbox[0][2])
obj_size_2 = np.linalg.norm(obj2.aabbox[1] - obj2.aabbox[0])
obj_height_2 = obj2.aabbox[1][2] - obj2.aabbox[0][2]
obj2.metadata = {
"asset_id": obj.asset_id,
"category": asset_source.db[
asset_source.db["id"] == obj2.asset_id].iloc[0]["category_name"],
}
scene.add(obj2)
distractor_dir = np.vstack(distractor_locs)
distractor_dir /= np.linalg.norm(distractor_dir, axis=-1, keepdims=True)
size_multiple = 1.5
material = kb.PrincipledBSDFMaterial(
color=kb.Color.from_hsv(rng.uniform(), 1, 1),
metallic=1.0, roughness=0.2, ior=2.5)
table = kb.Cube(name="floor", scale=(obj_size*size_multiple, obj_size*size_multiple, 0.02),
position=(0, 0, -0.02), material=material)
scene += table
logging.info("Loading background HDRIs from %s", FLAGS.hdri_dir)
hdri_source = kb.TextureSource(FLAGS.hdri_dir)
train_backgrounds, held_out_backgrounds = hdri_source.get_test_split(
fraction=0.1)
if FLAGS.backgrounds_split == "train":
logging.info("Choosing one of the %d training backgrounds...",
len(train_backgrounds))
background_hdri = hdri_source.create(texture_name=rng.choice(train_backgrounds))
else:
logging.info("Choosing one of the %d held-out backgrounds...",
len(held_out_backgrounds))
background_hdri = hdri_source.create(
texture_name=rng.choice(held_out_backgrounds))
dome = kb.assets.utils.add_hdri_dome(hdri_source, scene, background_hdri)
dome = add_hdri_dome(hdri_source, scene, background_hdri)
renderer._set_ambient_light_hdri(background_hdri.filename)
scene += kb.assets.utils.get_clevr_lights(rng=rng)
def sample_point_in_half_sphere_shell(
inner_radius: float,
outer_radius: float,
rng: np.random.RandomState
):
while True:
v = rng.uniform((-outer_radius, -outer_radius, obj_height/1.2),
(outer_radius, outer_radius, obj_height))
len_v = np.linalg.norm(v)
correct_angle = True
if add_distractors:
cam_dir = v[:2] / np.linalg.norm(v[:2])
correct_angle = np.all(np.dot(distractor_dir, cam_dir) < np.cos(np.pi / 9.))
if inner_radius <= len_v <= outer_radius and correct_angle:
return tuple(v)
scene.camera = kb.PerspectiveCamera()
for frame in range(FLAGS.frame_start, FLAGS.frame_end + 1):
position = sample_point_in_half_sphere_shell(
obj_size*1.7, obj_size*2, rng)
scene.camera.look_at((0, 0, obj_height/2))
scene.camera.keyframe_insert("position", frame)
scene.camera.keyframe_insert("quaternion", frame)
logging.info("Rendering the scene ...")
renderer.save_state(job_dir / "scene.blend")
data_stack = renderer.render()
kb.compute_visibility(data_stack["segmentation"], scene.assets)
data_stack["segmentation"] = kb.adjust_segmentation_idxs(
data_stack["segmentation"],
scene.assets,
[obj]).astype(np.uint8)
del data_stack["uv"]
del data_stack["forward_flow"]
del data_stack["backward_flow"]
del data_stack["depth"]
del data_stack["normal"]
kb.file_io.write_image_dict(data_stack, job_dir)
logging.info("Collecting and storing metadata for each object.")
data = {
"metadata": kb.get_scene_metadata(scene),
"camera": kb.get_camera_info(scene.camera),
}
kb.file_io.write_json(filename=job_dir / "metadata.json", data=data)
kb.done()
| true
| true
|
f712f558e36700c9a2b16f8b5e67cffbd02340f7
| 558
|
py
|
Python
|
booking/migrations/0012_auto_20210920_0214.py
|
starsouf/Python-Django-web-app
|
0af1a4f97a7b7583858bd3e487d8a1b502b4daa7
|
[
"Unlicense"
] | null | null | null |
booking/migrations/0012_auto_20210920_0214.py
|
starsouf/Python-Django-web-app
|
0af1a4f97a7b7583858bd3e487d8a1b502b4daa7
|
[
"Unlicense"
] | null | null | null |
booking/migrations/0012_auto_20210920_0214.py
|
starsouf/Python-Django-web-app
|
0af1a4f97a7b7583858bd3e487d8a1b502b4daa7
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-09-20 06:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('booking', '0011_auto_20210920_0204'),
]
operations = [
migrations.AlterField(
model_name='appointment',
name='appointment_statue',
field=models.CharField(choices=[('waiting', 'ينتظر'), ('in progress', 'يعمل على قصة شعره'), ('finished', 'انتهى'), ('did not show up', 'لم يحضر')], default=('waiting', 'ينتظر'), max_length=20),
),
]
| 29.368421
| 205
| 0.611111
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('booking', '0011_auto_20210920_0204'),
]
operations = [
migrations.AlterField(
model_name='appointment',
name='appointment_statue',
field=models.CharField(choices=[('waiting', 'ينتظر'), ('in progress', 'يعمل على قصة شعره'), ('finished', 'انتهى'), ('did not show up', 'لم يحضر')], default=('waiting', 'ينتظر'), max_length=20),
),
]
| true
| true
|
f712f6558ed50db7fff7120d2677a6ee59fe1aa4
| 15,039
|
py
|
Python
|
defoe/fmp/document.py
|
kallewesterling/defoe
|
d72af2f748fd4363a4718c93bb0b0284b8cb1f3e
|
[
"MIT"
] | 2
|
2022-02-14T12:10:54.000Z
|
2022-02-14T12:35:44.000Z
|
defoe/fmp/document.py
|
kallewesterling/defoe
|
d72af2f748fd4363a4718c93bb0b0284b8cb1f3e
|
[
"MIT"
] | 17
|
2022-02-09T21:46:14.000Z
|
2022-02-25T14:55:09.000Z
|
defoe/fmp/document.py
|
kallewesterling/defoe
|
d72af2f748fd4363a4718c93bb0b0284b8cb1f3e
|
[
"MIT"
] | 1
|
2022-02-14T13:19:08.000Z
|
2022-02-14T13:19:08.000Z
|
"""
Object model representation of a document represented as a collection
of XML files in METS/MODS format.
"""
from defoe.fmp.page import Page
from lxml import etree
import re
class Document(object):
"""
Object model representation of a document represented as a
collection of XML files in METS/MODS format.
"""
def __init__(self, code, archive):
"""
Constructor
:param code: identifier for this document within an archive
:type code: str or unicode
:param archive: archive to which this document belongs
:type archive: defoe.alto.archive.Archive
"""
self.namespaces = {
"mods": "http://www.loc.gov/mods/v3",
"mets": "http://www.loc.gov/METS/",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"premis": "info:lc/xmlns/premis-v2",
"dcterms": "http://purl.org/dc/terms/",
"fits": "http://hul.harvard.edu/ois/xml/ns/fits/fits_output",
"xlink": "http://www.w3.org/1999/xlink",
}
self.archive = archive
self.code = code
self.num_pages = 0
self.metadata = self.archive.open_document(self.code)
self.metadata_tree = etree.parse(self.metadata)
self.title = self.single_query("//mods:title/text()")
self.page_codes = sorted(
self.archive.document_codes[self.code], key=Document.sorter
)
self.num_pages = len(self.page_codes)
self.years = Document.parse_year(self.single_query("//mods:dateIssued/text()"))
self.publisher = self.single_query("//mods:publisher/text()")
self.place = self.single_query("//mods:placeTerm/text()")
        # place may often have a year in it.
self.years += Document.parse_year(self.place)
self.years = sorted(self.years)
self.documentId = self.single_query("//mods:identifier/text()")
if self.years:
self.year = self.years[0]
else:
self.year = None
self.date = self.single_query("//mods:dateIssued/text()")
self.document_type = "newspaper"
self.model = "fmp"
#### New ############
# [art0001, art0002, art0003]
self.articlesId = self.parse_structMap_Logical()
# {'#art0001':['#pa0001001', '#pa0001002', '#pa0001003', '#pa0001004', '#pa0001005', '#pa0001006', '#pa0001007'], '#art0002': ['#pa0001008', '#pa0001009' ..]}
# {'pa0001001': 'page1 area1', 'pa0001003': 'page1 area3'}
self.articlesParts, self.partsPage = self.parse_structLink()
# {'pa0001001': ['RECT', '1220,5,2893,221'], 'pa0001003': ['RECT', '2934,14,3709,211'], 'pa0004044': ['RECT', '5334,2088,5584,2121']}
self.partsCoord = self.parse_structMap_Physical()
self.num_articles = len(self.articlesId)
#######################
@staticmethod
def parse_year(text):
"""
Parse text to extract years of form 16xx to 19xx.
Any date of form NN following a year of form CCYY to CCYY
is used to derive a date CCNN.
As an exception to this rule, single years are parsed
from dates precisely matching the format YYYY-MM-DD.
For example:
* "1862, [1861]" returns [1861, 1862]
* "1847 [1846, 47]" returns [1846, 1847]
* "1873-80" returns [1873, 1880]
* "1870-09-01" returns [1870]
:param text: text to parse
:type text: str or unicode
:return: years
:rtype: set(int)
"""
try:
date_pattern = re.compile(
"(1[6-9]\d{2}(-|/)(0[1-9]|1[0-2])(-|/)(0[1-9]|[12]\d|3[01]))"
)
if date_pattern.match(text):
return [int(text[0:4])]
long_pattern = re.compile("(1[6-9]\d\d)")
short_pattern = re.compile("\d\d")
results = []
chunks = iter(long_pattern.split(text)[1:])
for year, rest in zip(chunks, chunks):
results.append(int(year))
century = year[0:2]
short_years = short_pattern.findall(rest)
for short_year in short_years:
results.append(int(century + short_year))
return sorted(set(results))
except TypeError:
return []
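    # Worked examples, restating the docstring above (assuming the regexes
    # behave as described there):
    #   Document.parse_year("1862, [1861]")    -> [1861, 1862]
    #   Document.parse_year("1847 [1846, 47]") -> [1846, 1847]
    #   Document.parse_year("1873-80")         -> [1873, 1880]
    #   Document.parse_year("1870-09-01")      -> [1870]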
@staticmethod
def sorter(page_code):
"""
Given a page code of form [0-9]*(_[0-9]*), split this
into the sub-codes. For example, given 123_456, return
[123, 456]
:param page_code: page code
:type page_code: str or unicode
:return: list of page codes
:rtype: list(int)
"""
codes = list(map(int, page_code.split("_")))
return codes
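    # Example with an assumed page code of the documented form:
    #   Document.sorter("0001_02") -> [1, 2], so "0001_02" sorts before "0001_10".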
def query(self, query):
"""
Run XPath query.
:param query: XPath query
:type query: str or unicode
:return: list of query results or None if none
:rtype: list(lxml.etree.<MODULE>) (depends on query)
"""
return self.metadata_tree.xpath(query, namespaces=self.namespaces)
def single_query(self, query):
"""
Run XPath query and return first result.
:param query: XPath query
:type query: str or unicode
:return: query result or None if none
:rtype: str or unicode
"""
result = self.query(query)
if not result:
return None
return str(result[0])
def page(self, code):
"""
Given a page code, return a new Page object.
:param code: page code
:type code: str or unicode
:return: Page object
:rtype: defoe.alto.page.Page
"""
return Page(self, code)
def get_document_info(self):
"""
Gets information from ZIP file about metadata file
corresponding to this document.
:return: information
:rtype: zipfile.ZipInfo
"""
return self.archive.get_document_info(self.code)
def get_page_info(self, page_code):
"""
Gets information from ZIP file about a page file within
this document.
:param page_code: file code
:type page_code: str or unicode
:return: information
:rtype: zipfile.ZipInfo
"""
return self.archive.get_page_info(self.code, page_code)
def __getitem__(self, index):
"""
Given a page index, return a new Page object.
:param index: page index
:type index: int
:return: Page object
:rtype: defoe.alto.page.Page
"""
return self.page(self.page_codes[index])
def __iter__(self):
"""
Iterate over page codes, returning new Page objects.
:return: Page object
:rtype: defoe.alto.page.Page
"""
for page_code in self.page_codes:
yield self.page(page_code)
def scan_strings(self):
"""
Iterate over strings in pages.
:return: page and string
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for string in page.strings:
yield page, string
def scan_tb(self):
"""
Iterate over textblocks in pages
:return: page and textblock
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for tb in page.tb:
yield page, tb
def scan_words(self):
"""
Iterate over words in pages.
:return: page and word
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for word in page.words:
yield page, word
def scan_wc(self):
"""
        Iterate over word qualities in pages.
:return: page and wc
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for wc in page.wc:
yield page, wc
@property
def articles(self):
"""
        Calculate the articles in each page.
        :return: a dictionary with all the articles. Each article is formed by one or more textblocks.
        :rtype: dictionary of articles, e.g.
{'art0001': ['pa0001001': ['RECT', '1220,5,2893,221', 'page1 area1'], 'pa0001003': ['RECT', '2934,14,3709,211', page1 area3], ...]], ...}
"""
self.document_articles = {}
articlesInfo = self.articles_info()
for page in self:
for tb in page.tb:
for articleId in articlesInfo:
for partId in articlesInfo[articleId]:
if partId == tb.textblock_id:
if articleId not in self.document_articles:
self.document_articles[articleId] = []
tb.textblock_shape = articlesInfo[articleId][partId][0]
tb.textblock_coords = articlesInfo[articleId][partId][1]
tb.textblock_page_area = articlesInfo[articleId][partId][2]
self.document_articles[articleId].append(tb)
return self.document_articles
def scan_cc(self):
"""
        Iterate over character qualities in pages.
:return: page and cc
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for cc in page.cc:
yield page, cc
def scan_images(self):
"""
Iterate over images in pages.
:return: page and XML fragment with image
:rtype: tuple(defoe.alto.page.Page, lxml.etree._Element)
"""
for page in self:
for image in page.images:
yield page, image
def strings(self):
"""
Iterate over strings.
:return: string
:rtype: str or unicode
"""
for _, string in self.scan_strings():
yield string
def tb(self):
"""
        Iterate over textblocks.
        :return: textblock
:rtype: str or unicode
"""
for _, tb in self.scan_tb():
yield tb
def words(self):
"""
        Iterate over words.
:return: word
:rtype: str or unicode
"""
for _, word in self.scan_words():
yield word
def images(self):
"""
Iterate over images.
:return: XML fragment with image
:rtype: lxml.etree._Element
"""
for _, image in self.scan_images():
yield image
def wc(self):
"""
        Iterate over word qualities.
:return: wc
:rtype: str or unicode
"""
for _, wc in self.scan_wc():
yield wc
def cc(self):
"""
        Iterate over character qualities.
        :return: cc
:rtype: str or unicode
"""
for _, cc in self.scan_cc():
yield cc
def parse_structMap_Physical(self):
"""
Parse the structMap Physical information
:return: dictionary with the ID of each part as a keyword. For each part, it gets the shape and coord.
:rtype: dictionary
{'pa0001001': ['RECT', '1220,5,2893,221'], 'pa0001003': ['RECT', '2934,14,3709,211'], 'pa0004044': ['RECT', '5334,2088,5584,2121']}
"""
partsCoord = dict()
elem = self.metadata_tree.find(
'mets:structMap[@TYPE="PHYSICAL"]', self.namespaces
)
for physic in elem:
parts = physic.findall('mets:div[@TYPE="page"]', self.namespaces)
for part in parts:
metadata_parts = part.findall("mets:div", self.namespaces)
for metadata in metadata_parts:
fptr = metadata.find("mets:fptr", self.namespaces)
for fp in fptr:
partsCoord[list(metadata.values())[0]] = [
list(fp.values())[1],
list(fp.values())[2],
]
return partsCoord
def parse_structMap_Logical(self):
"""
Parse the structMap Logical information
        :return: list of article IDs that make up each document/issue. It only returns the article IDs, no other type of element.
:rtype: list
[art0001, art0002, art0003]
"""
articlesId = []
elem = self.metadata_tree.find(
'mets:structMap[@TYPE="LOGICAL"]', self.namespaces
)
for logic in elem:
articles = logic.findall('mets:div[@TYPE="ARTICLE"]', self.namespaces)
for article in articles:
articlesId.append(list(article.values())[0])
return articlesId
def parse_structLink(self):
"""
        Parse the structLink information
        :return: 1) A dictionary with article IDs as keys; per article ID, a list of part/textblock ids that make up each article.
        2) A dictionary with part/textblock ids as keys, and page and area as values.
:rtype: two dictionaries
{'#art0001':['#pa0001001', '#pa0001002', '#pa0001003', '#pa0001004', '#pa0001005', '#pa0001006', '#pa0001007'], '#art0002': ['#pa0001008', '#pa0001009' ..]}
{'pa0001001': 'page1 area1', 'pa0001003': 'page1 area3'}
"""
articlesId = []
articlesParts = dict()
partsPage = dict()
elem = self.metadata_tree.findall("mets:structLink", self.namespaces)
for smlinkgrp in elem:
parts = smlinkgrp.findall("mets:smLinkGrp", self.namespaces)
for linklocator in smlinkgrp:
linkl = linklocator.findall("mets:smLocatorLink", self.namespaces)
article_parts = []
for link in linkl:
idstring = list(link.values())[0]
partId = re.sub("[^A-Za-z0-9]+", "", idstring)
article_parts.append(partId)
partsPage[partId] = list(link.values())[1]
articlesParts[article_parts[0]] = article_parts[1:]
return articlesParts, partsPage
def articles_info(self):
"""
        :return: a dictionary with article IDs as keys. Each entry has a dictionary of parts/textblocks as values, with all the part information (shape, coords and page_area).
:rtype: dictionary
#{'art0001 {'pa0001001': ['RECT', '1220,5,2893,221', 'page1 area1'], 'pa0001003': ['RECT', '2934,14,3709,211', 'page1 area3'], ....}}
"""
articlesId = []
articlesInfo = dict()
for a_id in self.articlesId:
articlesInfo[a_id] = dict()
for p_id in self.articlesParts[a_id]:
if p_id in self.partsCoord:
self.partsCoord[p_id].append(self.partsPage[p_id])
articlesInfo[a_id][p_id] = self.partsCoord[p_id]
return articlesInfo
| 33.948081
| 191
| 0.554558
|
from defoe.fmp.page import Page
from lxml import etree
import re
class Document(object):
def __init__(self, code, archive):
self.namespaces = {
"mods": "http://www.loc.gov/mods/v3",
"mets": "http://www.loc.gov/METS/",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"premis": "info:lc/xmlns/premis-v2",
"dcterms": "http://purl.org/dc/terms/",
"fits": "http://hul.harvard.edu/ois/xml/ns/fits/fits_output",
"xlink": "http://www.w3.org/1999/xlink",
}
self.archive = archive
self.code = code
self.num_pages = 0
self.metadata = self.archive.open_document(self.code)
self.metadata_tree = etree.parse(self.metadata)
self.title = self.single_query("//mods:title/text()")
self.page_codes = sorted(
self.archive.document_codes[self.code], key=Document.sorter
)
self.num_pages = len(self.page_codes)
self.years = Document.parse_year(self.single_query("//mods:dateIssued/text()"))
self.publisher = self.single_query("//mods:publisher/text()")
self.place = self.single_query("//mods:placeTerm/text()")
self.years += Document.parse_year(self.place)
self.years = sorted(self.years)
self.documentId = self.single_query("//mods:identifier/text()")
if self.years:
self.year = self.years[0]
else:
self.year = None
self.date = self.single_query("//mods:dateIssued/text()")
self.document_type = "newspaper"
self.model = "fmp"
        self.articlesId = self.parse_structMap_Logical()
        self.articlesParts, self.partsPage = self.parse_structLink()
        self.partsCoord = self.parse_structMap_Physical()
        self.num_articles = len(self.articlesId)
    @staticmethod
    def parse_year(text):
        try:
            date_pattern = re.compile(
                "(1[6-9]\d{2}(-|/)(0[1-9]|1[0-2])(-|/)(0[1-9]|[12]\d|3[01]))"
            )
            if date_pattern.match(text):
                return [int(text[0:4])]
long_pattern = re.compile("(1[6-9]\d\d)")
short_pattern = re.compile("\d\d")
results = []
chunks = iter(long_pattern.split(text)[1:])
for year, rest in zip(chunks, chunks):
results.append(int(year))
century = year[0:2]
short_years = short_pattern.findall(rest)
for short_year in short_years:
results.append(int(century + short_year))
return sorted(set(results))
except TypeError:
return []
@staticmethod
def sorter(page_code):
codes = list(map(int, page_code.split("_")))
return codes
def query(self, query):
return self.metadata_tree.xpath(query, namespaces=self.namespaces)
def single_query(self, query):
result = self.query(query)
if not result:
return None
return str(result[0])
def page(self, code):
return Page(self, code)
def get_document_info(self):
return self.archive.get_document_info(self.code)
def get_page_info(self, page_code):
return self.archive.get_page_info(self.code, page_code)
def __getitem__(self, index):
return self.page(self.page_codes[index])
def __iter__(self):
for page_code in self.page_codes:
yield self.page(page_code)
def scan_strings(self):
for page in self:
for string in page.strings:
yield page, string
def scan_tb(self):
for page in self:
for tb in page.tb:
yield page, tb
def scan_words(self):
for page in self:
for word in page.words:
yield page, word
def scan_wc(self):
for page in self:
for wc in page.wc:
yield page, wc
@property
def articles(self):
self.document_articles = {}
articlesInfo = self.articles_info()
for page in self:
for tb in page.tb:
for articleId in articlesInfo:
for partId in articlesInfo[articleId]:
if partId == tb.textblock_id:
if articleId not in self.document_articles:
self.document_articles[articleId] = []
tb.textblock_shape = articlesInfo[articleId][partId][0]
tb.textblock_coords = articlesInfo[articleId][partId][1]
tb.textblock_page_area = articlesInfo[articleId][partId][2]
self.document_articles[articleId].append(tb)
return self.document_articles
def scan_cc(self):
for page in self:
for cc in page.cc:
yield page, cc
def scan_images(self):
for page in self:
for image in page.images:
yield page, image
def strings(self):
for _, string in self.scan_strings():
yield string
def tb(self):
for _, tb in self.scan_tb():
yield tb
def words(self):
for _, word in self.scan_words():
yield word
def images(self):
for _, image in self.scan_images():
yield image
def wc(self):
for _, wc in self.scan_wc():
yield wc
def cc(self):
for _, cc in self.scan_cc():
yield cc
def parse_structMap_Physical(self):
partsCoord = dict()
elem = self.metadata_tree.find(
'mets:structMap[@TYPE="PHYSICAL"]', self.namespaces
)
for physic in elem:
parts = physic.findall('mets:div[@TYPE="page"]', self.namespaces)
for part in parts:
metadata_parts = part.findall("mets:div", self.namespaces)
for metadata in metadata_parts:
fptr = metadata.find("mets:fptr", self.namespaces)
for fp in fptr:
partsCoord[list(metadata.values())[0]] = [
list(fp.values())[1],
list(fp.values())[2],
]
return partsCoord
def parse_structMap_Logical(self):
articlesId = []
elem = self.metadata_tree.find(
'mets:structMap[@TYPE="LOGICAL"]', self.namespaces
)
for logic in elem:
articles = logic.findall('mets:div[@TYPE="ARTICLE"]', self.namespaces)
for article in articles:
articlesId.append(list(article.values())[0])
return articlesId
def parse_structLink(self):
articlesId = []
articlesParts = dict()
partsPage = dict()
elem = self.metadata_tree.findall("mets:structLink", self.namespaces)
for smlinkgrp in elem:
parts = smlinkgrp.findall("mets:smLinkGrp", self.namespaces)
for linklocator in smlinkgrp:
linkl = linklocator.findall("mets:smLocatorLink", self.namespaces)
article_parts = []
for link in linkl:
idstring = list(link.values())[0]
partId = re.sub("[^A-Za-z0-9]+", "", idstring)
article_parts.append(partId)
partsPage[partId] = list(link.values())[1]
articlesParts[article_parts[0]] = article_parts[1:]
return articlesParts, partsPage
def articles_info(self):
articlesId = []
articlesInfo = dict()
for a_id in self.articlesId:
articlesInfo[a_id] = dict()
for p_id in self.articlesParts[a_id]:
if p_id in self.partsCoord:
self.partsCoord[p_id].append(self.partsPage[p_id])
articlesInfo[a_id][p_id] = self.partsCoord[p_id]
return articlesInfo
| true
| true
|
f712f6eace062c31fb8cd15f637e74b23214b100
| 4,302
|
py
|
Python
|
tfnn_mlp.py
|
KiLJ4EdeN/tf2_nn
|
0ccec7692f061e7e066a4a2439683e3b09faa7bc
|
[
"MIT"
] | 1
|
2021-11-19T16:04:39.000Z
|
2021-11-19T16:04:39.000Z
|
tfnn_mlp.py
|
KiLJ4EdeN/tf2_nn
|
0ccec7692f061e7e066a4a2439683e3b09faa7bc
|
[
"MIT"
] | null | null | null |
tfnn_mlp.py
|
KiLJ4EdeN/tf2_nn
|
0ccec7692f061e7e066a4a2439683e3b09faa7bc
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import matplotlib.pyplot as plt
# MNIST dataset parameters.
num_classes = 10 # 0 to 9 digits
num_features = 784 # 28*28
# Training parameters.
learning_rate = 0.001
training_steps = 1000
batch_size = 256
display_step = 100
# Network parameters.
n_hidden_1 = 128 # 1st layer number of neurons.
n_hidden_2 = 256 # 2nd layer number of neurons.
# Prepare MNIST data.
from tensorflow.keras.datasets import mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# Convert to float32.
X_train = tf.Variable(X_train, dtype=tf.float32)
X_test = tf.Variable(X_test, dtype=tf.float32)
# Flatten images to 1-D vector of 784 features (28*28).
X_train = tf.reshape(X_train, [-1, num_features])
X_test = tf.reshape(X_test, [-1, num_features])
# Normalize images value from [0, 255] to [0, 1].
X_train = X_train / 255.
X_test = X_test / 255.
print(X_train.shape)
print(X_test.shape)
# Use tf.data API to shuffle and batch data.
train_data = tf.data.Dataset.from_tensor_slices((X_train, Y_train))
# repeat() makes the dataset loop indefinitely; prefetch() overlaps data preparation with training at the cost of extra RAM.
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
num_hidden_units = [n_hidden_1, n_hidden_2, num_classes]
random_normal = tf.initializers.RandomNormal()
# Weights for each layer: [784, 128], [128, 256] and [256, 10] (features in, units out).
W1 = tf.Variable(random_normal([num_features, num_hidden_units[0]]), name="weight1")
W2 = tf.Variable(random_normal([num_hidden_units[0], num_hidden_units[1]]), name="weight2")
W3 = tf.Variable(random_normal([num_hidden_units[1], num_hidden_units[2]]), name="weight3")
# Biases for each layer: [128], [256] and [10] (the total number of classes).
b1 = tf.Variable(tf.zeros([num_hidden_units[0]]), name="bias1")
b2 = tf.Variable(tf.zeros([num_hidden_units[1]]), name="bias2")
b3 = tf.Variable(tf.zeros([num_hidden_units[2]]), name="bias3")
def multilayer_perceptron(x):
  # Two ReLU hidden layers; the output layer is normalized to a probability distribution with softmax.
h1 = tf.nn.relu(tf.add(tf.matmul(x, W1), b1))
h2 = tf.nn.relu(tf.add(tf.matmul(h1, W2), b2))
h3 = tf.nn.relu(tf.add(tf.matmul(h2, W3), b3))
return tf.nn.softmax(h3)
# Cross-Entropy loss function.
def cross_entropy(y_pred, y_true):
# Encode label to a one hot vector.
y_true = tf.one_hot(y_true, depth=num_classes)
# Clip prediction values to avoid log(0) error.
y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)
# Compute cross-entropy.
return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))
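# A quick sanity check of the loss above, using made-up numbers purely for
# illustration: with true label 1 and a predicted probability of 0.8 on that
# class, the cross-entropy is -log(0.8), roughly 0.223.
example_ce = cross_entropy(
    tf.constant([[0.0, 0.8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2]]),
    tf.constant([1]))
print("cross-entropy sanity check (expect ~0.223): %f" % example_ce.numpy())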
# Accuracy metric.
def accuracy(y_pred, y_true):
# Predicted class is the index of highest score in prediction vector (i.e. argmax).
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)
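# A quick sanity check of the metric above, again with made-up values: the
# first row's argmax (class 1) matches its label and the second row's (class 0)
# does not, so the expected accuracy is 0.5.
example_acc = accuracy(
    tf.constant([[0.1, 0.8, 0.1], [0.6, 0.3, 0.1]]), tf.constant([1, 2]))
print("accuracy sanity check (expect 0.5): %f" % example_acc.numpy())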
# Stochastic gradient descent optimizer.
optimizer = tf.optimizers.SGD(learning_rate)
# Optimization process.
def run_optimization(x, y):
# Wrap computation inside a GradientTape for automatic differentiation.
with tf.GradientTape() as g:
pred = multilayer_perceptron(x)
loss = cross_entropy(pred, y)
# Compute gradients.
gradients = g.gradient(loss, [W1, W2, W3, b1, b2, b3])
# Update W and b following gradients.
optimizer.apply_gradients(zip(gradients, [W1, W2, W3, b1, b2, b3]))
# Run training for the given number of steps.
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
# Run the optimization to update W and b values.
run_optimization(batch_x, batch_y)
if step % display_step == 0:
pred = multilayer_perceptron(batch_x)
loss = cross_entropy(pred, batch_y)
acc = accuracy(pred, batch_y)
print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
# Test model on validation set.
pred = multilayer_perceptron(X_test)
print("Test Accuracy: %f" % accuracy(pred, Y_test))
# Visualize predictions.
# Predict 5 images from validation set.
n_images = 5
test_images = X_test[:n_images]
predictions = multilayer_perceptron(test_images)
# Display image and model prediction.
for i in range(n_images):
plt.imshow(tf.reshape(test_images[i], [28, 28]), cmap='gray')
plt.show()
print("Model prediction: %i" % tf.argmax(predictions.numpy()[i]))
| 37.736842
| 91
| 0.718968
|
import tensorflow as tf
import matplotlib.pyplot as plt
num_classes = 10
num_features = 784
learning_rate = 0.001
training_steps = 1000
batch_size = 256
display_step = 100
n_hidden_1 = 128
n_hidden_2 = 256
from tensorflow.keras.datasets import mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = tf.Variable(X_train, dtype=tf.float32)
X_test = tf.Variable(X_test, dtype=tf.float32)
X_train = tf.reshape(X_train, [-1, num_features])
X_test = tf.reshape(X_test, [-1, num_features])
X_train = X_train / 255.
X_test = X_test / 255.
print(X_train.shape)
print(X_test.shape)
train_data = tf.data.Dataset.from_tensor_slices((X_train, Y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
num_hidden_units = [n_hidden_1, n_hidden_2, num_classes]
random_normal = tf.initializers.RandomNormal()
W1 = tf.Variable(random_normal([num_features, num_hidden_units[0]]), name="weight1")
W2 = tf.Variable(random_normal([num_hidden_units[0], num_hidden_units[1]]), name="weight2")
W3 = tf.Variable(random_normal([num_hidden_units[1], num_hidden_units[2]]), name="weight3")
b1 = tf.Variable(tf.zeros([num_hidden_units[0]]), name="bias1")
b2 = tf.Variable(tf.zeros([num_hidden_units[1]]), name="bias2")
b3 = tf.Variable(tf.zeros([num_hidden_units[2]]), name="bias3")
def multilayer_perceptron(x):
h1 = tf.nn.relu(tf.add(tf.matmul(x, W1), b1))
h2 = tf.nn.relu(tf.add(tf.matmul(h1, W2), b2))
h3 = tf.nn.relu(tf.add(tf.matmul(h2, W3), b3))
return tf.nn.softmax(h3)
def cross_entropy(y_pred, y_true):
y_true = tf.one_hot(y_true, depth=num_classes)
y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)
return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))
def accuracy(y_pred, y_true):
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)
optimizer = tf.optimizers.SGD(learning_rate)
def run_optimization(x, y):
with tf.GradientTape() as g:
pred = multilayer_perceptron(x)
loss = cross_entropy(pred, y)
gradients = g.gradient(loss, [W1, W2, W3, b1, b2, b3])
optimizer.apply_gradients(zip(gradients, [W1, W2, W3, b1, b2, b3]))
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
run_optimization(batch_x, batch_y)
if step % display_step == 0:
pred = multilayer_perceptron(batch_x)
loss = cross_entropy(pred, batch_y)
acc = accuracy(pred, batch_y)
print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
pred = multilayer_perceptron(X_test)
print("Test Accuracy: %f" % accuracy(pred, Y_test))
n_images = 5
test_images = X_test[:n_images]
predictions = multilayer_perceptron(test_images)
for i in range(n_images):
plt.imshow(tf.reshape(test_images[i], [28, 28]), cmap='gray')
plt.show()
print("Model prediction: %i" % tf.argmax(predictions.numpy()[i]))
| true
| true
|
f712f7710849a450ed29af68d0147dcb2e216c9f
| 665
|
py
|
Python
|
tests/conftest.py
|
sebastien-prudhomme/docker-pytest
|
fc1b4ed3af860a40f95bff04b7f8e434fe16432e
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
sebastien-prudhomme/docker-pytest
|
fc1b4ed3af860a40f95bff04b7f8e434fe16432e
|
[
"MIT"
] | 85
|
2021-11-23T22:30:20.000Z
|
2022-02-21T03:14:01.000Z
|
tests/conftest.py
|
sebastien-prudhomme/docker-pytest
|
fc1b4ed3af860a40f95bff04b7f8e434fe16432e
|
[
"MIT"
] | 1
|
2021-12-06T17:33:06.000Z
|
2021-12-06T17:33:06.000Z
|
import subprocess
import pytest
import testinfra
def pytest_addoption(parser):
parser.addoption("--image")
@pytest.fixture(scope="session")
def image(request):
return request.config.getoption("--image")
@pytest.fixture(scope="session")
def host(image):
run_command = ["docker", "run", "-d", image, "sleep", "infinity"]
output = subprocess.check_output(run_command)
docker_id = output.decode().rstrip()
yield testinfra.get_host(f"docker://{docker_id}")
rm_command = ["docker", "rm", "-f", docker_id]
subprocess.check_call(rm_command)
@pytest.fixture(scope="session")
def packages(host):
return host.pip.get_packages()
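# A minimal sketch of how a test module might consume these fixtures; the
# concrete assertions are assumptions about the image under test, not part of
# this repository:
#
#   def test_packages_listed(packages):
#       assert "pip" in packages
#
#   def test_sleep_available(host):
#       assert host.exists("sleep")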
| 20.78125
| 69
| 0.697744
|
import subprocess
import pytest
import testinfra
def pytest_addoption(parser):
parser.addoption("--image")
@pytest.fixture(scope="session")
def image(request):
return request.config.getoption("--image")
@pytest.fixture(scope="session")
def host(image):
run_command = ["docker", "run", "-d", image, "sleep", "infinity"]
output = subprocess.check_output(run_command)
docker_id = output.decode().rstrip()
yield testinfra.get_host(f"docker://{docker_id}")
rm_command = ["docker", "rm", "-f", docker_id]
subprocess.check_call(rm_command)
@pytest.fixture(scope="session")
def packages(host):
return host.pip.get_packages()
| true
| true
|
f712f7cf75786e46db7389f593830972a3844b31
| 2,501
|
py
|
Python
|
cirq-core/cirq/contrib/routing/device_test.py
|
stubbi/Cirq
|
6d2cd16991bd7fde352010d31010f85d7eafc0ba
|
[
"Apache-2.0"
] | 2
|
2019-01-12T07:10:32.000Z
|
2020-09-08T03:47:22.000Z
|
cirq-core/cirq/contrib/routing/device_test.py
|
resduo/Cirq
|
680f897345eb1c71c9242515edda8f04b8594319
|
[
"Apache-2.0"
] | 5
|
2020-08-20T01:03:57.000Z
|
2021-04-13T00:26:48.000Z
|
cirq-core/cirq/contrib/routing/device_test.py
|
resduo/Cirq
|
680f897345eb1c71c9242515edda8f04b8594319
|
[
"Apache-2.0"
] | 1
|
2019-06-17T11:21:53.000Z
|
2019-06-17T11:21:53.000Z
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import networkx as nx
import cirq
import cirq.contrib.routing as ccr
def test_xmon_device_to_graph():
with cirq.testing.assert_deprecated("gridqubits_to_graph_device", deadline="v0.12"):
class TestDevice:
qubits = cirq.GridQubit.rect(2, 11)
foxtail_graph = ccr.xmon_device_to_graph(TestDevice())
two_by_eleven_grid_graph = ccr.get_grid_device_graph(2, 11)
assert foxtail_graph.nodes == two_by_eleven_grid_graph.nodes
assert foxtail_graph.edges() == two_by_eleven_grid_graph.edges()
@pytest.mark.parametrize('n_qubits', (2, 5, 11))
def test_get_linear_device_graph(n_qubits):
graph = ccr.get_linear_device_graph(n_qubits)
assert sorted(graph) == cirq.LineQubit.range(n_qubits)
assert len(graph.edges()) == n_qubits - 1
assert all(abs(a.x - b.x) == 1 for a, b in graph.edges())
def test_nx_qubit_layout():
grid_qubit_graph = ccr.gridqubits_to_graph_device(cirq.GridQubit.rect(5, 5))
pos = ccr.nx_qubit_layout(grid_qubit_graph)
assert len(pos) == len(grid_qubit_graph)
for k, (x, y) in pos.items():
assert x == k.col
assert y == -k.row
def test_nx_qubit_layout_2():
g = nx.from_edgelist(
[
(cirq.LineQubit(0), cirq.LineQubit(1)),
(cirq.LineQubit(1), cirq.LineQubit(2)),
]
)
pos = ccr.nx_qubit_layout(g)
for k, (x, y) in pos.items():
assert x == k.x
assert y == 0.5
def test_nx_qubit_layout_3():
g = nx.from_edgelist(
[
(cirq.NamedQubit('a'), cirq.NamedQubit('b')),
(cirq.NamedQubit('b'), cirq.NamedQubit('c')),
]
)
node_to_i = {
cirq.NamedQubit('a'): 0,
cirq.NamedQubit('b'): 1,
cirq.NamedQubit('c'): 2,
}
pos = ccr.nx_qubit_layout(g)
for k, (x, y) in pos.items():
assert x == 0.5
assert y == node_to_i[k] + 1
| 30.5
| 88
| 0.656937
|
import pytest
import networkx as nx
import cirq
import cirq.contrib.routing as ccr
def test_xmon_device_to_graph():
with cirq.testing.assert_deprecated("gridqubits_to_graph_device", deadline="v0.12"):
class TestDevice:
qubits = cirq.GridQubit.rect(2, 11)
foxtail_graph = ccr.xmon_device_to_graph(TestDevice())
two_by_eleven_grid_graph = ccr.get_grid_device_graph(2, 11)
assert foxtail_graph.nodes == two_by_eleven_grid_graph.nodes
assert foxtail_graph.edges() == two_by_eleven_grid_graph.edges()
@pytest.mark.parametrize('n_qubits', (2, 5, 11))
def test_get_linear_device_graph(n_qubits):
graph = ccr.get_linear_device_graph(n_qubits)
assert sorted(graph) == cirq.LineQubit.range(n_qubits)
assert len(graph.edges()) == n_qubits - 1
assert all(abs(a.x - b.x) == 1 for a, b in graph.edges())
def test_nx_qubit_layout():
grid_qubit_graph = ccr.gridqubits_to_graph_device(cirq.GridQubit.rect(5, 5))
pos = ccr.nx_qubit_layout(grid_qubit_graph)
assert len(pos) == len(grid_qubit_graph)
for k, (x, y) in pos.items():
assert x == k.col
assert y == -k.row
def test_nx_qubit_layout_2():
g = nx.from_edgelist(
[
(cirq.LineQubit(0), cirq.LineQubit(1)),
(cirq.LineQubit(1), cirq.LineQubit(2)),
]
)
pos = ccr.nx_qubit_layout(g)
for k, (x, y) in pos.items():
assert x == k.x
assert y == 0.5
def test_nx_qubit_layout_3():
g = nx.from_edgelist(
[
(cirq.NamedQubit('a'), cirq.NamedQubit('b')),
(cirq.NamedQubit('b'), cirq.NamedQubit('c')),
]
)
node_to_i = {
cirq.NamedQubit('a'): 0,
cirq.NamedQubit('b'): 1,
cirq.NamedQubit('c'): 2,
}
pos = ccr.nx_qubit_layout(g)
for k, (x, y) in pos.items():
assert x == 0.5
assert y == node_to_i[k] + 1
| true
| true
|
f712f7f0a4f7bb5575b634061313c5538d1a265f
| 10,549
|
py
|
Python
|
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/auto_rest_bool_test_service/operations/bool_model_operations.py
|
ljhljh235/AutoRest
|
b9ab4000e9b93d16925db84d08bafc225b098f8e
|
[
"MIT"
] | 3
|
2018-03-20T22:36:32.000Z
|
2021-07-15T02:36:51.000Z
|
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/auto_rest_bool_test_service/operations/bool_model_operations.py
|
ljhljh235/AutoRest
|
b9ab4000e9b93d16925db84d08bafc225b098f8e
|
[
"MIT"
] | null | null | null |
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/auto_rest_bool_test_service/operations/bool_model_operations.py
|
ljhljh235/AutoRest
|
b9ab4000e9b93d16925db84d08bafc225b098f8e
|
[
"MIT"
] | 1
|
2019-07-20T12:20:03.000Z
|
2019-07-20T12:20:03.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class BoolModelOperations(object):
"""BoolModelOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_true(
self, custom_headers=None, raw=False, **operation_config):
"""Get true Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyboolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/true'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_true(
self, bool_body, custom_headers=None, raw=False, **operation_config):
"""Set Boolean value true.
:param bool_body:
:type bool_body: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyboolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/true'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(bool_body, 'bool')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_false(
self, custom_headers=None, raw=False, **operation_config):
"""Get false Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyboolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/false'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_false(
self, bool_body, custom_headers=None, raw=False, **operation_config):
"""Set Boolean value false.
:param bool_body:
:type bool_body: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyboolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/false'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(bool_body, 'bool')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get null Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyboolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Get invalid Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyboolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
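# Rough usage sketch. The service client class and the attribute name below are
# assumptions based on the package layout (auto_rest_bool_test_service); the
# generated client normally exposes this operations group as an attribute:
#
#   from auto_rest_bool_test_service import AutoRestBoolTestService
#   client = AutoRestBoolTestService(base_url="http://localhost:3000")
#   assert client.bool_model.get_true() is True
#   client.bool_model.put_false(False)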
| 35.046512
| 91
| 0.645274
|
from msrest.pipeline import ClientRawResponse
from .. import models
class BoolModelOperations(object):
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_true(
self, custom_headers=None, raw=False, **operation_config):
url = '/bool/true'
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_true(
self, bool_body, custom_headers=None, raw=False, **operation_config):
url = '/bool/true'
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
body_content = self._serialize.body(bool_body, 'bool')
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_false(
self, custom_headers=None, raw=False, **operation_config):
url = '/bool/false'
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_false(
self, bool_body, custom_headers=None, raw=False, **operation_config):
url = '/bool/false'
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
body_content = self._serialize.body(bool_body, 'bool')
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_null(
self, custom_headers=None, raw=False, **operation_config):
url = '/bool/null'
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_invalid(
self, custom_headers=None, raw=False, **operation_config):
url = '/bool/invalid'
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| true
| true
|
f712f93fa50415847d9a2a993a502e1fbea5717c
| 761
|
py
|
Python
|
ConfirmVersionAndTag.py
|
matthewfcarlson/edk2-pytool-extensions
|
60a6b32a8a9e8deaed7fd197779a27a8bbf44c84
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
ConfirmVersionAndTag.py
|
matthewfcarlson/edk2-pytool-extensions
|
60a6b32a8a9e8deaed7fd197779a27a8bbf44c84
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
ConfirmVersionAndTag.py
|
matthewfcarlson/edk2-pytool-extensions
|
60a6b32a8a9e8deaed7fd197779a27a8bbf44c84
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
## @file
# Quick script to check that the wheel/package created is aligned on a git tag.
# Official releases should not be made from non-tagged code.
#
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
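# Illustrative example (the wheel filename below is hypothetical): for
# "edk2_pytool_extensions-0.13.4-py3-none-any.whl", splitting the relative
# path on "-" yields "0.13.4", which has exactly two dots and no "dev"
# marker, so the checks below pass; a version such as "0.14.dev1" would be
# rejected by the "dev" check.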
import glob
import os
import sys
p = os.path.join(os.getcwd(), "dist")
whlfile = glob.glob(os.path.join(p, "*.whl"))
if(len(whlfile) != 1):
for filename in whlfile:
print(filename)
raise Exception("Too many wheel files")
rfn = os.path.relpath(whlfile[0], os.getcwd())
v = rfn.split("-")[1]
if v.count(".") != 2:
raise Exception("Version %s not in format major.minor.patch" % v)
if "dev" in v:
raise Exception("No Dev versions allowed to be published.")
print("version: " + str(v))
sys.exit(0)
| 27.178571
| 79
| 0.678055
|
import glob
import os
import sys
p = os.path.join(os.getcwd(), "dist")
whlfile = glob.glob(os.path.join(p, "*.whl"))
if(len(whlfile) != 1):
for filename in whlfile:
print(filename)
raise Exception("Too many wheel files")
rfn = os.path.relpath(whlfile[0], os.getcwd())
v = rfn.split("-")[1]
if v.count(".") != 2:
raise Exception("Version %s not in format major.minor.patch" % v)
if "dev" in v:
raise Exception("No Dev versions allowed to be published.")
print("version: " + str(v))
sys.exit(0)
| true
| true
|
f712f954c7bbf6e7b96547d67794e6b38cba128d
| 278
|
py
|
Python
|
ex096.py
|
LeoWshington/Exercicios_CursoEmVideo_Python
|
294d14d9aaab5e32aaf39d70b0cd1266f0b55a02
|
[
"MIT"
] | null | null | null |
ex096.py
|
LeoWshington/Exercicios_CursoEmVideo_Python
|
294d14d9aaab5e32aaf39d70b0cd1266f0b55a02
|
[
"MIT"
] | null | null | null |
ex096.py
|
LeoWshington/Exercicios_CursoEmVideo_Python
|
294d14d9aaab5e32aaf39d70b0cd1266f0b55a02
|
[
"MIT"
] | null | null | null |
def area(c, la):
print(f'A area de um terreno {c :.2f}m x {la :.2f}m é de {c * la :.2f}m².')
# Programa principal
print(f'{"Controle de Terrenos" :^30}\n'
f'{"-" * 30}')
comp = float(input('Comprimento (m): '))
larg = float(input('Largura (m): '))
area(comp, larg)
| 25.272727
| 79
| 0.571942
|
def area(c, la):
print(f'A area de um terreno {c :.2f}m x {la :.2f}m é de {c * la :.2f}m².')
print(f'{"Controle de Terrenos" :^30}\n'
f'{"-" * 30}')
comp = float(input('Comprimento (m): '))
larg = float(input('Largura (m): '))
area(comp, larg)
| true
| true
|
f712f97584455965df05168aab1f03822ef439d1
| 977
|
py
|
Python
|
igibson/utils/data_utils/ext_object/scripts/step_2_merge.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | 360
|
2020-04-02T11:12:09.000Z
|
2022-03-24T21:46:58.000Z
|
igibson/utils/data_utils/ext_object/scripts/step_2_merge.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | 169
|
2020-04-07T21:01:05.000Z
|
2022-03-31T10:07:39.000Z
|
igibson/utils/data_utils/ext_object/scripts/step_2_merge.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | 94
|
2020-04-09T23:22:17.000Z
|
2022-03-17T21:49:03.000Z
|
import os
import sys
import bpy
script_dir = os.path.dirname(os.path.abspath(__file__))
utils_dir = os.path.join(script_dir, "../../blender_utils")
sys.path.append(utils_dir)
from utils import clean_unused, import_obj_folder
model_id = sys.argv[-3]
obj_dir = sys.argv[-2]
save_dir = sys.argv[-1]
os.makedirs(save_dir, exist_ok=True)
for on in bpy.context.scene.objects.keys():
obj = bpy.context.scene.objects[on]
bpy.data.objects.remove(obj)
clean_unused()
import_obj_folder("object", obj_dir, use_split_objects=True)
for obj in bpy.context.scene.objects:
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.select_all(action="SELECT")
save_path = os.path.join(save_dir, "{}_cm.obj".format(model_id))
bpy.ops.export_scene.obj(
filepath=save_path,
use_selection=True,
axis_up="Z",
axis_forward="X",
use_materials=False,
use_normals=False,
use_uvs=False,
use_triangles=True,
path_mode="COPY",
)
| 26.405405
| 64
| 0.733879
|
import os
import sys
import bpy
script_dir = os.path.dirname(os.path.abspath(__file__))
utils_dir = os.path.join(script_dir, "../../blender_utils")
sys.path.append(utils_dir)
from utils import clean_unused, import_obj_folder
model_id = sys.argv[-3]
obj_dir = sys.argv[-2]
save_dir = sys.argv[-1]
os.makedirs(save_dir, exist_ok=True)
for on in bpy.context.scene.objects.keys():
obj = bpy.context.scene.objects[on]
bpy.data.objects.remove(obj)
clean_unused()
import_obj_folder("object", obj_dir, use_split_objects=True)
for obj in bpy.context.scene.objects:
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.select_all(action="SELECT")
save_path = os.path.join(save_dir, "{}_cm.obj".format(model_id))
bpy.ops.export_scene.obj(
filepath=save_path,
use_selection=True,
axis_up="Z",
axis_forward="X",
use_materials=False,
use_normals=False,
use_uvs=False,
use_triangles=True,
path_mode="COPY",
)
| true
| true
|
f712f9a4f811eb447ce4ca49292f63080e7cea3d
| 9,291
|
py
|
Python
|
app/fsr_cfg.py
|
tappi287/openvr_fsr_app
|
a41feca64b53a42dc4808ea5f16ad1d3a851eaf9
|
[
"MIT"
] | 146
|
2021-07-21T23:03:53.000Z
|
2022-03-30T15:03:01.000Z
|
app/fsr_cfg.py
|
tappi287/openvr_fsr_app
|
a41feca64b53a42dc4808ea5f16ad1d3a851eaf9
|
[
"MIT"
] | 45
|
2021-08-09T12:19:44.000Z
|
2022-03-31T14:22:08.000Z
|
app/fsr_cfg.py
|
tappi287/openvr_fsr_app
|
a41feca64b53a42dc4808ea5f16ad1d3a851eaf9
|
[
"MIT"
] | 5
|
2021-09-03T16:12:15.000Z
|
2022-01-22T07:45:35.000Z
|
from .openvr_mod_cfg import OpenVRModCfgSetting, OpenVRModSettings
class FsrSettings(OpenVRModSettings):
cfg_key = 'fsr'
format = 'cfg'
def __init__(self):
self.enabled = OpenVRModCfgSetting(
key='enabled',
name='Enabled',
category='FSR Settings',
desc="enable image upscaling through AMD's FSR or NVIDIA's NIS",
value=True,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.useNIS = OpenVRModCfgSetting(
key='useNIS',
name="Use NVIDIA's Image Scaling",
category='FSR Settings',
            desc="if enabled, uses NVIDIA's Image Scaling instead of the default "
                 "AMD FidelityFX SuperResolution. Both algorithms work similarly, but produce "
                 "somewhat different results. You may want to experiment switching between the "
"two to determine which one you like better for a particular game.",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.renderScale = OpenVRModCfgSetting(
key='renderScale',
name='Render Scale',
category='FSR Settings',
            desc="Per-dimension render scale. If <1, will lower the game's render resolution "
"accordingly and afterwards upscale to the native resolution set in SteamVR. "
"If >1, the game will render at its native resolution, and afterwards the "
"image is upscaled to a higher resolution as per the given value. "
"If =1, effectively disables upsampling, but you'll still get the sharpening stage. "
"AMD presets: Ultra Quality => 0.77 Quality => 0.67 Balanced => 0.59 "
"Performance => 0.50",
value=0.77,
settings=[{'settingType': 'range', 'min': 0.10, 'max': 3.0, 'step': 0.01, 'display': 'floatpercent'}]
)
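        # Worked example (the resolution figures below are illustrative, not
        # taken from the mod): at a native per-eye render target of 2000x2000,
        # renderScale=0.77 has the game render at roughly 1540x1540 per eye
        # (2000 * 0.77 = 1540), which FSR/NIS then upscales back to 2000x2000
        # before the sharpening pass.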
self.sharpness = OpenVRModCfgSetting(
key='sharpness',
name='Sharpness',
category='FSR Settings',
desc="tune sharpness, values range from 0 to 1",
value=0.9,
settings=[{'settingType': 'range', 'min': 0.10, 'max': 3.0, 'step': 0.01, 'display': 'floatpercent'}]
)
self.radius = OpenVRModCfgSetting(
key='radius',
name='Radius',
category='FSR Settings',
desc="Only apply FSR/NIS to the given radius around the center of the image. "
"Anything outside this radius is upscaled by simple bilinear filtering,"
" which is cheaper and thus saves a bit of performance. Due to the design"
" of current HMD lenses, you can experiment with fairly small radii and may"
" still not see a noticeable difference."
" Sensible values probably lie somewhere between [0.2, 1.0]. However, note"
" that, since the image is not spheric, even a value of 1.0 technically still"
" skips some pixels in the corner of the image, so if you want to completely"
" disable this optimization, you can choose a value of 2."
" IMPORTANT: if you face issues like the view appearing offset or mismatched"
" between the eyes, turn this optimization off by setting the value to 2.0",
value=0.50,
settings=[{'settingType': 'range', 'min': 0.20, 'max': 2.00, 'step': 0.01}]
)
self.applyMIPBias = OpenVRModCfgSetting(
key='applyMIPBias',
name='Apply MIP Bias',
category='FSR Settings',
desc="if enabled, applies a negative LOD bias to texture MIP levels"
" should theoretically improve texture detail in the upscaled image"
" IMPORTANT: if you experience issues with rendering like disappearing"
" textures or strange patterns in the rendering, try turning this off"
" by setting the value to false.",
value=True,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.debugMode = OpenVRModCfgSetting(
key='debugMode',
name='Debug Mode',
category='FSR Settings',
desc="If enabled, will visualize the radius to which FSR/NIS is applied."
" Will also periodically log the GPU cost for applying FSR/NIS in the"
" current configuration.",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
# ---
# Hotkey Settings
# ---
self.hotkeys = OpenVRModCfgSetting(
key='hotkeys',
name='Hotkeys',
category='Hotkey Settings',
hidden=True,
value=dict(),
settings=list(),
)
self.hotkeysEnabled = OpenVRModCfgSetting(
key='enabled',
parent=self.hotkeys.key,
name='Hotkeys Enabled',
category='Hotkey Settings',
desc="If enabled, you can change certain settings of the mod on the fly by"
" pressing certain hotkeys. Good to see the visual difference. But you"
" may want to turn off hotkeys during regular play to prevent them from"
" interfering with game hotkeys.",
value=True,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeysRequireCtrl = OpenVRModCfgSetting(
key='requireCtrl',
parent=self.hotkeys.key,
name='Require Ctrl',
category='Hotkey Settings',
desc="if enabled, must also be holding CTRL key to use hotkeys",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeysRequireAlt = OpenVRModCfgSetting(
key='requireAlt',
parent=self.hotkeys.key,
name='Require Alt',
category='Hotkey Settings',
desc="if enabled, must also be holding ALT key to use hotkeys",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeysRequireShift = OpenVRModCfgSetting(
key='requireShift',
parent=self.hotkeys.key,
name='Require Shift',
category='Hotkey Settings',
desc="if enabled, must also be holding SHIFT key to use hotkeys",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeysToggleUseNIS = OpenVRModCfgSetting(
key='toggleUseNIS',
parent=self.hotkeys.key,
name='Toggle NIS',
category='Hotkeys',
desc='switch between FSR and NIS',
value=112, # F1
settings=[{'settingType': 'key'}]
)
self.hotkeysToggleDebugMode = OpenVRModCfgSetting(
key='toggleDebugMode',
parent=self.hotkeys.key,
name='Toggle Debug Mode',
category='Hotkeys',
desc='toggle debug mode on or off',
value=113, # F2
settings=[{'settingType': 'key'}]
)
self.hotkeysDecreaseSharpness = OpenVRModCfgSetting(
key='decreaseSharpness',
parent=self.hotkeys.key,
name='Decrease Sharpness',
category='Hotkeys',
desc='decrease sharpness by 0.05',
value=114, # F3
settings=[{'settingType': 'key'}]
)
self.hotkeysIncreaseSharpness = OpenVRModCfgSetting(
key='increaseSharpness',
parent=self.hotkeys.key,
name='Increase Sharpness',
category='Hotkeys',
desc='increase sharpness by 0.05',
value=115, # F4
settings=[{'settingType': 'key'}]
)
self.hotkeysDecreaseRadius = OpenVRModCfgSetting(
key='decreaseRadius',
parent=self.hotkeys.key,
name='Decrease Radius',
category='Hotkeys',
desc='decrease sharpening radius by 0.05',
value=116, # F5
settings=[{'settingType': 'key'}]
)
self.hotkeysIncreaseRadius = OpenVRModCfgSetting(
key='increaseRadius',
parent=self.hotkeys.key,
name='Increase Radius',
category='Hotkeys',
desc='increase sharpening radius by 0.05',
value=117, # F6
settings=[{'settingType': 'key'}]
)
self.hotkeysCaptureOutput = OpenVRModCfgSetting(
key='captureOutput',
parent=self.hotkeys.key,
name='Capture Output',
category='Hotkeys',
desc='take a screenshot of the final output sent to the HMD',
value=118, # F7
settings=[{'settingType': 'key'}]
)
self.option_field_names = self.get_setting_fields()
| 45.321951
| 113
| 0.551394
|
from .openvr_mod_cfg import OpenVRModCfgSetting, OpenVRModSettings
class FsrSettings(OpenVRModSettings):
cfg_key = 'fsr'
format = 'cfg'
def __init__(self):
self.enabled = OpenVRModCfgSetting(
key='enabled',
name='Enabled',
category='FSR Settings',
desc="enable image upscaling through AMD's FSR or NVIDIA's NIS",
value=True,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.useNIS = OpenVRModCfgSetting(
key='useNIS',
name="Use NVIDIA's Image Scaling",
category='FSR Settings',
            desc="if enabled, uses NVIDIA's Image Scaling instead of the default "
                 "AMD FidelityFX SuperResolution. Both algorithms work similarly, but produce "
                 "somewhat different results. You may want to experiment switching between the "
"two to determine which one you like better for a particular game.",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.renderScale = OpenVRModCfgSetting(
key='renderScale',
name='Render Scale',
category='FSR Settings',
            desc="Per-dimension render scale. If <1, will lower the game's render resolution "
"accordingly and afterwards upscale to the native resolution set in SteamVR. "
"If >1, the game will render at its native resolution, and afterwards the "
"image is upscaled to a higher resolution as per the given value. "
"If =1, effectively disables upsampling, but you'll still get the sharpening stage. "
"AMD presets: Ultra Quality => 0.77 Quality => 0.67 Balanced => 0.59 "
"Performance => 0.50",
value=0.77,
settings=[{'settingType': 'range', 'min': 0.10, 'max': 3.0, 'step': 0.01, 'display': 'floatpercent'}]
)
self.sharpness = OpenVRModCfgSetting(
key='sharpness',
name='Sharpness',
category='FSR Settings',
desc="tune sharpness, values range from 0 to 1",
value=0.9,
settings=[{'settingType': 'range', 'min': 0.10, 'max': 3.0, 'step': 0.01, 'display': 'floatpercent'}]
)
self.radius = OpenVRModCfgSetting(
key='radius',
name='Radius',
category='FSR Settings',
desc="Only apply FSR/NIS to the given radius around the center of the image. "
"Anything outside this radius is upscaled by simple bilinear filtering,"
" which is cheaper and thus saves a bit of performance. Due to the design"
" of current HMD lenses, you can experiment with fairly small radii and may"
" still not see a noticeable difference."
" Sensible values probably lie somewhere between [0.2, 1.0]. However, note"
" that, since the image is not spheric, even a value of 1.0 technically still"
" skips some pixels in the corner of the image, so if you want to completely"
" disable this optimization, you can choose a value of 2."
" IMPORTANT: if you face issues like the view appearing offset or mismatched"
" between the eyes, turn this optimization off by setting the value to 2.0",
value=0.50,
settings=[{'settingType': 'range', 'min': 0.20, 'max': 2.00, 'step': 0.01}]
)
self.applyMIPBias = OpenVRModCfgSetting(
key='applyMIPBias',
name='Apply MIP Bias',
category='FSR Settings',
desc="if enabled, applies a negative LOD bias to texture MIP levels"
" should theoretically improve texture detail in the upscaled image"
" IMPORTANT: if you experience issues with rendering like disappearing"
" textures or strange patterns in the rendering, try turning this off"
" by setting the value to false.",
value=True,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.debugMode = OpenVRModCfgSetting(
key='debugMode',
name='Debug Mode',
category='FSR Settings',
desc="If enabled, will visualize the radius to which FSR/NIS is applied."
" Will also periodically log the GPU cost for applying FSR/NIS in the"
" current configuration.",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeys = OpenVRModCfgSetting(
key='hotkeys',
name='Hotkeys',
category='Hotkey Settings',
hidden=True,
value=dict(),
settings=list(),
)
self.hotkeysEnabled = OpenVRModCfgSetting(
key='enabled',
parent=self.hotkeys.key,
name='Hotkeys Enabled',
category='Hotkey Settings',
desc="If enabled, you can change certain settings of the mod on the fly by"
" pressing certain hotkeys. Good to see the visual difference. But you"
" may want to turn off hotkeys during regular play to prevent them from"
" interfering with game hotkeys.",
value=True,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeysRequireCtrl = OpenVRModCfgSetting(
key='requireCtrl',
parent=self.hotkeys.key,
name='Require Ctrl',
category='Hotkey Settings',
desc="if enabled, must also be holding CTRL key to use hotkeys",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeysRequireAlt = OpenVRModCfgSetting(
key='requireAlt',
parent=self.hotkeys.key,
name='Require Alt',
category='Hotkey Settings',
desc="if enabled, must also be holding ALT key to use hotkeys",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeysRequireShift = OpenVRModCfgSetting(
key='requireShift',
parent=self.hotkeys.key,
name='Require Shift',
category='Hotkey Settings',
desc="if enabled, must also be holding SHIFT key to use hotkeys",
value=False,
settings=[{'value': True, 'name': 'On'}, {'value': False, 'name': 'Off'}]
)
self.hotkeysToggleUseNIS = OpenVRModCfgSetting(
key='toggleUseNIS',
parent=self.hotkeys.key,
name='Toggle NIS',
category='Hotkeys',
desc='switch between FSR and NIS',
value=112,
settings=[{'settingType': 'key'}]
)
self.hotkeysToggleDebugMode = OpenVRModCfgSetting(
key='toggleDebugMode',
parent=self.hotkeys.key,
name='Toggle Debug Mode',
category='Hotkeys',
desc='toggle debug mode on or off',
value=113,
settings=[{'settingType': 'key'}]
)
self.hotkeysDecreaseSharpness = OpenVRModCfgSetting(
key='decreaseSharpness',
parent=self.hotkeys.key,
name='Decrease Sharpness',
category='Hotkeys',
desc='decrease sharpness by 0.05',
value=114,
settings=[{'settingType': 'key'}]
)
self.hotkeysIncreaseSharpness = OpenVRModCfgSetting(
key='increaseSharpness',
parent=self.hotkeys.key,
name='Increase Sharpness',
category='Hotkeys',
desc='increase sharpness by 0.05',
value=115,
settings=[{'settingType': 'key'}]
)
self.hotkeysDecreaseRadius = OpenVRModCfgSetting(
key='decreaseRadius',
parent=self.hotkeys.key,
name='Decrease Radius',
category='Hotkeys',
desc='decrease sharpening radius by 0.05',
value=116,
settings=[{'settingType': 'key'}]
)
self.hotkeysIncreaseRadius = OpenVRModCfgSetting(
key='increaseRadius',
parent=self.hotkeys.key,
name='Increase Radius',
category='Hotkeys',
desc='increase sharpening radius by 0.05',
value=117,
settings=[{'settingType': 'key'}]
)
self.hotkeysCaptureOutput = OpenVRModCfgSetting(
key='captureOutput',
parent=self.hotkeys.key,
name='Capture Output',
category='Hotkeys',
desc='take a screenshot of the final output sent to the HMD',
value=118,
settings=[{'settingType': 'key'}]
)
self.option_field_names = self.get_setting_fields()
| true
| true
|
f712f9debe1711e0dceba667d636b17c24e7f00f
| 19,164
|
py
|
Python
|
projectRoot/env/Lib/site-packages/django/contrib/staticfiles/storage.py
|
russellgoldman/Beer-Finder
|
89b8493f72cb77cc79896b4221d5d8b7953bb5f6
|
[
"MIT"
] | 3
|
2018-05-12T18:13:19.000Z
|
2018-05-27T00:06:31.000Z
|
projectRoot/env/Lib/site-packages/django/contrib/staticfiles/storage.py
|
russellgoldman/Beer-Finder
|
89b8493f72cb77cc79896b4221d5d8b7953bb5f6
|
[
"MIT"
] | 1
|
2020-06-05T18:32:05.000Z
|
2020-06-05T18:32:05.000Z
|
projectRoot/env/Lib/site-packages/django/contrib/staticfiles/storage.py
|
russellgoldman/Beer-Finder
|
89b8493f72cb77cc79896b4221d5d8b7953bb5f6
|
[
"MIT"
] | 5
|
2018-05-17T18:08:26.000Z
|
2018-12-10T17:37:11.000Z
|
import hashlib
import json
import os
import posixpath
import re
from collections import OrderedDict
from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.cache import (
InvalidCacheBackendError, cache as default_cache, caches,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes
from django.utils.functional import LazyObject
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super().__init__(location, base_url, *args, **kwargs)
# FileSystemStorage fallbacks to MEDIA_ROOT when location
# is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super().path(name)
class HashedFilesMixin:
default_template = """url("%s")"""
max_post_process_passes = 5
patterns = (
("*.styling", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Return a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None, filename=None):
# `filename` is the name of file to hash if `content` isn't given.
# `name` is the base name to construct the new hashed filename from.
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
if filename:
filename = urlsplit(unquote(filename)).path.strip()
filename = filename or clean_name
opened = False
if content is None:
if not self.exists(filename):
raise ValueError("The file '%s' could not be found with %r." % (filename, self))
try:
content = self.open(filename)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
"""
Return the non-hashed URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
args = (clean_name,)
if hashed_files is not None:
args += (hashed_files,)
hashed_name = hashed_name_func(*args)
final_url = super().url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url(self, name, force=False):
"""
Return the non-hashed URL in DEBUG mode.
"""
return self._url(self.stored_name, name, force)
def url_converter(self, name, hashed_files, template=None):
"""
Return the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Convert the matched URL to a normalized and hashed URL.
This requires figuring out which files the matched URL resolves
to and calling the url() method of the storage.
"""
matched, url = matchobj.groups()
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r'^[a-z]+:', url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
# CSS / JS?). Note that STATIC_URL cannot be empty.
if url.startswith('/') and not url.startswith(settings.STATIC_URL):
return matched
# Strip off the fragment so a path-like fragment won't interfere.
url_path, fragment = urldefrag(url)
if url_path.startswith('/'):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL):]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == '/' else name.replace(os.sep, '/')
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
# Determine the hashed name of the target file with the storage backend.
hashed_url = self._url(
self._stored_name, unquote(target_name),
force=True, hashed_files=hashed_files,
)
transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
# Restore the fragment that was stripped off earlier.
if fragment:
transformed_url += ('?#' if '?#' in url else '#') + fragment
# Return the hashed version to the file
return template % unquote(transformed_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given OrderedDict of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
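        For example (the hash value here is illustrative): a collected file
        "css/base.styling" whose content hashes to "2f1c9df91d4d" is copied to
        "css/base.2f1c9df91d4d.styling", and url(...) references to it inside
        other adjustable files are rewritten to that hashed name.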
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = OrderedDict()
# build a list of adjustable files
adjustable_paths = [
path for path in paths
if matches_patterns(path, self._patterns)
]
# Do a single pass first. Post-process all files once, then repeat for
# adjustable files.
for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
paths = {path: paths[path] for path in adjustable_paths}
for i in range(self.max_post_process_passes):
substitutions = False
for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
substitutions = substitutions or subst
if not substitutions:
break
if substitutions:
yield 'All', None, RuntimeError('Max post-process passes exceeded.')
# Store the processed paths
self.hashed_files.update(hashed_files)
def _post_process(self, paths, adjustable_paths, hashed_files):
# Sort the files by directory level
def path_level(name):
return len(name.split(os.sep))
for name in sorted(paths, key=path_level, reverse=True):
substitutions = True
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
# generate the hash with the original content, even for
# adjustable files.
if hash_key not in hashed_files:
hashed_name = self.hashed_name(name, original_file)
else:
hashed_name = hashed_files[hash_key]
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
old_hashed_name = hashed_name
content = original_file.read().decode(settings.FILE_CHARSET)
for extension, patterns in self._patterns.items():
if matches_patterns(path, (extension,)):
for pattern, template in patterns:
converter = self.url_converter(name, hashed_files, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc, False
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
# Save intermediate file for reference
saved_name = self._save(hashed_name, content_file)
hashed_name = self.hashed_name(name, content_file)
if self.exists(hashed_name):
self.delete(hashed_name)
saved_name = self._save(hashed_name, content_file)
hashed_name = self.clean_name(saved_name)
# If the file hash stayed the same, this file didn't change
if old_hashed_name == hashed_name:
substitutions = False
processed = True
if not processed:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = self.clean_name(saved_name)
# and then set the cache accordingly
hashed_files[hash_key] = hashed_name
yield name, hashed_name, processed, substitutions
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def _stored_name(self, name, hashed_files):
# Normalize the path to avoid multiple names for the same file like
# ../foo/bar.styling and ../foo/../foo/bar.styling which normalize to the same
# path.
name = posixpath.normpath(name)
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
return cache_name
def stored_name(self, name):
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name:
return cache_name
# No cached name found, recalculate it from the files.
intermediate_name = name
for i in range(self.max_post_process_passes + 1):
cache_name = self.clean_name(
self.hashed_name(name, content=None, filename=intermediate_name)
)
if intermediate_name == cache_name:
# Store the hashed name if there was a miss.
self.hashed_files[hash_key] = cache_name
return cache_name
else:
# Move on to the next intermediate file.
intermediate_name = cache_name
# If the cache name can't be determined after the max number of passes,
# the intermediate files on disk may be corrupt; avoid an infinite loop.
raise ValueError("The name '%s' could not be hashed with %r." % (name, self))
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
manifest_strict = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode()
except IOError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return OrderedDict()
try:
stored = json.loads(content, object_pairs_hook=OrderedDict)
except ValueError:
pass
else:
version = stored.get('version')
if version == '1.0':
return stored.get('paths', OrderedDict())
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
def post_process(self, *args, **kwargs):
self.hashed_files = OrderedDict()
yield from super().post_process(*args, **kwargs)
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode()
self._save(self.manifest_name, ContentFile(contents))
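    # Illustrative manifest contents written by save_manifest (hash made up):
    # {"paths": {"css/base.styling": "css/base.2f1c9df91d4d.styling"}, "version": "1.0"}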
def stored_name(self, name):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
hash_key = self.hash_key(clean_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
if self.manifest_strict:
raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
cache_name = self.clean_name(self.hashed_name(name))
unparsed_name = list(parsed_name)
unparsed_name[2] = cache_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
class _MappingCache:
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(caches['staticfiles'])
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
| 38.481928
| 113
| 0.593143
|
import hashlib
import json
import os
import posixpath
import re
from collections import OrderedDict
from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.cache import (
InvalidCacheBackendError, cache as default_cache, caches,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes
from django.utils.functional import LazyObject
class StaticFilesStorage(FileSystemStorage):
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super().__init__(location, base_url, *args, **kwargs)
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super().path(name)
class HashedFilesMixin:
default_template = """url("%s")"""
max_post_process_passes = 5
patterns = (
("*.styling", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None, filename=None):
# `filename` is the name of file to hash if `content` isn't given.
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
if filename:
filename = urlsplit(unquote(filename)).path.strip()
filename = filename or clean_name
opened = False
if content is None:
if not self.exists(filename):
raise ValueError("The file '%s' could not be found with %r." % (filename, self))
try:
content = self.open(filename)
except IOError:
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
args = (clean_name,)
if hashed_files is not None:
args += (hashed_files,)
hashed_name = hashed_name_func(*args)
final_url = super().url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = '?#' in name
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url(self, name, force=False):
return self._url(self.stored_name, name, force)
def url_converter(self, name, hashed_files, template=None):
if template is None:
template = self.default_template
def converter(matchobj):
matched, url = matchobj.groups()
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r'^[a-z]+:', url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
if url.startswith('/') and not url.startswith(settings.STATIC_URL):
return matched
url_path, fragment = urldefrag(url)
if url_path.startswith('/'):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL):]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == '/' else name.replace(os.sep, '/')
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
hashed_url = self._url(
self._stored_name, unquote(target_name),
force=True, hashed_files=hashed_files,
)
transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
if fragment:
transformed_url += ('?#' if '?#' in url else '#') + fragment
return template % unquote(transformed_url)
return converter
def post_process(self, paths, dry_run=False, **options):
if dry_run:
return
hashed_files = OrderedDict()
adjustable_paths = [
path for path in paths
if matches_patterns(path, self._patterns)
]
for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
paths = {path: paths[path] for path in adjustable_paths}
for i in range(self.max_post_process_passes):
substitutions = False
for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
substitutions = substitutions or subst
if not substitutions:
break
if substitutions:
yield 'All', None, RuntimeError('Max post-process passes exceeded.')
self.hashed_files.update(hashed_files)
def _post_process(self, paths, adjustable_paths, hashed_files):
def path_level(name):
return len(name.split(os.sep))
for name in sorted(paths, key=path_level, reverse=True):
substitutions = True
storage, path = paths[name]
with storage.open(path) as original_file:
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
if hash_key not in hashed_files:
hashed_name = self.hashed_name(name, original_file)
else:
hashed_name = hashed_files[hash_key]
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
old_hashed_name = hashed_name
content = original_file.read().decode(settings.FILE_CHARSET)
for extension, patterns in self._patterns.items():
if matches_patterns(path, (extension,)):
for pattern, template in patterns:
converter = self.url_converter(name, hashed_files, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc, False
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
# Save intermediate file for reference
saved_name = self._save(hashed_name, content_file)
hashed_name = self.hashed_name(name, content_file)
if self.exists(hashed_name):
self.delete(hashed_name)
saved_name = self._save(hashed_name, content_file)
hashed_name = self.clean_name(saved_name)
# If the file hash stayed the same, this file didn't change
if old_hashed_name == hashed_name:
substitutions = False
processed = True
if not processed:
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = self.clean_name(saved_name)
hashed_files[hash_key] = hashed_name
yield name, hashed_name, processed, substitutions
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def _stored_name(self, name, hashed_files):
name = posixpath.normpath(name)
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
return cache_name
def stored_name(self, name):
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name:
return cache_name
intermediate_name = name
for i in range(self.max_post_process_passes + 1):
cache_name = self.clean_name(
self.hashed_name(name, content=None, filename=intermediate_name)
)
if intermediate_name == cache_name:
self.hashed_files[hash_key] = cache_name
return cache_name
else:
intermediate_name = cache_name
# the intermediate files on disk may be corrupt; avoid an infinite loop.
raise ValueError("The name '%s' could not be hashed with %r." % (name, self))
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
manifest_strict = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode()
except IOError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return OrderedDict()
try:
stored = json.loads(content, object_pairs_hook=OrderedDict)
except ValueError:
pass
else:
version = stored.get('version')
if version == '1.0':
return stored.get('paths', OrderedDict())
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
def post_process(self, *args, **kwargs):
self.hashed_files = OrderedDict()
yield from super().post_process(*args, **kwargs)
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode()
self._save(self.manifest_name, ContentFile(contents))
def stored_name(self, name):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
hash_key = self.hash_key(clean_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
if self.manifest_strict:
raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
cache_name = self.clean_name(self.hashed_name(name))
unparsed_name = list(parsed_name)
unparsed_name[2] = cache_name
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
class _MappingCache:
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(caches['staticfiles'])
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
| true
| true
|
f712fa692140b87d42856b40c9e6ecd289c79a5a
| 408
|
py
|
Python
|
frontends/pytorch/test/extension_coexistence.py
|
cathyzhyi/mlir-npcomp
|
79a3f639bfb449ba1409ff6dff161badf5a11b44
|
[
"Apache-2.0"
] | null | null | null |
frontends/pytorch/test/extension_coexistence.py
|
cathyzhyi/mlir-npcomp
|
79a3f639bfb449ba1409ff6dff161badf5a11b44
|
[
"Apache-2.0"
] | 6
|
2020-10-21T18:18:10.000Z
|
2021-04-02T20:38:28.000Z
|
frontends/pytorch/test/extension_coexistence.py
|
cathyzhyi/mlir-npcomp
|
79a3f639bfb449ba1409ff6dff161badf5a11b44
|
[
"Apache-2.0"
] | null | null | null |
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.
# Some checks that we can import the various extensions and libraries and
# not have symbol collisions or other goings on.
# RUN: %PYTHON %s
import sys
print(f"PYTHONPATH={sys.path}")
import mlir
import npcomp
import _npcomp
import _torch_mlir
print("Extensions all loaded")
| 21.473684
| 73
| 0.764706
|
import sys
print(f"PYTHONPATH={sys.path}")
import mlir
import npcomp
import _npcomp
import _torch_mlir
print("Extensions all loaded")
| true
| true
|
f712fa9c033a17ba9d2b04ae09b2bba1b4db81c7
| 10,862
|
py
|
Python
|
libcloud/loadbalancer/drivers/alb.py
|
dupontz/libcloud
|
419c69441ea10e7bbf37319e5e8d02e82e7e6b40
|
[
"Apache-2.0"
] | null | null | null |
libcloud/loadbalancer/drivers/alb.py
|
dupontz/libcloud
|
419c69441ea10e7bbf37319e5e8d02e82e7e6b40
|
[
"Apache-2.0"
] | null | null | null |
libcloud/loadbalancer/drivers/alb.py
|
dupontz/libcloud
|
419c69441ea10e7bbf37319e5e8d02e82e7e6b40
|
[
"Apache-2.0"
] | 1
|
2021-11-04T21:48:14.000Z
|
2021-11-04T21:48:14.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ApplicationLBDriver'
]
from libcloud.utils.xml import findtext, findall
from libcloud.loadbalancer.types import State
from libcloud.loadbalancer.base import Driver, LoadBalancer, Member
from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection
VERSION = '2015-12-01'
HOST = 'elasticloadbalancing.%s.amazonaws.com'
ROOT = '/%s/' % (VERSION)
NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, )
class ALBResponse(AWSGenericResponse):
"""
Amazon ALB response class.
"""
namespace = NS
exceptions = {}
xpath = 'Error'
class ALBConnection(SignedAWSConnection):
version = VERSION
host = HOST
responseCls = ALBResponse
service_name = 'elasticloadbalancing'
class ApplicationLBDriver(Driver):
name = 'Amazon Application Load Balancing'
website = 'http://aws.amazon.com/elasticloadbalancing/'
connectionCls = ALBConnection
signature_version = '4'
def __init__(self, access_id, secret, region, token=None):
self.token = token
super(ApplicationLBDriver, self).__init__(
access_id, secret, token=token
)
self.region = region
self.region_name = region
self.connection.host = HOST % (region)
def list_protocols(self):
return ['http', 'https']
def list_balancers(self):
params = {'Action': 'DescribeLoadBalancers'}
data = self.connection.request(ROOT, params=params).object
return self._to_balancers(data)
def balancer_list_members(self, balancer):
return balancer._members
def get_balancer(self, balancer_id):
params = {
'Action': 'DescribeLoadBalancers',
'LoadBalancerNames.member.1': balancer_id
}
data = self.connection.request(ROOT, params=params).object
return self._to_balancers(data)[0]
def ex_balancer_list_listeners(self, balancer):
return balancer.extra.get('listeners', [])
def _to_listeners(self, data):
xpath = 'DescribeListenersResult/Listeners/member'
return [self._to_listener(el) for el in findall(
element=data, xpath=xpath, namespace=NS
)]
def _to_listener(self, el):
listener_arn = findtext(element=el, xpath='ListenerArn', namespace=NS)
listener = {
'id': listener_arn,
'protocol': findtext(element=el, xpath='Protocol', namespace=NS),
'port': findtext(element=el, xpath='Port', namespace=NS),
'rules': self._ex_get_rules_for_listener(listener_arn)
}
return listener
def _to_targets(self, data):
xpath = 'DefaultActions/member'
return [self._to_target(el) for el in findall(
element=data, xpath=xpath, namespace=NS
)]
def _to_target(self, el):
return findtext(
element=el,
xpath='DefaultActions/member/TargetGroupArn',
namespace=NS
)
def _to_balancer(self, el):
name = findtext(element=el, xpath='LoadBalancerName', namespace=NS)
id = findtext(element=el, xpath='LoadBalancerArn', namespace=NS)
dns_name = findtext(el, xpath='DNSName', namespace=NS)
balancer = LoadBalancer(
id=id,
name=name,
state=State.UNKNOWN,
ip=dns_name,
port=None,
driver=self.connection.driver
)
extra = {
'listeners': self._ex_get_balancer_listeners(balancer),
'target_groups': self._ex_get_balancer_target_groups(balancer),
'tags': self._ex_get_balancer_tags(balancer)
}
balancer.extra = extra
if len(extra['listeners']) > 0:
balancer.port = extra['listeners'][0]['port']
else:
balancer.port = None
balancer._members = self._ex_get_balancer_memebers(balancer)
return balancer
def _to_balancers(self, data):
xpath = 'DescribeLoadBalancersResult/LoadBalancers/member'
return [self._to_balancer(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_tags(self, data):
"""
return tags dict
"""
tags = {}
xpath = 'DescribeTagsResult/TagDescriptions/member/Tags/member'
for el in findall(element=data, xpath=xpath, namespace=NS):
key = findtext(element=el, xpath='Key', namespace=NS)
value = findtext(element=el, xpath='Value', namespace=NS)
if key:
tags[key] = value
return tags
def _to_rule(self, el):
def __to_bool__(val):
return val.lower() in ("yes", "true", "t", "1")
id = findtext(element=el, xpath='RuleArn', namespace=NS)
is_default = findtext(element=el, xpath='IsDefault', namespace=NS)
priority = findtext(element=el, xpath='Priority', namespace=NS)
target_group = findtext(
element=el,
xpath='Actions/member/TargetGroupArn',
namespace=NS
)
conditions = {}
cond_members = findall(
element=el, xpath='Conditions/member', namespace=NS
)
for cond_member in cond_members:
field = findtext(element=cond_member, xpath='Field', namespace=NS)
conditions[field] = []
value_members = findall(
element=cond_member, xpath='Values/member', namespace=NS
)
for value_member in value_members:
conditions[field].append(value_member.text)
rule = {
'id': id,
'is_default': __to_bool__(is_default),
'priority': priority,
'target_group': target_group,
'conditions': conditions
}
return rule
def _to_rules(self, data):
xpath = 'DescribeRulesResult/Rules/member'
return [self._to_rule(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_target_groups(self, data):
xpath = 'DescribeTargetGroupsResult/TargetGroups/member'
return [self._to_target_group(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_target_group(self, el):
target_group_arn = findtext(
element=el, xpath='TargetGroupArn', namespace=NS
)
name = findtext(element=el, xpath='TargetGroupName', namespace=NS)
members = self._ex_get_target_group_members(target_group_arn)
return {'id': target_group_arn, 'name': name, 'members': members}
def _to_target_group_members(self, data):
xpath = 'DescribeTargetHealthResult/TargetHealthDescriptions/member'
return [self._to_target_group_member(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_target_group_member(self, el):
id = findtext(element=el, xpath='Target/Id', namespace=NS)
port = findtext(element=el, xpath='Target/Port', namespace=NS)
health = findtext(
element=el, xpath='TargetHealth/State', namespace=NS
)
return {'id': id, 'port': port, 'health': health}
    def _ex_get_balancer_members(self, balancer):
balancer_members = []
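        # Flatten every target group's members into libcloud Member objects.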
for tg in balancer.extra['target_groups']:
for tg_member in tg['members']:
new_member = Member(
tg_member['id'],
None,
tg_member['port'],
balancer=balancer,
extra={
'health': tg_member['health'],
'target_group': tg['name']
}
)
balancer_members.append(new_member)
return balancer_members
def _ex_get_target_group_members(self, target_group_arn):
"""
Return a list of target group member dicts.
:rtype: ``list`` of ``dict``
"""
params = {
'Action': 'DescribeTargetHealth',
'TargetGroupArn': target_group_arn
}
data = self.connection.request(ROOT, params=params).object
return self._to_target_group_members(data)
def _ex_get_balancer_target_groups(self, balancer):
"""
Return a list of load balancer target groups with members.
:rtype: ``list`` of ``dict``
"""
params = {
'Action': 'DescribeTargetGroups',
'LoadBalancerArn': balancer.id
}
data = self.connection.request(ROOT, params=params).object
return self._to_target_groups(data)
def _ex_get_balancer_listeners(self, balancer):
"""
Return a list of load balancer listeners dicts.
:rtype: ``list`` of ``dict``
"""
params = {
'Action': 'DescribeListeners',
'LoadBalancerArn': balancer.id
}
data = self.connection.request(ROOT, params=params).object
return self._to_listeners(data)
def _ex_get_rules_for_listener(self, listener_arn):
"""
Return a list of listeners rule dicts.
:rtype: ``list`` of ``dict``
"""
params = {
'Action': 'DescribeRules',
'ListenerArn': listener_arn
}
data = self.connection.request(ROOT, params=params).object
return self._to_rules(data)
def _ex_connection_class_kwargs(self):
pdriver = super(ApplicationLBDriver, self)
kwargs = pdriver._ex_connection_class_kwargs()
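        # When a session token is present, pass it through and pin Signature Version 4.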
if hasattr(self, 'token') and self.token is not None:
kwargs['token'] = self.token
kwargs['signature_version'] = '4'
else:
kwargs['signature_version'] = self.signature_version
return kwargs
def _ex_get_balancer_tags(self, balancer):
params = {
'Action': 'DescribeTags',
'ResourceArns.member.1': balancer.id
}
data = self.connection.request(ROOT, params=params).object
return self._to_tags(data)
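# A minimal, hypothetical usage sketch for the ApplicationLBDriver defined
# above. The access key, secret and region are placeholders, not values taken
# from this module; running it would issue real DescribeLoadBalancers calls.
if __name__ == '__main__':
    driver = ApplicationLBDriver('ACCESS_KEY_ID', 'SECRET_KEY', 'us-east-1')
    for balancer in driver.list_balancers():
        print(balancer.name, balancer.ip, balancer.port)
        for listener in driver.ex_balancer_list_listeners(balancer):
            print('listener:', listener['protocol'], listener['port'])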
| 33.732919
| 78
| 0.614712
|
__all__ = [
'ApplicationLBDriver'
]
from libcloud.utils.xml import findtext, findall
from libcloud.loadbalancer.types import State
from libcloud.loadbalancer.base import Driver, LoadBalancer, Member
from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection
VERSION = '2015-12-01'
HOST = 'elasticloadbalancing.%s.amazonaws.com'
ROOT = '/%s/' % (VERSION)
NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, )
class ALBResponse(AWSGenericResponse):
namespace = NS
exceptions = {}
xpath = 'Error'
class ALBConnection(SignedAWSConnection):
version = VERSION
host = HOST
responseCls = ALBResponse
service_name = 'elasticloadbalancing'
class ApplicationLBDriver(Driver):
name = 'Amazon Application Load Balancing'
website = 'http://aws.amazon.com/elasticloadbalancing/'
connectionCls = ALBConnection
signature_version = '4'
def __init__(self, access_id, secret, region, token=None):
self.token = token
super(ApplicationLBDriver, self).__init__(
access_id, secret, token=token
)
self.region = region
self.region_name = region
self.connection.host = HOST % (region)
def list_protocols(self):
return ['http', 'https']
def list_balancers(self):
params = {'Action': 'DescribeLoadBalancers'}
data = self.connection.request(ROOT, params=params).object
return self._to_balancers(data)
def balancer_list_members(self, balancer):
return balancer._members
def get_balancer(self, balancer_id):
params = {
'Action': 'DescribeLoadBalancers',
'LoadBalancerNames.member.1': balancer_id
}
data = self.connection.request(ROOT, params=params).object
return self._to_balancers(data)[0]
def ex_balancer_list_listeners(self, balancer):
return balancer.extra.get('listeners', [])
def _to_listeners(self, data):
xpath = 'DescribeListenersResult/Listeners/member'
return [self._to_listener(el) for el in findall(
element=data, xpath=xpath, namespace=NS
)]
def _to_listener(self, el):
listener_arn = findtext(element=el, xpath='ListenerArn', namespace=NS)
listener = {
'id': listener_arn,
'protocol': findtext(element=el, xpath='Protocol', namespace=NS),
'port': findtext(element=el, xpath='Port', namespace=NS),
'rules': self._ex_get_rules_for_listener(listener_arn)
}
return listener
def _to_targets(self, data):
xpath = 'DefaultActions/member'
return [self._to_target(el) for el in findall(
element=data, xpath=xpath, namespace=NS
)]
def _to_target(self, el):
return findtext(
element=el,
xpath='DefaultActions/member/TargetGroupArn',
namespace=NS
)
def _to_balancer(self, el):
name = findtext(element=el, xpath='LoadBalancerName', namespace=NS)
id = findtext(element=el, xpath='LoadBalancerArn', namespace=NS)
dns_name = findtext(el, xpath='DNSName', namespace=NS)
balancer = LoadBalancer(
id=id,
name=name,
state=State.UNKNOWN,
ip=dns_name,
port=None,
driver=self.connection.driver
)
extra = {
'listeners': self._ex_get_balancer_listeners(balancer),
'target_groups': self._ex_get_balancer_target_groups(balancer),
'tags': self._ex_get_balancer_tags(balancer)
}
balancer.extra = extra
if len(extra['listeners']) > 0:
balancer.port = extra['listeners'][0]['port']
else:
balancer.port = None
        balancer._members = self._ex_get_balancer_members(balancer)
return balancer
def _to_balancers(self, data):
xpath = 'DescribeLoadBalancersResult/LoadBalancers/member'
return [self._to_balancer(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_tags(self, data):
tags = {}
xpath = 'DescribeTagsResult/TagDescriptions/member/Tags/member'
for el in findall(element=data, xpath=xpath, namespace=NS):
key = findtext(element=el, xpath='Key', namespace=NS)
value = findtext(element=el, xpath='Value', namespace=NS)
if key:
tags[key] = value
return tags
def _to_rule(self, el):
def __to_bool__(val):
return val.lower() in ("yes", "true", "t", "1")
id = findtext(element=el, xpath='RuleArn', namespace=NS)
is_default = findtext(element=el, xpath='IsDefault', namespace=NS)
priority = findtext(element=el, xpath='Priority', namespace=NS)
target_group = findtext(
element=el,
xpath='Actions/member/TargetGroupArn',
namespace=NS
)
conditions = {}
cond_members = findall(
element=el, xpath='Conditions/member', namespace=NS
)
for cond_member in cond_members:
field = findtext(element=cond_member, xpath='Field', namespace=NS)
conditions[field] = []
value_members = findall(
element=cond_member, xpath='Values/member', namespace=NS
)
for value_member in value_members:
conditions[field].append(value_member.text)
rule = {
'id': id,
'is_default': __to_bool__(is_default),
'priority': priority,
'target_group': target_group,
'conditions': conditions
}
return rule
def _to_rules(self, data):
xpath = 'DescribeRulesResult/Rules/member'
return [self._to_rule(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_target_groups(self, data):
xpath = 'DescribeTargetGroupsResult/TargetGroups/member'
return [self._to_target_group(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_target_group(self, el):
target_group_arn = findtext(
element=el, xpath='TargetGroupArn', namespace=NS
)
name = findtext(element=el, xpath='TargetGroupName', namespace=NS)
members = self._ex_get_target_group_members(target_group_arn)
return {'id': target_group_arn, 'name': name, 'members': members}
def _to_target_group_members(self, data):
xpath = 'DescribeTargetHealthResult/TargetHealthDescriptions/member'
return [self._to_target_group_member(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_target_group_member(self, el):
id = findtext(element=el, xpath='Target/Id', namespace=NS)
port = findtext(element=el, xpath='Target/Port', namespace=NS)
health = findtext(
element=el, xpath='TargetHealth/State', namespace=NS
)
return {'id': id, 'port': port, 'health': health}
    def _ex_get_balancer_members(self, balancer):
balancer_members = []
for tg in balancer.extra['target_groups']:
for tg_member in tg['members']:
new_member = Member(
tg_member['id'],
None,
tg_member['port'],
balancer=balancer,
extra={
'health': tg_member['health'],
'target_group': tg['name']
}
)
balancer_members.append(new_member)
return balancer_members
def _ex_get_target_group_members(self, target_group_arn):
params = {
'Action': 'DescribeTargetHealth',
'TargetGroupArn': target_group_arn
}
data = self.connection.request(ROOT, params=params).object
return self._to_target_group_members(data)
def _ex_get_balancer_target_groups(self, balancer):
params = {
'Action': 'DescribeTargetGroups',
'LoadBalancerArn': balancer.id
}
data = self.connection.request(ROOT, params=params).object
return self._to_target_groups(data)
def _ex_get_balancer_listeners(self, balancer):
params = {
'Action': 'DescribeListeners',
'LoadBalancerArn': balancer.id
}
data = self.connection.request(ROOT, params=params).object
return self._to_listeners(data)
def _ex_get_rules_for_listener(self, listener_arn):
params = {
'Action': 'DescribeRules',
'ListenerArn': listener_arn
}
data = self.connection.request(ROOT, params=params).object
return self._to_rules(data)
def _ex_connection_class_kwargs(self):
pdriver = super(ApplicationLBDriver, self)
kwargs = pdriver._ex_connection_class_kwargs()
if hasattr(self, 'token') and self.token is not None:
kwargs['token'] = self.token
kwargs['signature_version'] = '4'
else:
kwargs['signature_version'] = self.signature_version
return kwargs
def _ex_get_balancer_tags(self, balancer):
params = {
'Action': 'DescribeTags',
'ResourceArns.member.1': balancer.id
}
data = self.connection.request(ROOT, params=params).object
return self._to_tags(data)
| true
| true
|
f712fac6bd1822c194117ca4f70daa44951941ca
| 23,640
|
py
|
Python
|
ummeli/base/migrations/0025_auto__add_field_userban_is_unbanned.py
|
praekelt/ummeli
|
c09731ed9a3a40359e81dcf6ed365797606ae9e2
|
[
"BSD-3-Clause"
] | null | null | null |
ummeli/base/migrations/0025_auto__add_field_userban_is_unbanned.py
|
praekelt/ummeli
|
c09731ed9a3a40359e81dcf6ed365797606ae9e2
|
[
"BSD-3-Clause"
] | null | null | null |
ummeli/base/migrations/0025_auto__add_field_userban_is_unbanned.py
|
praekelt/ummeli
|
c09731ed9a3a40359e81dcf6ed365797606ae9e2
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserBan.is_unbanned'
db.add_column('base_userban', 'is_unbanned', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserBan.is_unbanned'
db.delete_column('base_userban', 'is_unbanned')
models = {
'atlas.city': {
'Meta': {'ordering': "('name',)", 'object_name': 'City'},
'coordinates': ('atlas.fields.CoordinateField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Region']", 'null': 'True', 'blank': 'True'})
},
'atlas.country': {
'Meta': {'ordering': "('name',)", 'object_name': 'Country'},
'border': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'coordinates': ('atlas.fields.CoordinateField', [], {'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'atlas.location': {
'Meta': {'object_name': 'Location'},
'address': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.City']"}),
'coordinates': ('atlas.fields.CoordinateField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photologue.Photo']", 'null': 'True', 'blank': 'True'})
},
'atlas.region': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('country', 'code'),)", 'object_name': 'Region'},
'border': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'coordinates': ('atlas.fields.CoordinateField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 20, 14, 40, 19, 580727)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 20, 14, 40, 19, 580636)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'base.banner': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Banner', '_ormbases': ['jmbo.ModelBase']},
'banner_type': ('django.db.models.fields.CharField', [], {'default': "'banner'", 'max_length': '10'}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'}),
'time_off': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'time_on': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'base.certificate': {
'Meta': {'object_name': 'Certificate'},
'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'base.curriculumvitae': {
'Meta': {'object_name': 'CurriculumVitae'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'certificates': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Certificate']", 'symmetrical': 'False', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'comment_as_anon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'connection_requests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'connection_requests'", 'blank': 'True', 'to': "orm['auth.User']"}),
'date_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'highest_grade': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'highest_grade_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Language']", 'symmetrical': 'False', 'blank': 'True'}),
'nr_of_faxes_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'preferred_skill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'profiles_preferred'", 'null': 'True', 'to': "orm['base.Skill']"}),
'province': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'references': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Reference']", 'symmetrical': 'False', 'blank': 'True'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'show_address': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_contact_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'profiles'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['base.Skill']"}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'work_experiences': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.WorkExperience']", 'symmetrical': 'False', 'blank': 'True'})
},
'base.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'read_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'base.reference': {
'Meta': {'object_name': 'Reference'},
'contact_no': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'})
},
'base.skill': {
'Meta': {'object_name': 'Skill'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skill': ('django.db.models.fields.CharField', [], {'max_length': '45'})
},
'base.userban': {
'Meta': {'object_name': 'UserBan'},
'ban_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_unbanned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unban_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'base.workexperience': {
'Meta': {'object_name': 'WorkExperience'},
'company': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'end_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '45'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Location']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'null': 'True', 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photo': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'Photo'},
'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tags': ('photologue.models.TagField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['base']
| 84.731183
| 201
| 0.554103
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column('base_userban', 'is_unbanned', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
db.delete_column('base_userban', 'is_unbanned')
models = {
'atlas.city': {
'Meta': {'ordering': "('name',)", 'object_name': 'City'},
'coordinates': ('atlas.fields.CoordinateField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Region']", 'null': 'True', 'blank': 'True'})
},
'atlas.country': {
'Meta': {'ordering': "('name',)", 'object_name': 'Country'},
'border': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'coordinates': ('atlas.fields.CoordinateField', [], {'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'atlas.location': {
'Meta': {'object_name': 'Location'},
'address': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.City']"}),
'coordinates': ('atlas.fields.CoordinateField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photologue.Photo']", 'null': 'True', 'blank': 'True'})
},
'atlas.region': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('country', 'code'),)", 'object_name': 'Region'},
'border': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'coordinates': ('atlas.fields.CoordinateField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 20, 14, 40, 19, 580727)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 20, 14, 40, 19, 580636)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'base.banner': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Banner', '_ormbases': ['jmbo.ModelBase']},
'banner_type': ('django.db.models.fields.CharField', [], {'default': "'banner'", 'max_length': '10'}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'}),
'time_off': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'time_on': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'base.certificate': {
'Meta': {'object_name': 'Certificate'},
'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'base.curriculumvitae': {
'Meta': {'object_name': 'CurriculumVitae'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'certificates': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Certificate']", 'symmetrical': 'False', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'comment_as_anon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'connection_requests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'connection_requests'", 'blank': 'True', 'to': "orm['auth.User']"}),
'date_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'highest_grade': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'highest_grade_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Language']", 'symmetrical': 'False', 'blank': 'True'}),
'nr_of_faxes_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'preferred_skill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'profiles_preferred'", 'null': 'True', 'to': "orm['base.Skill']"}),
'province': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'references': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Reference']", 'symmetrical': 'False', 'blank': 'True'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'show_address': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_contact_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'profiles'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['base.Skill']"}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'work_experiences': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.WorkExperience']", 'symmetrical': 'False', 'blank': 'True'})
},
'base.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'read_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'base.reference': {
'Meta': {'object_name': 'Reference'},
'contact_no': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'})
},
'base.skill': {
'Meta': {'object_name': 'Skill'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skill': ('django.db.models.fields.CharField', [], {'max_length': '45'})
},
'base.userban': {
'Meta': {'object_name': 'UserBan'},
'ban_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_unbanned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unban_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'base.workexperience': {
'Meta': {'object_name': 'WorkExperience'},
'company': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'end_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '45'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Location']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'null': 'True', 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photo': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'Photo'},
'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tags': ('photologue.models.TagField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['base']
| true
| true
|
f712fac84d590819f808ab2c2adec2ef2541b025
| 2,277
|
py
|
Python
|
test/test_profile_audiences_changes.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2021-03-05T06:41:26.000Z
|
2021-03-05T06:41:26.000Z
|
test/test_profile_audiences_changes.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2021-09-07T08:56:58.000Z
|
2021-09-07T08:56:58.000Z
|
test/test_profile_audiences_changes.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2019-05-21T10:27:54.000Z
|
2019-05-21T10:27:54.000Z
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.profile_audiences_changes import ProfileAudiencesChanges # noqa: E501
from talon_one.rest import ApiException
class TestProfileAudiencesChanges(unittest.TestCase):
"""ProfileAudiencesChanges unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ProfileAudiencesChanges
            include_optional is a boolean; when False only required
            params are included, when True both required and
            optional params are included """
# model = talon_one.models.profile_audiences_changes.ProfileAudiencesChanges() # noqa: E501
if include_optional :
return ProfileAudiencesChanges(
adds = [
56
],
deletes = [
56
]
)
else :
return ProfileAudiencesChanges(
adds = [
56
],
deletes = [
56
],
)
def testProfileAudiencesChanges(self):
"""Test ProfileAudiencesChanges"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 35.578125
| 647
| 0.645586
|
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.profile_audiences_changes import ProfileAudiencesChanges
from talon_one.rest import ApiException
class TestProfileAudiencesChanges(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
        if include_optional :
return ProfileAudiencesChanges(
adds = [
56
],
deletes = [
56
]
)
else :
return ProfileAudiencesChanges(
adds = [
56
],
deletes = [
56
],
)
def testProfileAudiencesChanges(self):
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f712fc10c849490c50a12ea4f35d86af02b77820
| 1,407
|
py
|
Python
|
src/subcommands/listers/list_users.py
|
kevin3/cwl-ica
|
cf706ea42993d563f364c0847ee4b882f8fe067c
|
[
"MIT"
] | 8
|
2021-12-08T05:33:58.000Z
|
2022-03-07T00:40:48.000Z
|
src/subcommands/listers/list_users.py
|
kevin3/cwl-ica
|
cf706ea42993d563f364c0847ee4b882f8fe067c
|
[
"MIT"
] | 34
|
2021-08-11T03:59:33.000Z
|
2022-03-10T05:39:26.000Z
|
src/subcommands/listers/list_users.py
|
kevin3/cwl-ica
|
cf706ea42993d563f364c0847ee4b882f8fe067c
|
[
"MIT"
] | 1
|
2022-01-08T07:34:55.000Z
|
2022-01-08T07:34:55.000Z
|
#!/usr/bin/env python3
"""
List all users registered in <CWL_ICA_REPO_PATH>/config/user.yaml
"""
from classes.command import Command
from utils.logging import get_logger
import pandas as pd
from utils.repo import read_yaml, get_user_yaml_path
import sys
logger = get_logger()
class ListUsers(Command):
"""Usage:
cwl-ica [options] list-users help
cwl-ica [options] list-users
Description:
List all registered users in <CWL_ICA_REPO_PATH>/config/user.yaml
Example:
cwl-ica list-users
"""
def __init__(self, command_argv):
# Collect args from doc strings
super().__init__(command_argv)
# Check args
self.check_args()
def __call__(self):
"""
Just run through this
:return:
"""
# Check project.yaml exists
user_yaml_path = get_user_yaml_path()
user_list = read_yaml(user_yaml_path)['users']
# Create pandas df of user yaml path
user_df = pd.DataFrame(user_list)
# Write user to stdout
user_df.to_markdown(sys.stdout, index=False)
# Add new line
print()
def check_args(self):
"""
        Nothing to validate for list-users; just make sure the user.yaml config path exists
:return:
"""
# Just make sure the user.yaml path exists
_ = get_user_yaml_path()
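# A minimal, hypothetical sketch of what ListUsers.__call__ does above: read
# the `users` list from user.yaml and print it to stdout as a markdown table.
# Note that pandas' `to_markdown` additionally requires the `tabulate` package.
#
#   import sys
#   import pandas as pd
#   from utils.repo import read_yaml, get_user_yaml_path
#
#   users = read_yaml(get_user_yaml_path())['users']
#   pd.DataFrame(users).to_markdown(sys.stdout, index=False)
#   print()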
| 21.984375
| 78
| 0.636105
|
from classes.command import Command
from utils.logging import get_logger
import pandas as pd
from utils.repo import read_yaml, get_user_yaml_path
import sys
logger = get_logger()
class ListUsers(Command):
def __init__(self, command_argv):
super().__init__(command_argv)
self.check_args()
def __call__(self):
user_yaml_path = get_user_yaml_path()
user_list = read_yaml(user_yaml_path)['users']
user_df = pd.DataFrame(user_list)
user_df.to_markdown(sys.stdout, index=False)
print()
def check_args(self):
_ = get_user_yaml_path()
| true
| true
|
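The ListUsers command in the record above boils down to reading a list of user dicts from user.yaml and rendering it as a markdown table on stdout with pandas. A minimal sketch of that flow, with invented users standing in for read_yaml(user_yaml_path)['users'] (DataFrame.to_markdown also needs the tabulate package installed):

import sys

import pandas as pd

# Invented stand-in for read_yaml(get_user_yaml_path())['users'] in the record above
users = [
    {"username": "alice", "email": "alice@example.com"},
    {"username": "bob", "email": "bob@example.com"},
]

user_df = pd.DataFrame(users)
user_df.to_markdown(sys.stdout, index=False)  # same call ListUsers.__call__ makes
print()  # trailing newline, as in the command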
f712fc37bbc5834d89b087770d9385645d23ac35
| 287
|
py
|
Python
|
delphi/distributions/__init__.py
|
pstefanou12/icml-submission
|
341cc13cbd0a7a3a38c94b1adab80dbe0a98718c
|
[
"MIT"
] | null | null | null |
delphi/distributions/__init__.py
|
pstefanou12/icml-submission
|
341cc13cbd0a7a3a38c94b1adab80dbe0a98718c
|
[
"MIT"
] | null | null | null |
delphi/distributions/__init__.py
|
pstefanou12/icml-submission
|
341cc13cbd0a7a3a38c94b1adab80dbe0a98718c
|
[
"MIT"
] | null | null | null |
from .censored_normal import CensoredNormal
from .censored_multivariate_normal import CensoredMultivariateNormal
from .truncated_normal import TruncatedNormal
from .truncated_multivariate_normal import TruncatedMultivariateNormal
from .truncated_boolean_product import TruncatedBernoulli
| 57.4
| 70
| 0.916376
|
from .censored_normal import CensoredNormal
from .censored_multivariate_normal import CensoredMultivariateNormal
from .truncated_normal import TruncatedNormal
from .truncated_multivariate_normal import TruncatedMultivariateNormal
from .truncated_boolean_product import TruncatedBernoulli
| true
| true
|
f712fcb88b0b58b27af1bae0b47800d64b53266b
| 3,419
|
py
|
Python
|
testing/test_dynamic_function_error_suppression.py
|
python-discord/flake8-annotations
|
f96dd508d5b2c0bcaf3019d6dcfe65612e6cef78
|
[
"MIT"
] | 49
|
2019-08-06T20:54:38.000Z
|
2020-09-02T10:59:24.000Z
|
testing/test_dynamic_function_error_suppression.py
|
python-discord/flake8-annotations
|
f96dd508d5b2c0bcaf3019d6dcfe65612e6cef78
|
[
"MIT"
] | 73
|
2019-08-06T18:02:54.000Z
|
2020-08-31T12:21:39.000Z
|
testing/test_dynamic_function_error_suppression.py
|
python-discord/flake8-annotations
|
f96dd508d5b2c0bcaf3019d6dcfe65612e6cef78
|
[
"MIT"
] | 12
|
2019-08-08T20:25:59.000Z
|
2020-09-02T18:17:48.000Z
|
from typing import Tuple
import pytest
from flake8_annotations.error_codes import Error
from testing.helpers import check_is_empty, check_is_not_empty, check_source
from testing.test_cases.dynamic_function_test_cases import (
DynamicallyTypedFunctionTestCase,
DynamicallyTypedNestedFunctionTestCase,
dynamic_function_test_cases,
nested_dynamic_function_test_cases,
)
class TestDynamicallyTypedFunctionErrorSuppression:
"""Test suppression of errors for dynamically typed functions."""
@pytest.fixture(
params=dynamic_function_test_cases.items(), ids=dynamic_function_test_cases.keys()
)
def yielded_errors(
self, request # noqa: ANN001
) -> Tuple[str, DynamicallyTypedFunctionTestCase, Tuple[Error]]:
"""
Build a fixture for the errors emitted from parsing the dynamically typed def test code.
Fixture provides a tuple of: test case name, its corresponding
`DynamicallyTypedFunctionTestCase` instance, and a tuple of the errors yielded by the
checker, which should be empty if the test case's `should_yield_error` is `False`.
"""
test_case_name, test_case = request.param
return (
test_case_name,
test_case,
tuple(check_source(test_case.src, allow_untyped_defs=True)),
)
def test_suppressed_dynamic_function_error(
self, yielded_errors: Tuple[str, DynamicallyTypedFunctionTestCase, Tuple[Error]]
) -> None:
"""Test that all errors are suppressed if a function is dynamically typed."""
test_case_name, test_case, errors = yielded_errors
failure_msg = f"Check failed for case '{test_case_name}'"
if test_case.should_yield_error:
check_is_not_empty(errors, msg=failure_msg)
else:
check_is_empty(errors, msg=failure_msg)
class TestDynamicallyTypedNestedFunctionErrorSuppression:
"""Test suppression of errors for dynamically typed nested functions."""
@pytest.fixture(
params=nested_dynamic_function_test_cases.items(),
ids=nested_dynamic_function_test_cases.keys(),
)
def yielded_errors(
self, request # noqa: ANN001
) -> Tuple[str, DynamicallyTypedNestedFunctionTestCase, Tuple[Error]]:
"""
Build a fixture for the errors emitted from parsing the dynamically typed def test code.
Fixture provides a tuple of: test case name, its corresponding
`DynamicallyTypedNestedFunctionTestCase` instance, and a tuple of the errors yielded by the
checker, which should be empty if the test case's `should_yield_error` is `False`.
"""
test_case_name, test_case = request.param
return (
test_case_name,
test_case,
tuple(check_source(test_case.src, allow_untyped_nested=True)),
)
def test_suppressed_dynamic_nested_function_error(
self, yielded_errors: Tuple[str, DynamicallyTypedNestedFunctionTestCase, Tuple[Error]]
) -> None:
"""Test that all errors are suppressed if a nested function is dynamically typed."""
test_case_name, test_case, errors = yielded_errors
failure_msg = f"Check failed for case '{test_case_name}'"
if test_case.should_yield_error:
check_is_not_empty(errors, msg=failure_msg)
else:
check_is_empty(errors, msg=failure_msg)
| 38.852273
| 99
| 0.705762
|
from typing import Tuple
import pytest
from flake8_annotations.error_codes import Error
from testing.helpers import check_is_empty, check_is_not_empty, check_source
from testing.test_cases.dynamic_function_test_cases import (
DynamicallyTypedFunctionTestCase,
DynamicallyTypedNestedFunctionTestCase,
dynamic_function_test_cases,
nested_dynamic_function_test_cases,
)
class TestDynamicallyTypedFunctionErrorSuppression:
@pytest.fixture(
params=dynamic_function_test_cases.items(), ids=dynamic_function_test_cases.keys()
)
def yielded_errors(
self, request
) -> Tuple[str, DynamicallyTypedFunctionTestCase, Tuple[Error]]:
test_case_name, test_case = request.param
return (
test_case_name,
test_case,
tuple(check_source(test_case.src, allow_untyped_defs=True)),
)
def test_suppressed_dynamic_function_error(
self, yielded_errors: Tuple[str, DynamicallyTypedFunctionTestCase, Tuple[Error]]
) -> None:
test_case_name, test_case, errors = yielded_errors
failure_msg = f"Check failed for case '{test_case_name}'"
if test_case.should_yield_error:
check_is_not_empty(errors, msg=failure_msg)
else:
check_is_empty(errors, msg=failure_msg)
class TestDynamicallyTypedNestedFunctionErrorSuppression:
@pytest.fixture(
params=nested_dynamic_function_test_cases.items(),
ids=nested_dynamic_function_test_cases.keys(),
)
def yielded_errors(
self, request
) -> Tuple[str, DynamicallyTypedNestedFunctionTestCase, Tuple[Error]]:
test_case_name, test_case = request.param
return (
test_case_name,
test_case,
tuple(check_source(test_case.src, allow_untyped_nested=True)),
)
def test_suppressed_dynamic_nested_function_error(
self, yielded_errors: Tuple[str, DynamicallyTypedNestedFunctionTestCase, Tuple[Error]]
) -> None:
test_case_name, test_case, errors = yielded_errors
failure_msg = f"Check failed for case '{test_case_name}'"
if test_case.should_yield_error:
check_is_not_empty(errors, msg=failure_msg)
else:
check_is_empty(errors, msg=failure_msg)
| true
| true
|
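The flake8-annotations tests above rely on a dict-driven parametrized-fixture pattern: a mapping of case name to test case feeds pytest.fixture(params=..., ids=...), and each test unpacks the resulting (name, case) pair. A stripped-down sketch of just that mechanic, with invented case data rather than the project's real test cases:

from typing import NamedTuple, Tuple

import pytest


class Case(NamedTuple):
    src: str
    should_yield_error: bool


# Invented cases; only the parametrization mechanics mirror the record above
CASES = {
    "typed": Case(src="def f(x: int) -> int: return x", should_yield_error=False),
    "untyped": Case(src="def f(x): return x", should_yield_error=True),
}


@pytest.fixture(params=CASES.items(), ids=CASES.keys())
def named_case(request) -> Tuple[str, Case]:
    # Each parametrization hands the test a (name, case) pair
    return request.param


def test_case_has_source(named_case: Tuple[str, Case]) -> None:
    name, case = named_case
    assert case.src, f"Check failed for case '{name}'"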
f712fcca54f7c24c8947bd7ce93370ffecca026f
| 408
|
py
|
Python
|
tests.py
|
justin4480/spelling
|
b209a95aa7d95ec7febd576db03377ee3368e05d
|
[
"MIT"
] | null | null | null |
tests.py
|
justin4480/spelling
|
b209a95aa7d95ec7febd576db03377ee3368e05d
|
[
"MIT"
] | null | null | null |
tests.py
|
justin4480/spelling
|
b209a95aa7d95ec7febd576db03377ee3368e05d
|
[
"MIT"
] | null | null | null |
from main import Word, Wordgroup
print('start')
i = 22
wg = Wordgroup()
wg.create_wordgroup(f'wordgroup_test{i}')
wa = Word()
wa.create_word(f'word_test{i}a', wg)
wb = Word()
wb.create_word(f'word_test{i}b', wg)
wg = Wordgroup()
wg.load_wordgroup(f'wordgroup_test{i}')
wa = Word()
wa.load_word(f'word_test{i}a')
wb = Word()
wb.load_word(f'word_test{i}b')
wg.load_words()
print(wg.words)
print('end')
| 15.111111
| 41
| 0.696078
|
from main import Word, Wordgroup
print('start')
i = 22
wg = Wordgroup()
wg.create_wordgroup(f'wordgroup_test{i}')
wa = Word()
wa.create_word(f'word_test{i}a', wg)
wb = Word()
wb.create_word(f'word_test{i}b', wg)
wg = Wordgroup()
wg.load_wordgroup(f'wordgroup_test{i}')
wa = Word()
wa.load_word(f'word_test{i}a')
wb = Word()
wb.load_word(f'word_test{i}b')
wg.load_words()
print(wg.words)
print('end')
| true
| true
|
f712fdb394a818d1ca7adba3026bbe8e490a5839
| 2,122
|
py
|
Python
|
src/app/test/api/http/unit/handlers/v1/admin_test.py
|
ExpressHermes/beer-garden
|
2ea0944d7528a8127bc1b79d16d8fdc668f1c8e4
|
[
"MIT"
] | 230
|
2018-02-03T01:33:45.000Z
|
2022-02-20T22:07:25.000Z
|
src/app/test/api/http/unit/handlers/v1/admin_test.py
|
ExpressHermes/beer-garden
|
2ea0944d7528a8127bc1b79d16d8fdc668f1c8e4
|
[
"MIT"
] | 961
|
2018-02-06T11:22:40.000Z
|
2022-03-24T15:22:33.000Z
|
src/app/test/api/http/unit/handlers/v1/admin_test.py
|
ExpressHermes/beer-garden
|
2ea0944d7528a8127bc1b79d16d8fdc668f1c8e4
|
[
"MIT"
] | 33
|
2018-02-04T18:00:07.000Z
|
2021-12-15T13:07:22.000Z
|
# -*- coding: utf-8 -*-
import unittest
from mock import MagicMock, Mock, patch
from tornado.gen import Future
from .. import TestHandlerBase
@unittest.skip("TODO")
class AdminAPITest(TestHandlerBase):
def setUp(self):
self.client_mock = MagicMock(name="client_mock")
self.fake_context = MagicMock(
__enter__=Mock(return_value=self.client_mock),
__exit__=Mock(return_value=False),
)
self.future_mock = Future()
super(AdminAPITest, self).setUp()
@patch("brew_view.handlers.v1.admin.thrift_context")
def test_patch(self, context_mock):
context_mock.return_value = self.fake_context
self.client_mock.rescanSystemDirectory.return_value = self.future_mock
self.future_mock.set_result(None)
response = self.fetch(
"/api/v1/admin/",
method="PATCH",
body='{"operations": [{"operation": "rescan"}]}',
headers={"content-type": "application/json"},
)
self.assertEqual(204, response.code)
self.client_mock.rescanSystemDirectory.assert_called_once_with()
@patch("brew_view.handlers.v1.admin.thrift_context")
def test_patch_exception(self, context_mock):
context_mock.return_value = self.fake_context
self.client_mock.rescanSystemDirectory.return_value = self.future_mock
self.future_mock.set_exception(ValueError())
response = self.fetch(
"/api/v1/admin/",
method="PATCH",
body='{"operations": [{"operation": "rescan"}]}',
headers={"content-type": "application/json"},
)
self.assertGreaterEqual(response.code, 500)
self.client_mock.rescanSystemDirectory.assert_called_once_with()
def test_patch_bad_operation(self):
response = self.fetch(
"/api/v1/admin/",
method="PATCH",
body='{"operations": [{"operation": "fake"}]}',
headers={"content-type": "application/json"},
)
self.assertGreaterEqual(response.code, 400)
self.assertLess(response.code, 500)
| 34.786885
| 78
| 0.638077
|
import unittest
from mock import MagicMock, Mock, patch
from tornado.gen import Future
from .. import TestHandlerBase
@unittest.skip("TODO")
class AdminAPITest(TestHandlerBase):
def setUp(self):
self.client_mock = MagicMock(name="client_mock")
self.fake_context = MagicMock(
__enter__=Mock(return_value=self.client_mock),
__exit__=Mock(return_value=False),
)
self.future_mock = Future()
super(AdminAPITest, self).setUp()
@patch("brew_view.handlers.v1.admin.thrift_context")
def test_patch(self, context_mock):
context_mock.return_value = self.fake_context
self.client_mock.rescanSystemDirectory.return_value = self.future_mock
self.future_mock.set_result(None)
response = self.fetch(
"/api/v1/admin/",
method="PATCH",
body='{"operations": [{"operation": "rescan"}]}',
headers={"content-type": "application/json"},
)
self.assertEqual(204, response.code)
self.client_mock.rescanSystemDirectory.assert_called_once_with()
@patch("brew_view.handlers.v1.admin.thrift_context")
def test_patch_exception(self, context_mock):
context_mock.return_value = self.fake_context
self.client_mock.rescanSystemDirectory.return_value = self.future_mock
self.future_mock.set_exception(ValueError())
response = self.fetch(
"/api/v1/admin/",
method="PATCH",
body='{"operations": [{"operation": "rescan"}]}',
headers={"content-type": "application/json"},
)
self.assertGreaterEqual(response.code, 500)
self.client_mock.rescanSystemDirectory.assert_called_once_with()
def test_patch_bad_operation(self):
response = self.fetch(
"/api/v1/admin/",
method="PATCH",
body='{"operations": [{"operation": "fake"}]}',
headers={"content-type": "application/json"},
)
self.assertGreaterEqual(response.code, 400)
self.assertLess(response.code, 500)
| true
| true
|
f712ff3202a32d3e17ff93147d1fcf3e2d1d5750
| 1,244
|
py
|
Python
|
examples/plotting_utils.py
|
Broly498/sentinel2-cloud-detector
|
912880fcd6fed482475b4cd8da07bda17993ebe8
|
[
"CC0-1.0"
] | null | null | null |
examples/plotting_utils.py
|
Broly498/sentinel2-cloud-detector
|
912880fcd6fed482475b4cd8da07bda17993ebe8
|
[
"CC0-1.0"
] | null | null | null |
examples/plotting_utils.py
|
Broly498/sentinel2-cloud-detector
|
912880fcd6fed482475b4cd8da07bda17993ebe8
|
[
"CC0-1.0"
] | null | null | null |
"""
Plotting utilities for example notebooks
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_image(image=None, mask=None, ax=None, factor=3.5/255, clip_range=(0, 1), **kwargs):
""" Utility function for plotting RGB images and masks.
"""
if ax is None:
_, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
mask_color = [255, 255, 255, 255] if image is None else [255, 255, 0, 100]
if image is None:
if mask is None:
raise ValueError('image or mask should be given')
image = np.zeros(mask.shape + (3,), dtype=np.uint8)
ax.imshow(np.clip(image * factor, *clip_range), **kwargs)
if mask is not None:
cloud_image = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.uint8)
cloud_image[mask == 1] = np.asarray(mask_color, dtype=np.uint8)
ax.imshow(cloud_image)
def plot_probabilities(image, proba, factor=3.5/255):
""" Utility function for plotting a RGB image and its cloud probability map next to each other.
"""
plt.figure(figsize=(15, 15))
ax = plt.subplot(1, 2, 1)
ax.imshow(np.clip(image * factor, 0, 1))
    plt.show()
ax = plt.subplot(1, 2, 2)
ax.imshow(proba, cmap=plt.cm.inferno)
    plt.show()
| 29.619048
| 99
| 0.635048
|
import matplotlib.pyplot as plt
import numpy as np
def plot_image(image=None, mask=None, ax=None, factor=3.5/255, clip_range=(0, 1), **kwargs):
if ax is None:
_, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
mask_color = [255, 255, 255, 255] if image is None else [255, 255, 0, 100]
if image is None:
if mask is None:
raise ValueError('image or mask should be given')
image = np.zeros(mask.shape + (3,), dtype=np.uint8)
ax.imshow(np.clip(image * factor, *clip_range), **kwargs)
if mask is not None:
cloud_image = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.uint8)
cloud_image[mask == 1] = np.asarray(mask_color, dtype=np.uint8)
ax.imshow(cloud_image)
def plot_probabilities(image, proba, factor=3.5/255):
plt.figure(figsize=(15, 15))
ax = plt.subplot(1, 2, 1)
ax.imshow(np.clip(image * factor, 0, 1))
    plt.show()
ax = plt.subplot(1, 2, 2)
ax.imshow(proba, cmap=plt.cm.inferno)
    plt.show()
| true
| true
|
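A small, assumed usage of the plot_image helper from the sentinel2-cloud-detector record above, with synthetic data standing in for a real Sentinel-2 crop (the import path assumes the examples directory is on sys.path):

import matplotlib.pyplot as plt
import numpy as np

from plotting_utils import plot_image  # assumption: examples/ is importable

# Synthetic RGB patch and a toy "cloud" mask in its centre
rgb = np.random.randint(0, 255, size=(64, 64, 3)).astype(np.uint8)
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:32, 16:32] = 1

plot_image(image=rgb, mask=mask)  # mask pixels are overlaid in translucent yellow
plt.show()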
f712ff51027a96befc2a75d344cf5da30ad5c687
| 112
|
py
|
Python
|
hello/urls.py
|
mk-knight23/django-project
|
ce03d27c0b95eb09d4aa35b298d23409540894ff
|
[
"MIT"
] | 1
|
2020-12-24T12:36:46.000Z
|
2020-12-24T12:36:46.000Z
|
hello/urls.py
|
mk-knight23/django-project
|
ce03d27c0b95eb09d4aa35b298d23409540894ff
|
[
"MIT"
] | null | null | null |
hello/urls.py
|
mk-knight23/django-project
|
ce03d27c0b95eb09d4aa35b298d23409540894ff
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.home , name="homepage"),
]
| 16
| 43
| 0.669643
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home , name="homepage"),
]
| true
| true
|
f712ffe3f187336770e99740630606a865844c02
| 560
|
py
|
Python
|
team_builder/accounts/urls.py
|
squadran2003/Social-Team-Builder
|
08fdab7cf176de0daf38078cd9fcd5f17501cef8
|
[
"MIT"
] | null | null | null |
team_builder/accounts/urls.py
|
squadran2003/Social-Team-Builder
|
08fdab7cf176de0daf38078cd9fcd5f17501cef8
|
[
"MIT"
] | null | null | null |
team_builder/accounts/urls.py
|
squadran2003/Social-Team-Builder
|
08fdab7cf176de0daf38078cd9fcd5f17501cef8
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from .views import (ProfileView, ApplicantView,
loginView, sign_out, SignupView)
app_name = 'accounts'
urlpatterns = [
path('profile/edit/', ProfileView.as_view(), name='edit_profile'),
path('profile/<option>/', ProfileView.as_view(), name='profile'),
path('applicant/<int:pk>/', ApplicantView.as_view(), name='applicant'),
path('login/', loginView.as_view(), name='login'),
path('logout/', sign_out, name='logout'),
path('signup/', SignupView.as_view(), name='signup'),
]
| 31.111111
| 75
| 0.658929
|
from django.urls import path, include
from .views import (ProfileView, ApplicantView,
loginView, sign_out, SignupView)
app_name = 'accounts'
urlpatterns = [
path('profile/edit/', ProfileView.as_view(), name='edit_profile'),
path('profile/<option>/', ProfileView.as_view(), name='profile'),
path('applicant/<int:pk>/', ApplicantView.as_view(), name='applicant'),
path('login/', loginView.as_view(), name='login'),
path('logout/', sign_out, name='logout'),
path('signup/', SignupView.as_view(), name='signup'),
]
| true
| true
|
f712ffe6fe5c0a6c8759123f756a187d71de3fdd
| 8,081
|
py
|
Python
|
homeassistant/components/miflora/sensor.py
|
switschel/core
|
0ecca246bdc3028c30bf8ccbf2b4c7f2a8b3f9aa
|
[
"Apache-2.0"
] | 2
|
2021-01-29T02:52:01.000Z
|
2021-05-15T04:23:18.000Z
|
homeassistant/components/miflora/sensor.py
|
switschel/core
|
0ecca246bdc3028c30bf8ccbf2b4c7f2a8b3f9aa
|
[
"Apache-2.0"
] | 68
|
2020-07-23T07:13:53.000Z
|
2022-03-31T06:01:48.000Z
|
homeassistant/components/miflora/sensor.py
|
switschel/core
|
0ecca246bdc3028c30bf8ccbf2b4c7f2a8b3f9aa
|
[
"Apache-2.0"
] | 7
|
2021-03-20T12:34:01.000Z
|
2021-12-02T10:13:52.000Z
|
"""Support for Xiaomi Mi Flora BLE plant sensor."""
from datetime import timedelta
import logging
import btlewrap
from btlewrap import BluetoothBackendException
from miflora import miflora_poller
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import (
CONDUCTIVITY,
CONF_FORCE_UPDATE,
CONF_MAC,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_SCAN_INTERVAL,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
EVENT_HOMEASSISTANT_START,
LIGHT_LUX,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.util.temperature import celsius_to_fahrenheit
try:
import bluepy.btle # noqa: F401 pylint: disable=unused-import
BACKEND = btlewrap.BluepyBackend
except ImportError:
BACKEND = btlewrap.GatttoolBackend
_LOGGER = logging.getLogger(__name__)
CONF_ADAPTER = "adapter"
CONF_MEDIAN = "median"
CONF_GO_UNAVAILABLE_TIMEOUT = "go_unavailable_timeout"
DEFAULT_ADAPTER = "hci0"
DEFAULT_FORCE_UPDATE = False
DEFAULT_MEDIAN = 3
DEFAULT_NAME = "Mi Flora"
DEFAULT_GO_UNAVAILABLE_TIMEOUT = timedelta(seconds=7200)
SCAN_INTERVAL = timedelta(seconds=1200)
ATTR_LAST_SUCCESSFUL_UPDATE = "last_successful_update"
# Sensor types are defined like: Name, units, icon, device_class
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_CELSIUS, None, DEVICE_CLASS_TEMPERATURE],
"light": ["Light intensity", LIGHT_LUX, None, DEVICE_CLASS_ILLUMINANCE],
"moisture": ["Moisture", PERCENTAGE, "mdi:water-percent", None],
"conductivity": ["Conductivity", CONDUCTIVITY, "mdi:flash-circle", None],
"battery": ["Battery", PERCENTAGE, None, DEVICE_CLASS_BATTERY],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MEDIAN, default=DEFAULT_MEDIAN): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_ADAPTER, default=DEFAULT_ADAPTER): cv.string,
vol.Optional(
CONF_GO_UNAVAILABLE_TIMEOUT, default=DEFAULT_GO_UNAVAILABLE_TIMEOUT
): cv.time_period,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the MiFlora sensor."""
backend = BACKEND
_LOGGER.debug("Miflora is using %s backend", backend.__name__)
cache = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL).total_seconds()
poller = miflora_poller.MiFloraPoller(
config.get(CONF_MAC),
cache_timeout=cache,
adapter=config.get(CONF_ADAPTER),
backend=backend,
)
force_update = config.get(CONF_FORCE_UPDATE)
median = config.get(CONF_MEDIAN)
go_unavailable_timeout = config.get(CONF_GO_UNAVAILABLE_TIMEOUT)
devs = []
for parameter in config[CONF_MONITORED_CONDITIONS]:
name = SENSOR_TYPES[parameter][0]
unit = (
hass.config.units.temperature_unit
if parameter == "temperature"
else SENSOR_TYPES[parameter][1]
)
icon = SENSOR_TYPES[parameter][2]
device_class = SENSOR_TYPES[parameter][3]
prefix = config.get(CONF_NAME)
if prefix:
name = f"{prefix} {name}"
devs.append(
MiFloraSensor(
poller,
parameter,
name,
unit,
icon,
device_class,
force_update,
median,
go_unavailable_timeout,
)
)
async_add_entities(devs)
class MiFloraSensor(SensorEntity):
"""Implementing the MiFlora sensor."""
def __init__(
self,
poller,
parameter,
name,
unit,
icon,
device_class,
force_update,
median,
go_unavailable_timeout,
):
"""Initialize the sensor."""
self.poller = poller
self.parameter = parameter
self._unit = unit
self._icon = icon
self._name = name
self._state = None
self._device_class = device_class
self.data = []
self._force_update = force_update
self.go_unavailable_timeout = go_unavailable_timeout
self.last_successful_update = dt_util.utc_from_timestamp(0)
# Median is used to filter out outliers. median of 3 will filter
# single outliers, while median of 5 will filter double outliers
# Use median_count = 1 if no filtering is required.
self.median_count = median
async def async_added_to_hass(self):
"""Set initial state."""
@callback
def on_startup(_):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, on_startup)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return True if did update since 2h."""
return self.last_successful_update > (
dt_util.utcnow() - self.go_unavailable_timeout
)
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return {ATTR_LAST_SUCCESSFUL_UPDATE: self.last_successful_update}
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def state_class(self):
"""Return the state class of this entity."""
return STATE_CLASS_MEASUREMENT
@property
def native_unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
@property
def force_update(self):
"""Force update."""
return self._force_update
def update(self):
"""
Update current conditions.
        This uses a rolling median over median_count values (3 by default) to filter out outliers.
"""
try:
_LOGGER.debug("Polling data for %s", self.name)
data = self.poller.parameter_value(self.parameter)
except (OSError, BluetoothBackendException) as err:
_LOGGER.info("Polling error %s: %s", type(err).__name__, err)
return
if data is not None:
_LOGGER.debug("%s = %s", self.name, data)
if self._unit == TEMP_FAHRENHEIT:
data = celsius_to_fahrenheit(data)
self.data.append(data)
self.last_successful_update = dt_util.utcnow()
else:
_LOGGER.info("Did not receive any data from Mi Flora sensor %s", self.name)
# Remove old data from median list or set sensor value to None
# if no data is available anymore
if self.data:
self.data = self.data[1:]
else:
self._state = None
return
_LOGGER.debug("Data collected: %s", self.data)
if len(self.data) > self.median_count:
self.data = self.data[1:]
if len(self.data) == self.median_count:
median = sorted(self.data)[int((self.median_count - 1) / 2)]
_LOGGER.debug("Median is: %s", median)
self._state = median
elif self._state is None:
_LOGGER.debug("Set initial state")
self._state = self.data[0]
else:
_LOGGER.debug("Not yet enough data for median calculation")
| 30.379699
| 87
| 0.643361
|
from datetime import timedelta
import logging
import btlewrap
from btlewrap import BluetoothBackendException
from miflora import miflora_poller
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import (
CONDUCTIVITY,
CONF_FORCE_UPDATE,
CONF_MAC,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_SCAN_INTERVAL,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
EVENT_HOMEASSISTANT_START,
LIGHT_LUX,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.util.temperature import celsius_to_fahrenheit
try:
import bluepy.btle
BACKEND = btlewrap.BluepyBackend
except ImportError:
BACKEND = btlewrap.GatttoolBackend
_LOGGER = logging.getLogger(__name__)
CONF_ADAPTER = "adapter"
CONF_MEDIAN = "median"
CONF_GO_UNAVAILABLE_TIMEOUT = "go_unavailable_timeout"
DEFAULT_ADAPTER = "hci0"
DEFAULT_FORCE_UPDATE = False
DEFAULT_MEDIAN = 3
DEFAULT_NAME = "Mi Flora"
DEFAULT_GO_UNAVAILABLE_TIMEOUT = timedelta(seconds=7200)
SCAN_INTERVAL = timedelta(seconds=1200)
ATTR_LAST_SUCCESSFUL_UPDATE = "last_successful_update"
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_CELSIUS, None, DEVICE_CLASS_TEMPERATURE],
"light": ["Light intensity", LIGHT_LUX, None, DEVICE_CLASS_ILLUMINANCE],
"moisture": ["Moisture", PERCENTAGE, "mdi:water-percent", None],
"conductivity": ["Conductivity", CONDUCTIVITY, "mdi:flash-circle", None],
"battery": ["Battery", PERCENTAGE, None, DEVICE_CLASS_BATTERY],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MEDIAN, default=DEFAULT_MEDIAN): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_ADAPTER, default=DEFAULT_ADAPTER): cv.string,
vol.Optional(
CONF_GO_UNAVAILABLE_TIMEOUT, default=DEFAULT_GO_UNAVAILABLE_TIMEOUT
): cv.time_period,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
backend = BACKEND
_LOGGER.debug("Miflora is using %s backend", backend.__name__)
cache = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL).total_seconds()
poller = miflora_poller.MiFloraPoller(
config.get(CONF_MAC),
cache_timeout=cache,
adapter=config.get(CONF_ADAPTER),
backend=backend,
)
force_update = config.get(CONF_FORCE_UPDATE)
median = config.get(CONF_MEDIAN)
go_unavailable_timeout = config.get(CONF_GO_UNAVAILABLE_TIMEOUT)
devs = []
for parameter in config[CONF_MONITORED_CONDITIONS]:
name = SENSOR_TYPES[parameter][0]
unit = (
hass.config.units.temperature_unit
if parameter == "temperature"
else SENSOR_TYPES[parameter][1]
)
icon = SENSOR_TYPES[parameter][2]
device_class = SENSOR_TYPES[parameter][3]
prefix = config.get(CONF_NAME)
if prefix:
name = f"{prefix} {name}"
devs.append(
MiFloraSensor(
poller,
parameter,
name,
unit,
icon,
device_class,
force_update,
median,
go_unavailable_timeout,
)
)
async_add_entities(devs)
class MiFloraSensor(SensorEntity):
def __init__(
self,
poller,
parameter,
name,
unit,
icon,
device_class,
force_update,
median,
go_unavailable_timeout,
):
self.poller = poller
self.parameter = parameter
self._unit = unit
self._icon = icon
self._name = name
self._state = None
self._device_class = device_class
self.data = []
self._force_update = force_update
self.go_unavailable_timeout = go_unavailable_timeout
self.last_successful_update = dt_util.utc_from_timestamp(0)
self.median_count = median
async def async_added_to_hass(self):
@callback
def on_startup(_):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, on_startup)
@property
def name(self):
return self._name
@property
def native_value(self):
return self._state
@property
def available(self):
return self.last_successful_update > (
dt_util.utcnow() - self.go_unavailable_timeout
)
@property
def extra_state_attributes(self):
return {ATTR_LAST_SUCCESSFUL_UPDATE: self.last_successful_update}
@property
def device_class(self):
return self._device_class
@property
def state_class(self):
return STATE_CLASS_MEASUREMENT
@property
def native_unit_of_measurement(self):
return self._unit
@property
def icon(self):
return self._icon
@property
def force_update(self):
return self._force_update
def update(self):
try:
_LOGGER.debug("Polling data for %s", self.name)
data = self.poller.parameter_value(self.parameter)
except (OSError, BluetoothBackendException) as err:
_LOGGER.info("Polling error %s: %s", type(err).__name__, err)
return
if data is not None:
_LOGGER.debug("%s = %s", self.name, data)
if self._unit == TEMP_FAHRENHEIT:
data = celsius_to_fahrenheit(data)
self.data.append(data)
self.last_successful_update = dt_util.utcnow()
else:
_LOGGER.info("Did not receive any data from Mi Flora sensor %s", self.name)
if self.data:
self.data = self.data[1:]
else:
self._state = None
return
_LOGGER.debug("Data collected: %s", self.data)
if len(self.data) > self.median_count:
self.data = self.data[1:]
if len(self.data) == self.median_count:
median = sorted(self.data)[int((self.median_count - 1) / 2)]
_LOGGER.debug("Median is: %s", median)
self._state = median
elif self._state is None:
_LOGGER.debug("Set initial state")
self._state = self.data[0]
else:
_LOGGER.debug("Not yet enough data for median calculation")
| true
| true
|
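The miflora record above filters readings with a rolling median of median_count samples before publishing a state. A self-contained sketch of that filtering logic on its own, with invented temperature readings, to show the outlier rejection outside Home Assistant:

from collections import deque
from statistics import median_low

MEDIAN_COUNT = 3                       # mirrors DEFAULT_MEDIAN in the record above
window = deque(maxlen=MEDIAN_COUNT)    # oldest reading is dropped automatically

state = None
for reading in [21.0, 21.2, 55.0, 21.1, 21.3]:  # 55.0 is an injected outlier
    window.append(reading)
    if len(window) == MEDIAN_COUNT:
        # same as sorted(data)[int((median_count - 1) / 2)] in MiFloraSensor.update()
        state = median_low(window)
    elif state is None:
        state = window[0]              # initial state before the window is full
    print(f"reading={reading} -> state={state}")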
f712ffebf55ea52b9d968763e92a05b11a563ee3
| 12,387
|
py
|
Python
|
qa/rpc-tests/util.py
|
VZUH-dev/VZUH
|
5c3fcdb7cd439b2537217d5bc797b459a1518270
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/util.py
|
VZUH-dev/VZUH
|
5c3fcdb7cd439b2537217d5bc797b459a1518270
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/util.py
|
VZUH-dev/VZUH
|
5c3fcdb7cd439b2537217d5bc797b459a1518270
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2019 The Vzuh developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "vzuh.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
vzuhd and vzuh-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run vzuhd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "vzuhd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "vzuh-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in vzuh.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a vzuhd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "vzuhd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "vzuh-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple vzuhds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| 35.800578
| 104
| 0.647049
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "vzuh.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "vzuhd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "vzuh-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
sync_blocks(rpcs)
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i)
def initialize_chain_clean(test_dir, num_nodes):
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['):
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "vzuhd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "vzuh-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
change_address = from_node.getnewaddress()
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| true
| true
|
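For orientation, this is roughly how the regtest helpers in the util.py record above are composed by a test. It is a sketch only, assuming vzuhd and vzuh-cli are on PATH, a POSIX /dev/null is available, and util.py is importable:

from util import (assert_equal, connect_nodes_bi, initialize_chain,
                  start_nodes, stop_nodes, sync_blocks, wait_bitcoinds)

initialize_chain("tmp")            # build (or reuse) the cached 200-block chain
nodes = start_nodes(2, "tmp")      # two vzuhd daemons, returned as RPC proxies
connect_nodes_bi(nodes, 0, 1)      # peer them in both directions

nodes[0].setgenerate(True, 1)      # mine one block on node 0
sync_blocks(nodes)                 # block until node 1 has caught up
assert_equal(nodes[0].getblockcount(), nodes[1].getblockcount())

stop_nodes(nodes)
wait_bitcoinds()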
f7130035aa9cbd6d6b04ef2f46267242342d95e7
| 1,619
|
py
|
Python
|
examples/adspygoogle/dfp/v201306/get_all_orders.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | null | null | null |
examples/adspygoogle/dfp/v201306/get_all_orders.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | null | null | null |
examples/adspygoogle/dfp/v201306/get_all_orders.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | 2
|
2020-04-02T19:00:31.000Z
|
2020-08-06T03:28:38.000Z
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all orders. To create orders, run create_orders.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v201306')
# Get orders by statement.
orders = DfpUtils.GetAllEntitiesByStatementWithService(order_service)
# Display results.
for order in orders:
print ('Order with id \'%s\', name \'%s\', and advertiser id \'%s\' was '
'found.' % (order['id'], order['name'], order['advertiserId']))
print
print 'Number of results found: %s' % len(orders)
| 33.729167
| 80
| 0.723286
|
"""This code example gets all orders. To create orders, run create_orders.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
order_service = client.GetService('OrderService', version='v201306')
orders = DfpUtils.GetAllEntitiesByStatementWithService(order_service)
for order in orders:
print ('Order with id \'%s\', name \'%s\', and advertiser id \'%s\' was '
'found.' % (order['id'], order['name'], order['advertiserId']))
print
print 'Number of results found: %s' % len(orders)
| false
| true
|
f713009dc6d46b1dfeef756a592f007aa58af451
| 514
|
py
|
Python
|
astroML/utils/exceptions.py
|
arjunsavel/astroML
|
361cadc56360ca35c760405a341e35ecab6dd585
|
[
"BSD-2-Clause"
] | 735
|
2015-01-07T23:55:25.000Z
|
2022-03-31T15:20:25.000Z
|
astroML/utils/exceptions.py
|
awesomemachinelearning/astroML
|
d378ca41565d1aa39997191d13d46d09d104ff1d
|
[
"BSD-2-Clause"
] | 168
|
2015-01-06T21:02:41.000Z
|
2022-03-29T03:15:29.000Z
|
astroML/utils/exceptions.py
|
DinoBektesevic/astroML
|
b4e699bf45a65e233b40d60323c05eafa1d4955e
|
[
"BSD-2-Clause"
] | 278
|
2015-01-26T00:29:38.000Z
|
2022-02-25T21:17:50.000Z
|
"""
This module contains errors/exceptions and warnings for astroML.
"""
from astropy.utils.exceptions import AstropyWarning
class AstroMLWarning(AstropyWarning):
"""
A base warning class from which all AstroML warnings should inherit.
    This class is subclassed from AstropyWarning, so warnings inheriting from
    this class are handled by the Astropy logger.
"""
class AstroMLDeprecationWarning(AstroMLWarning):
"""
A warning class to indicate a deprecated feature in astroML.
"""
| 23.363636
| 75
| 0.743191
|
from astropy.utils.exceptions import AstropyWarning
class AstroMLWarning(AstropyWarning):
class AstroMLDeprecationWarning(AstroMLWarning):
| true
| true
|
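A hypothetical use of the warning classes in the astroML record above, assuming astroML is installed: raise an AstroMLDeprecationWarning so it flows through Astropy's warning handling like any other AstropyWarning subclass.

import warnings

from astroML.utils.exceptions import AstroMLDeprecationWarning


def old_helper():
    # Hypothetical deprecated function, used only for illustration
    warnings.warn("old_helper is deprecated; use new_helper instead",
                  AstroMLDeprecationWarning)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_helper()
    assert issubclass(caught[0].category, AstroMLDeprecationWarning)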
f71300d3ab93cd67015041b58c76c4dce981df47
| 912
|
py
|
Python
|
mysql/tests/common.py
|
Nevon/integrations-core
|
8b8284e8ca6c7188eea8548a6c3b809831c885cb
|
[
"BSD-3-Clause"
] | null | null | null |
mysql/tests/common.py
|
Nevon/integrations-core
|
8b8284e8ca6c7188eea8548a6c3b809831c885cb
|
[
"BSD-3-Clause"
] | null | null | null |
mysql/tests/common.py
|
Nevon/integrations-core
|
8b8284e8ca6c7188eea8548a6c3b809831c885cb
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from sys import maxsize
import pytest
from pkg_resources import parse_version
from datadog_checks.dev import get_docker_hostname
HERE = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.dirname(os.path.dirname(HERE))
TESTS_HELPER_DIR = os.path.join(ROOT, 'datadog_checks_tests_helper')
MYSQL_VERSION_IS_LATEST = os.getenv('MYSQL_VERSION', '').endswith('latest')
if MYSQL_VERSION_IS_LATEST is False:
MYSQL_VERSION_PARSED = parse_version(os.getenv('MYSQL_VERSION', ''))
else:
MYSQL_VERSION_PARSED = parse_version(str(maxsize))
CHECK_NAME = 'mysql'
HOST = get_docker_hostname()
PORT = 13306
SLAVE_PORT = 13307
USER = 'dog'
PASS = 'dog'
requires_static_version = pytest.mark.skipif(
MYSQL_VERSION_IS_LATEST, reason='Version `latest` is ever-changing, skipping'
)
| 26.823529
| 81
| 0.774123
|
import os
from sys import maxsize
import pytest
from pkg_resources import parse_version
from datadog_checks.dev import get_docker_hostname
HERE = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.dirname(os.path.dirname(HERE))
TESTS_HELPER_DIR = os.path.join(ROOT, 'datadog_checks_tests_helper')
MYSQL_VERSION_IS_LATEST = os.getenv('MYSQL_VERSION', '').endswith('latest')
if MYSQL_VERSION_IS_LATEST is False:
MYSQL_VERSION_PARSED = parse_version(os.getenv('MYSQL_VERSION', ''))
else:
MYSQL_VERSION_PARSED = parse_version(str(maxsize))
CHECK_NAME = 'mysql'
HOST = get_docker_hostname()
PORT = 13306
SLAVE_PORT = 13307
USER = 'dog'
PASS = 'dog'
requires_static_version = pytest.mark.skipif(
MYSQL_VERSION_IS_LATEST, reason='Version `latest` is ever-changing, skipping'
)
| true
| true
|
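Sketch of how the requires_static_version marker defined in the mysql common.py record above is meant to be applied: decorate a test so it is skipped whenever MYSQL_VERSION resolves to "latest". The test body is invented and assumes the tests directory is importable as common.

from pkg_resources import parse_version

from common import MYSQL_VERSION_PARSED, requires_static_version  # assumption: tests/ on sys.path


@requires_static_version
def test_only_runs_against_pinned_versions():
    # Invented assertion; only meaningful when the version is pinned, not "latest"
    assert MYSQL_VERSION_PARSED >= parse_version("5.7")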
f71301a2158041bdeea3e6ee12a28171ccdfbfe0
| 10,841
|
py
|
Python
|
deepforest/preprocess.py
|
henrykironde/DeepForest
|
9df98ea30debc8a1dc98edfa45dada063b109e6e
|
[
"MIT"
] | 249
|
2018-04-03T18:44:51.000Z
|
2022-03-31T18:01:52.000Z
|
deepforest/preprocess.py
|
henrykironde/DeepForest
|
9df98ea30debc8a1dc98edfa45dada063b109e6e
|
[
"MIT"
] | 280
|
2018-05-23T14:33:39.000Z
|
2022-03-31T21:29:38.000Z
|
deepforest/preprocess.py
|
henrykironde/DeepForest
|
9df98ea30debc8a1dc98edfa45dada063b109e6e
|
[
"MIT"
] | 112
|
2018-05-30T14:52:56.000Z
|
2022-03-25T10:10:58.000Z
|
# Deepforest Preprocessing model
"""The preprocessing module is used to reshape data into format suitable for
training or prediction.
For example cutting large tiles into smaller images.
"""
import os
import numpy as np
import pandas as pd
import slidingwindow
from PIL import Image
import torch
import warnings
import rasterio
def preprocess_image(image, device):
"""Preprocess a single RGB numpy array as a prediction from channels last, to channels first"""
image = torch.tensor(image, device=device).permute(2, 0, 1).unsqueeze(0)
image = image / 255
return image
def image_name_from_path(image_path):
"""Convert path to image name for use in indexing."""
image_name = os.path.basename(image_path)
image_name = os.path.splitext(image_name)[0]
return image_name
def compute_windows(numpy_image, patch_size, patch_overlap):
"""Create a sliding window object from a raster tile.
Args:
numpy_image (array): Raster object as numpy array to cut into crops
Returns:
windows (list): a sliding windows object
"""
if patch_overlap > 1:
raise ValueError("Patch overlap {} must be between 0 - 1".format(patch_overlap))
# Generate overlapping sliding windows
windows = slidingwindow.generate(numpy_image,
slidingwindow.DimOrder.HeightWidthChannel,
patch_size, patch_overlap)
return (windows)
def select_annotations(annotations, windows, index, allow_empty=False):
"""Select annotations that overlap with selected image crop.
Args:
image_name (str): Name of the image in the annotations file to lookup.
annotations_file: path to annotations file in
the format -> image_path, xmin, ymin, xmax, ymax, label
windows: A sliding window object (see compute_windows)
index: The index in the windows object to use a crop bounds
allow_empty (bool): If True, allow window crops
that have no annotations to be included
Returns:
selected_annotations: a pandas dataframe of annotations
"""
# Window coordinates - with respect to tile
window_xmin, window_ymin, w, h = windows[index].getRect()
window_xmax = window_xmin + w
window_ymax = window_ymin + h
# buffer coordinates a bit to grab boxes that might start just against
# the image edge. Don't allow boxes that start and end after the offset
offset = 40
selected_annotations = annotations[(annotations.xmin > (window_xmin - offset)) &
(annotations.xmin < (window_xmax)) &
(annotations.xmax >
(window_xmin)) & (annotations.ymin >
(window_ymin - offset)) &
(annotations.xmax <
(window_xmax + offset)) & (annotations.ymin <
(window_ymax)) &
(annotations.ymax >
(window_ymin)) & (annotations.ymax <
(window_ymax + offset))]
# change the image name
image_name = os.path.splitext("{}".format(annotations.image_path.unique()[0]))[0]
image_basename = os.path.splitext(image_name)[0]
selected_annotations.image_path = "{}_{}.png".format(image_basename, index)
# If no matching annotations, return a line with the image name, but no
# records
if selected_annotations.empty:
if allow_empty:
selected_annotations = pd.DataFrame(
["{}_{}.png".format(image_basename, index)], columns=["image_path"])
selected_annotations["xmin"] = 0
selected_annotations["ymin"] = 0
selected_annotations["xmax"] = 0
selected_annotations["ymax"] = 0
#Dummy label
selected_annotations["label"] = annotations.label.unique()[0]
else:
return None
else:
# update coordinates with respect to origin
selected_annotations.xmax = (selected_annotations.xmin - window_xmin) + (
selected_annotations.xmax - selected_annotations.xmin)
selected_annotations.xmin = (selected_annotations.xmin - window_xmin)
selected_annotations.ymax = (selected_annotations.ymin - window_ymin) + (
selected_annotations.ymax - selected_annotations.ymin)
selected_annotations.ymin = (selected_annotations.ymin - window_ymin)
# cut off any annotations over the border.
selected_annotations.loc[selected_annotations.xmin < 0, "xmin"] = 0
selected_annotations.loc[selected_annotations.xmax > w, "xmax"] = w
selected_annotations.loc[selected_annotations.ymin < 0, "ymin"] = 0
selected_annotations.loc[selected_annotations.ymax > h, "ymax"] = h
return selected_annotations
def save_crop(base_dir, image_name, index, crop):
"""Save window crop as image file to be read by PIL.
Filename should match the image_name + window index
"""
# create dir if needed
if not os.path.exists(base_dir):
os.makedirs(base_dir)
im = Image.fromarray(crop)
image_basename = os.path.splitext(image_name)[0]
filename = "{}/{}_{}.png".format(base_dir, image_basename, index)
im.save(filename)
return filename
def split_raster(annotations_file,
path_to_raster=None,
numpy_image=None,
base_dir=".",
patch_size=400,
patch_overlap=0.05,
allow_empty=False,
image_name = None):
"""Divide a large tile into smaller arrays. Each crop will be saved to
file.
Args:
numpy_image: a numpy object to be used as a raster, usually opened from rasterio.open.read()
path_to_raster: (str): Path to a tile that can be read by rasterio on disk
annotations_file (str): Path to annotations file (with column names)
data in the format -> image_path, xmin, ymin, xmax, ymax, label
base_dir (str): Where to save the annotations and image
crops relative to current working dir
patch_size (int): Maximum dimensions of square window
patch_overlap (float): Percent of overlap among windows 0->1
allow_empty: If True, include images with no annotations
to be included in the dataset
image_name (str): If numpy_image arg is used, what name to give the raster?
Returns:
A pandas dataframe with annotations file for training.
"""
# Load raster as image
# Load raster as image
if (numpy_image is None) & (path_to_raster is None):
raise IOError("supply a raster either as a path_to_raster or if ready from existing in memory numpy object, as numpy_image=")
if path_to_raster:
numpy_image = rasterio.open(path_to_raster).read()
numpy_image = np.moveaxis(numpy_image,0,2)
else:
if image_name is None:
raise(IOError("If passing an numpy_image, please also specify a image_name to match the column in the annotation.csv file"))
# Check that its 3 band
bands = numpy_image.shape[2]
if not bands == 3:
warnings.warn("Input rasterio had non-3 band shape of {}, ignoring alpha channel".format(numpy_image.shape))
try:
numpy_image = numpy_image[:,:,:3].astype("uint8")
except:
raise IOError("Input file {} has {} bands. DeepForest only accepts 3 band RGB "
"rasters in the order (height, width, channels). Selecting the first three bands failed, please reshape manually."
"If the image was cropped and saved as a .jpg, "
"please ensure that no alpha channel was used.".format(
path_to_raster, bands))
    # Check that the patch size is not larger than the image dimensions
height = numpy_image.shape[0]
width = numpy_image.shape[1]
if any(np.array([height, width]) < patch_size):
raise ValueError("Patch size of {} is larger than the image dimensions {}".format(
patch_size, [height, width]))
# Compute sliding window index
windows = compute_windows(numpy_image, patch_size, patch_overlap)
# Get image name for indexing
if image_name is None:
image_name = os.path.basename(path_to_raster)
# Load annotations file and coerce dtype
annotations = pd.read_csv(annotations_file)
# open annotations file
image_annotations = annotations[annotations.image_path == image_name]
# Sanity checks
if image_annotations.empty:
raise ValueError(
"No image names match between the file:{} and the image_path: {}. "
"Reminder that image paths should be the relative "
"path (e.g. 'image_name.tif'), not the full path "
"(e.g. path/to/dir/image_name.tif)".format(annotations_file, image_name))
if not all([
x in annotations.columns
for x in ["image_path", "xmin", "ymin", "xmax", "ymax", "label"]
]):
raise ValueError("Annotations file has {} columns, should have "
"format image_path, xmin, ymin, xmax, ymax, label".format(
annotations.shape[1]))
annotations_files = []
for index, window in enumerate(windows):
# Crop image
crop = numpy_image[windows[index].indices()]
#skip if empty crop
if crop.size == 0:
continue
# Find annotations, image_name is the basename of the path
crop_annotations = select_annotations(image_annotations, windows, index,
allow_empty)
# If empty images not allowed, select annotations returns None
if crop_annotations is not None:
# save annotations
annotations_files.append(crop_annotations)
# save image crop
save_crop(base_dir, image_name, index, crop)
if len(annotations_files) == 0:
raise ValueError(
"Input file has no overlapping annotations and allow_empty is {}".format(
allow_empty))
annotations_files = pd.concat(annotations_files)
# Checkpoint csv files, useful for parallelization
# Use filename of the raster path to save the annotations
image_basename = os.path.splitext(image_name)[0]
file_path = image_basename + ".csv"
file_path = os.path.join(base_dir, file_path)
annotations_files.to_csv(file_path, index=False, header=True)
return annotations_files
| 40.301115
| 140
| 0.628079
|
import os
import numpy as np
import pandas as pd
import slidingwindow
from PIL import Image
import torch
import warnings
import rasterio
def preprocess_image(image, device):
image = torch.tensor(image, device=device).permute(2, 0, 1).unsqueeze(0)
image = image / 255
return image
def image_name_from_path(image_path):
image_name = os.path.basename(image_path)
image_name = os.path.splitext(image_name)[0]
return image_name
def compute_windows(numpy_image, patch_size, patch_overlap):
if patch_overlap > 1:
raise ValueError("Patch overlap {} must be between 0 - 1".format(patch_overlap))
windows = slidingwindow.generate(numpy_image,
slidingwindow.DimOrder.HeightWidthChannel,
patch_size, patch_overlap)
return (windows)
def select_annotations(annotations, windows, index, allow_empty=False):
window_xmin, window_ymin, w, h = windows[index].getRect()
window_xmax = window_xmin + w
window_ymax = window_ymin + h
offset = 40
selected_annotations = annotations[(annotations.xmin > (window_xmin - offset)) &
(annotations.xmin < (window_xmax)) &
(annotations.xmax >
(window_xmin)) & (annotations.ymin >
(window_ymin - offset)) &
(annotations.xmax <
(window_xmax + offset)) & (annotations.ymin <
(window_ymax)) &
(annotations.ymax >
(window_ymin)) & (annotations.ymax <
(window_ymax + offset))]
# change the image name
image_name = os.path.splitext("{}".format(annotations.image_path.unique()[0]))[0]
image_basename = os.path.splitext(image_name)[0]
selected_annotations.image_path = "{}_{}.png".format(image_basename, index)
# If no matching annotations, return a line with the image name, but no
# records
if selected_annotations.empty:
if allow_empty:
selected_annotations = pd.DataFrame(
["{}_{}.png".format(image_basename, index)], columns=["image_path"])
selected_annotations["xmin"] = 0
selected_annotations["ymin"] = 0
selected_annotations["xmax"] = 0
selected_annotations["ymax"] = 0
#Dummy label
selected_annotations["label"] = annotations.label.unique()[0]
else:
return None
else:
# update coordinates with respect to origin
selected_annotations.xmax = (selected_annotations.xmin - window_xmin) + (
selected_annotations.xmax - selected_annotations.xmin)
selected_annotations.xmin = (selected_annotations.xmin - window_xmin)
selected_annotations.ymax = (selected_annotations.ymin - window_ymin) + (
selected_annotations.ymax - selected_annotations.ymin)
selected_annotations.ymin = (selected_annotations.ymin - window_ymin)
# cut off any annotations over the border.
selected_annotations.loc[selected_annotations.xmin < 0, "xmin"] = 0
selected_annotations.loc[selected_annotations.xmax > w, "xmax"] = w
selected_annotations.loc[selected_annotations.ymin < 0, "ymin"] = 0
selected_annotations.loc[selected_annotations.ymax > h, "ymax"] = h
return selected_annotations
def save_crop(base_dir, image_name, index, crop):
# create dir if needed
if not os.path.exists(base_dir):
os.makedirs(base_dir)
im = Image.fromarray(crop)
image_basename = os.path.splitext(image_name)[0]
filename = "{}/{}_{}.png".format(base_dir, image_basename, index)
im.save(filename)
return filename
def split_raster(annotations_file,
path_to_raster=None,
numpy_image=None,
base_dir=".",
patch_size=400,
patch_overlap=0.05,
allow_empty=False,
image_name = None):
# Load raster as image
# Load raster as image
if (numpy_image is None) & (path_to_raster is None):
raise IOError("supply a raster either as a path_to_raster or if ready from existing in memory numpy object, as numpy_image=")
if path_to_raster:
numpy_image = rasterio.open(path_to_raster).read()
numpy_image = np.moveaxis(numpy_image,0,2)
else:
if image_name is None:
raise(IOError("If passing an numpy_image, please also specify a image_name to match the column in the annotation.csv file"))
# Check that its 3 band
bands = numpy_image.shape[2]
if not bands == 3:
warnings.warn("Input rasterio had non-3 band shape of {}, ignoring alpha channel".format(numpy_image.shape))
try:
numpy_image = numpy_image[:,:,:3].astype("uint8")
except:
raise IOError("Input file {} has {} bands. DeepForest only accepts 3 band RGB "
"rasters in the order (height, width, channels). Selecting the first three bands failed, please reshape manually."
"If the image was cropped and saved as a .jpg, "
"please ensure that no alpha channel was used.".format(
path_to_raster, bands))
    # Check that the patch size is not larger than the image dimensions
height = numpy_image.shape[0]
width = numpy_image.shape[1]
if any(np.array([height, width]) < patch_size):
raise ValueError("Patch size of {} is larger than the image dimensions {}".format(
patch_size, [height, width]))
# Compute sliding window index
windows = compute_windows(numpy_image, patch_size, patch_overlap)
# Get image name for indexing
if image_name is None:
image_name = os.path.basename(path_to_raster)
# Load annotations file and coerce dtype
annotations = pd.read_csv(annotations_file)
# open annotations file
image_annotations = annotations[annotations.image_path == image_name]
# Sanity checks
if image_annotations.empty:
raise ValueError(
"No image names match between the file:{} and the image_path: {}. "
"Reminder that image paths should be the relative "
"path (e.g. 'image_name.tif'), not the full path "
"(e.g. path/to/dir/image_name.tif)".format(annotations_file, image_name))
if not all([
x in annotations.columns
for x in ["image_path", "xmin", "ymin", "xmax", "ymax", "label"]
]):
raise ValueError("Annotations file has {} columns, should have "
"format image_path, xmin, ymin, xmax, ymax, label".format(
annotations.shape[1]))
annotations_files = []
for index, window in enumerate(windows):
# Crop image
crop = numpy_image[windows[index].indices()]
#skip if empty crop
if crop.size == 0:
continue
# Find annotations, image_name is the basename of the path
crop_annotations = select_annotations(image_annotations, windows, index,
allow_empty)
# If empty images not allowed, select annotations returns None
if crop_annotations is not None:
# save annotations
annotations_files.append(crop_annotations)
# save image crop
save_crop(base_dir, image_name, index, crop)
if len(annotations_files) == 0:
raise ValueError(
"Input file has no overlapping annotations and allow_empty is {}".format(
allow_empty))
annotations_files = pd.concat(annotations_files)
# Checkpoint csv files, useful for parallelization
# Use filename of the raster path to save the annotations
image_basename = os.path.splitext(image_name)[0]
file_path = image_basename + ".csv"
file_path = os.path.join(base_dir, file_path)
annotations_files.to_csv(file_path, index=False, header=True)
return annotations_files
| true
| true
|
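A hedged sketch of how split_raster from the record above might be called; the file paths are placeholders, not taken from the source, and the import path assumes the deepforest/preprocess.py module layout shown in the record:
from deepforest.preprocess import split_raster
# Hypothetical inputs: a rasterio-readable tile and a CSV with columns
# image_path, xmin, ymin, xmax, ymax, label.
annotations = split_raster(
    annotations_file="annotations.csv",
    path_to_raster="large_tile.tif",
    base_dir="crops",          # window crops and a per-tile CSV are written here
    patch_size=400,
    patch_overlap=0.05,
    allow_empty=False,
)
print(annotations.head())      # window-relative boxes as a pandas dataframe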
f71301afd0525fb53613e57af433339ec0256bce
| 250
|
py
|
Python
|
05_debugging/bug_06.py
|
ASU-CompMethodsPhysics-PHY494/PHY494-resources-2020
|
20e08c20995eab567063b1845487e84c0e690e96
|
[
"CC-BY-4.0"
] | null | null | null |
05_debugging/bug_06.py
|
ASU-CompMethodsPhysics-PHY494/PHY494-resources-2020
|
20e08c20995eab567063b1845487e84c0e690e96
|
[
"CC-BY-4.0"
] | null | null | null |
05_debugging/bug_06.py
|
ASU-CompMethodsPhysics-PHY494/PHY494-resources-2020
|
20e08c20995eab567063b1845487e84c0e690e96
|
[
"CC-BY-4.0"
] | null | null | null |
# bug 5
# https://asu-compmethodsphysics-phy494.github.io/ASU-PHY494/2019/02/05/05_Debugging/#activity-fix-as-many-bugs-as-possible
# Create a list of values -10, -9.8, -9.6, ..., -0.2, 0, 0.2, ..., 10.
h = 0.2
x = [-10 + i*h for i in range(100)]
| 27.777778
| 123
| 0.632
|
h = 0.2
x = [-10 + i*h for i in range(100)]
| true
| true
|
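The comprehension in the record above stops at -10 + 99*0.2 = 9.8, so the endpoint 10 is never generated; that is the bug the exercise asks students to find. A minimal sketch of one possible fix, kept separate because the exercise file deliberately ships with the bug:
# 101 points are needed to cover -10, -9.8, ..., 9.8, 10 with step h = 0.2.
h = 0.2
x = [-10 + i*h for i in range(101)]
assert len(x) == 101 and abs(x[-1] - 10) < 1e-12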
f713033e7c9b0eb233e59f1ec4f01deff530cf46
| 9,351
|
py
|
Python
|
csgo/client.py
|
ValvePython/csgo
|
ed81efa8c36122e882ffa5247be1b327dbd20850
|
[
"MIT"
] | 108
|
2016-05-16T20:05:45.000Z
|
2022-03-31T13:33:52.000Z
|
csgo/client.py
|
ValvePython/csgo
|
ed81efa8c36122e882ffa5247be1b327dbd20850
|
[
"MIT"
] | 50
|
2016-10-19T20:55:09.000Z
|
2022-02-22T18:14:21.000Z
|
csgo/client.py
|
ValvePython/csgo
|
ed81efa8c36122e882ffa5247be1b327dbd20850
|
[
"MIT"
] | 21
|
2017-04-18T20:38:27.000Z
|
2022-03-11T16:34:59.000Z
|
"""
Only the most essential features to :class:`csgo.client.CSGOClient` are found here. Every other feature is inherited from
the :mod:`csgo.features` package and it's submodules.
"""
import logging
import gevent
import google.protobuf
from steam.core.msg import GCMsgHdrProto
from steam.client.gc import GameCoordinator
from steam.enums.emsg import EMsg
from steam.utils.proto import proto_fill_from_dict
from csgo.features import FeatureBase
from csgo.enums import EGCBaseClientMsg, GCConnectionStatus, GCClientLauncherType
from csgo.msg import get_emsg_enum, find_proto
from csgo.protobufs import gcsdk_gcmessages_pb2 as pb_gc
from csgo.protobufs import cstrike15_gcmessages_pb2 as pb_gclient
class CSGOClient(GameCoordinator, FeatureBase):
"""
:param steam_client: Instance of the steam client
:type steam_client: :class:`steam.client.SteamClient`
"""
_retry_welcome_loop = None
verbose_debug = False
#: enable pretty print of messages in debug logging
app_id = 730
#: main client app id
launcher = GCClientLauncherType.DEFAULT
#: launcher type (used for access to PW) See: :class:`csgo.enums.GCClientLauncherType`
current_jobid = 0
ready = False
#: ``True`` when we have a session with GC
connection_status = GCConnectionStatus.NO_SESSION
#: See :class:`csgo.enums.GCConnectionStatus`
@property
def account_id(self):
"""
Account ID of the logged-in user in the steam client
"""
return self.steam.steam_id.id
@property
def steam_id(self):
"""
:class:`steam.steamid.SteamID` of the logged-in user in the steam client
"""
return self.steam.steam_id
def __init__(self, steam_client):
GameCoordinator.__init__(self, steam_client, self.app_id)
self._LOG = logging.getLogger(self.__class__.__name__)
FeatureBase.__init__(self)
self.steam.on('disconnected', self._handle_disconnect)
self.steam.on(EMsg.ClientPlayingSessionState, self._handle_play_sess_state)
# register GC message handles
self.on(EGCBaseClientMsg.EMsgGCClientConnectionStatus, self._handle_conn_status)
self.on(EGCBaseClientMsg.EMsgGCClientWelcome, self._handle_client_welcome)
def __repr__(self):
return "<%s(%s) %s>" % (self.__class__.__name__,
repr(self.steam),
repr(self.connection_status),
)
def _handle_play_sess_state(self, message):
if self.ready and message.playing_app != self.app_id:
self._set_connection_status(GCConnectionStatus.NO_SESSION)
def _handle_disconnect(self):
if self._retry_welcome_loop:
self._retry_welcome_loop.kill()
self._set_connection_status(GCConnectionStatus.NO_SESSION)
def _handle_client_welcome(self, message):
self._set_connection_status(GCConnectionStatus.HAVE_SESSION)
# handle CSGO Welcome
submessage = pb_gclient.CMsgCStrike15Welcome()
submessage.ParseFromString(message.game_data)
if self.verbose_debug:
self._LOG.debug("Got CStrike15Welcome:\n%s" % str(submessage))
else:
self._LOG.debug("Got CStrike15Welcome")
self.emit('csgo_welcome', submessage)
def _handle_conn_status(self, message):
self._set_connection_status(message.status)
def _process_gc_message(self, emsg, header, payload):
emsg = get_emsg_enum(emsg)
proto = find_proto(emsg)
if proto is None:
self._LOG.error("Failed to parse: %s" % repr(emsg))
return
message = proto()
message.ParseFromString(payload)
if self.verbose_debug:
self._LOG.debug("Incoming: %s\n%s\n---------\n%s" % (repr(emsg),
str(header),
str(message),
))
else:
self._LOG.debug("Incoming: %s", repr(emsg))
self.emit(emsg, message)
if header.proto.job_id_target != 18446744073709551615:
self.emit('job_%d' % header.proto.job_id_target, message)
def _set_connection_status(self, status):
prev_status = self.connection_status
self.connection_status = GCConnectionStatus(status)
if self.connection_status != prev_status:
self.emit("connection_status", self.connection_status)
if self.connection_status == GCConnectionStatus.HAVE_SESSION and not self.ready:
self.ready = True
self.emit('ready')
elif self.connection_status != GCConnectionStatus.HAVE_SESSION and self.ready:
self.ready = False
self.emit('notready')
def wait_msg(self, event, timeout=None, raises=None):
"""Wait for a message, similiar to :meth:`.wait_event`
:param event: event id
:type event: :class:`.ECsgoGCMsg` or job id
:param timeout: seconds to wait before timeout
:type timeout: :class:`int`
:param raises: On timeout when ``False`` returns :class:`None`, else raise :class:`gevent.Timeout`
:type raises: :class:`bool`
:return: returns a message or :class:`None`
:rtype: :class:`None`, or `proto message`
:raises: :class:`gevent.Timeout`
"""
resp = self.wait_event(event, timeout, raises)
if resp is not None:
return resp[0]
def send_job(self, *args, **kwargs):
"""
Send a message as a job
Exactly the same as :meth:`send`
:return: jobid event identifier
:rtype: :class:`str`
"""
jobid = self.current_jobid = ((self.current_jobid + 1) % 10000) or 1
self.remove_all_listeners('job_%d' % jobid)
self._send(*args, jobid=jobid, **kwargs)
return "job_%d" % jobid
def send(self, emsg, data={}, proto=None):
"""
Send a message
:param emsg: Enum for the message
:param data: data for the proto message
:type data: :class:`dict`
:param proto: (optional) manually specify protobuf, other it's detected based on ``emsg``
"""
self._send(emsg, data, proto)
def _send(self, emsg, data={}, proto=None, jobid=None):
if not isinstance(data, dict):
raise ValueError("data kwarg can only be a dict")
if proto is None:
proto = find_proto(emsg)
if proto is None or not issubclass(proto, google.protobuf.message.Message):
raise ValueError("Unable to find proto for emsg, or proto kwarg is invalid")
message = proto()
proto_fill_from_dict(message, data)
header = GCMsgHdrProto(emsg)
if jobid is not None:
header.proto.job_id_source = jobid
if self.verbose_debug:
str_message = ''
str_header = str(header)
str_body = str(message)
if str_header:
str_message += "-- header ---------\n%s\n" % str_header
if str_body:
str_message += "-- message --------\n%s\n" % str_body
self._LOG.debug("Outgoing: %s\n%s" % (repr(emsg), str_message))
else:
self._LOG.debug("Outgoing: %s", repr(emsg))
GameCoordinator.send(self, header, message.SerializeToString())
def _knock_on_gc(self):
n = 1
while True:
if not self.ready:
if self.launcher == GCClientLauncherType.PERFECTWORLD:
self.send(EGCBaseClientMsg.EMsgGCClientHelloPW, {
'client_launcher': self.launcher,
})
else: # GCClientLauncherType.DEFAULT
self.send(EGCBaseClientMsg.EMsgGCClientHello)
self.wait_event('ready', timeout=3 + (2**n))
n = min(n + 1, 4)
else:
self.wait_event('notready')
n = 1
gevent.sleep(1)
def launch(self):
"""
Launch CSGO and establish connection with the game coordinator
``ready`` event will fire when the session is ready.
If the session is lost ``notready`` event will fire.
Alternatively, ``connection_status`` event can be monitored for changes.
"""
if not self.steam.logged_on:
self.steam.wait_event('logged_on')
if not self._retry_welcome_loop and self.app_id not in self.steam.current_games_played:
self.steam.games_played(self.steam.current_games_played + [self.app_id])
self._retry_welcome_loop = gevent.spawn(self._knock_on_gc)
def exit(self):
"""
Close connection to CSGO's game coordinator
"""
if self._retry_welcome_loop:
self._retry_welcome_loop.kill()
if self.app_id in self.steam.current_games_played:
self.steam.current_games_played.remove(self.app_id)
self.steam.games_played(self.steam.current_games_played)
self._set_connection_status(GCConnectionStatus.NO_SESSION)
| 35.555133
| 121
| 0.61202
|
import logging
import gevent
import google.protobuf
from steam.core.msg import GCMsgHdrProto
from steam.client.gc import GameCoordinator
from steam.enums.emsg import EMsg
from steam.utils.proto import proto_fill_from_dict
from csgo.features import FeatureBase
from csgo.enums import EGCBaseClientMsg, GCConnectionStatus, GCClientLauncherType
from csgo.msg import get_emsg_enum, find_proto
from csgo.protobufs import gcsdk_gcmessages_pb2 as pb_gc
from csgo.protobufs import cstrike15_gcmessages_pb2 as pb_gclient
class CSGOClient(GameCoordinator, FeatureBase):
_retry_welcome_loop = None
verbose_debug = False
app_id = 730
launcher = GCClientLauncherType.DEFAULT
current_jobid = 0
ready = False
connection_status = GCConnectionStatus.NO_SESSION
@property
def account_id(self):
return self.steam.steam_id.id
@property
def steam_id(self):
return self.steam.steam_id
def __init__(self, steam_client):
GameCoordinator.__init__(self, steam_client, self.app_id)
self._LOG = logging.getLogger(self.__class__.__name__)
FeatureBase.__init__(self)
self.steam.on('disconnected', self._handle_disconnect)
self.steam.on(EMsg.ClientPlayingSessionState, self._handle_play_sess_state)
self.on(EGCBaseClientMsg.EMsgGCClientConnectionStatus, self._handle_conn_status)
self.on(EGCBaseClientMsg.EMsgGCClientWelcome, self._handle_client_welcome)
def __repr__(self):
return "<%s(%s) %s>" % (self.__class__.__name__,
repr(self.steam),
repr(self.connection_status),
)
def _handle_play_sess_state(self, message):
if self.ready and message.playing_app != self.app_id:
self._set_connection_status(GCConnectionStatus.NO_SESSION)
def _handle_disconnect(self):
if self._retry_welcome_loop:
self._retry_welcome_loop.kill()
self._set_connection_status(GCConnectionStatus.NO_SESSION)
def _handle_client_welcome(self, message):
self._set_connection_status(GCConnectionStatus.HAVE_SESSION)
submessage = pb_gclient.CMsgCStrike15Welcome()
submessage.ParseFromString(message.game_data)
if self.verbose_debug:
self._LOG.debug("Got CStrike15Welcome:\n%s" % str(submessage))
else:
self._LOG.debug("Got CStrike15Welcome")
self.emit('csgo_welcome', submessage)
def _handle_conn_status(self, message):
self._set_connection_status(message.status)
def _process_gc_message(self, emsg, header, payload):
emsg = get_emsg_enum(emsg)
proto = find_proto(emsg)
if proto is None:
self._LOG.error("Failed to parse: %s" % repr(emsg))
return
message = proto()
message.ParseFromString(payload)
if self.verbose_debug:
self._LOG.debug("Incoming: %s\n%s\n---------\n%s" % (repr(emsg),
str(header),
str(message),
))
else:
self._LOG.debug("Incoming: %s", repr(emsg))
self.emit(emsg, message)
if header.proto.job_id_target != 18446744073709551615:
self.emit('job_%d' % header.proto.job_id_target, message)
def _set_connection_status(self, status):
prev_status = self.connection_status
self.connection_status = GCConnectionStatus(status)
if self.connection_status != prev_status:
self.emit("connection_status", self.connection_status)
if self.connection_status == GCConnectionStatus.HAVE_SESSION and not self.ready:
self.ready = True
self.emit('ready')
elif self.connection_status != GCConnectionStatus.HAVE_SESSION and self.ready:
self.ready = False
self.emit('notready')
def wait_msg(self, event, timeout=None, raises=None):
resp = self.wait_event(event, timeout, raises)
if resp is not None:
return resp[0]
def send_job(self, *args, **kwargs):
jobid = self.current_jobid = ((self.current_jobid + 1) % 10000) or 1
self.remove_all_listeners('job_%d' % jobid)
self._send(*args, jobid=jobid, **kwargs)
return "job_%d" % jobid
def send(self, emsg, data={}, proto=None):
self._send(emsg, data, proto)
def _send(self, emsg, data={}, proto=None, jobid=None):
if not isinstance(data, dict):
raise ValueError("data kwarg can only be a dict")
if proto is None:
proto = find_proto(emsg)
if proto is None or not issubclass(proto, google.protobuf.message.Message):
raise ValueError("Unable to find proto for emsg, or proto kwarg is invalid")
message = proto()
proto_fill_from_dict(message, data)
header = GCMsgHdrProto(emsg)
if jobid is not None:
header.proto.job_id_source = jobid
if self.verbose_debug:
str_message = ''
str_header = str(header)
str_body = str(message)
if str_header:
str_message += "-- header ---------\n%s\n" % str_header
if str_body:
str_message += "-- message --------\n%s\n" % str_body
self._LOG.debug("Outgoing: %s\n%s" % (repr(emsg), str_message))
else:
self._LOG.debug("Outgoing: %s", repr(emsg))
GameCoordinator.send(self, header, message.SerializeToString())
def _knock_on_gc(self):
n = 1
while True:
if not self.ready:
if self.launcher == GCClientLauncherType.PERFECTWORLD:
self.send(EGCBaseClientMsg.EMsgGCClientHelloPW, {
'client_launcher': self.launcher,
})
else:
self.send(EGCBaseClientMsg.EMsgGCClientHello)
self.wait_event('ready', timeout=3 + (2**n))
n = min(n + 1, 4)
else:
self.wait_event('notready')
n = 1
gevent.sleep(1)
def launch(self):
if not self.steam.logged_on:
self.steam.wait_event('logged_on')
if not self._retry_welcome_loop and self.app_id not in self.steam.current_games_played:
self.steam.games_played(self.steam.current_games_played + [self.app_id])
self._retry_welcome_loop = gevent.spawn(self._knock_on_gc)
def exit(self):
if self._retry_welcome_loop:
self._retry_welcome_loop.kill()
if self.app_id in self.steam.current_games_played:
self.steam.current_games_played.remove(self.app_id)
self.steam.games_played(self.steam.current_games_played)
self._set_connection_status(GCConnectionStatus.NO_SESSION)
| true
| true
|
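A hedged usage sketch for the CSGOClient in the record above. SteamClient, cli_login() and logout() are assumed from the companion steam client library referenced in the docstring; only launch(), exit(), wait_event('ready') and connection_status come from the record itself:
from steam.client import SteamClient
from csgo.client import CSGOClient
client = SteamClient()
cs = CSGOClient(client)
client.cli_login()          # assumed interactive login helper of the steam client
cs.launch()                 # starts the GC hello loop shown in _knock_on_gc above
cs.wait_event('ready')      # fires once a game coordinator session is established
print("GC status:", cs.connection_status)
cs.exit()
client.logout()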
f7130439941982fc40699d5ed9af64d9a4f823e1
| 7,358
|
py
|
Python
|
assignment2/cs231n/optim.py
|
furkannturkmen/CS231n-2021
|
2c6618d16bfd4e02e0493e8b8a411a6509206bb4
|
[
"MIT"
] | null | null | null |
assignment2/cs231n/optim.py
|
furkannturkmen/CS231n-2021
|
2c6618d16bfd4e02e0493e8b8a411a6509206bb4
|
[
"MIT"
] | null | null | null |
assignment2/cs231n/optim.py
|
furkannturkmen/CS231n-2021
|
2c6618d16bfd4e02e0493e8b8a411a6509206bb4
|
[
"MIT"
] | null | null | null |
import numpy as np
"""
This file implements various first-order update rules that are commonly used
for training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning
rate, momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not
perform well; however the default values of the other hyperparameters should
work well for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
"""
Performs vanilla stochastic gradient descent.
config format:
- learning_rate: Scalar learning rate.
"""
if config is None:
config = {}
config.setdefault("learning_rate", 1e-2)
w -= config["learning_rate"] * dw
return w, config
def sgd_momentum(w, dw, config=None):
"""
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a
moving average of the gradients.
"""
if config is None:
config = {}
config.setdefault("learning_rate", 1e-2)
config.setdefault("momentum", 0.9)
v = config.get("velocity", np.zeros_like(w))
next_w = None
###########################################################################
# TODO: Implement the momentum update formula. Store the updated value in #
# the next_w variable. You should also use and update the velocity v. #
###########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
v = v * config["momentum"] - config["learning_rate"] * dw # for example -> momentum = 0.9 lr = 0.1
next_w = w + v
"""
v = config["momentum"] * v + (1 - config["momentum"]) * dw
next_w = w - config["learning_rate"] * v
"""
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###########################################################################
# END OF YOUR CODE #
###########################################################################
config["velocity"] = v
return next_w, config
def rmsprop(w, dw, config=None):
"""
Uses the RMSProp update rule, which uses a moving average of squared
gradient values to set adaptive per-parameter learning rates.
config format:
- learning_rate: Scalar learning rate.
- decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
gradient cache.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- cache: Moving average of second moments of gradients.
"""
if config is None:
config = {}
config.setdefault("learning_rate", 1e-2)
config.setdefault("decay_rate", 0.99)
config.setdefault("epsilon", 1e-8)
config.setdefault("cache", np.zeros_like(w))
next_w = None
###########################################################################
# TODO: Implement the RMSprop update formula, storing the next value of w #
# in the next_w variable. Don't forget to update cache value stored in #
# config['cache']. #
###########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
"""
ADAGRAD
config["cache"] += dw * dw
w = w - config["learning_rate"] * dw / (np.sqrt(config["cache"]) + config["epsilon"])
"""
config["cache"] = config["decay_rate"] * config["cache"] + (1 - config["decay_rate"]) * dw * dw
next_w = w - config["learning_rate"] * dw / (np.sqrt(config["cache"]) + config["epsilon"])
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###########################################################################
# END OF YOUR CODE #
###########################################################################
return next_w, config
def adam(w, dw, config=None):
"""
Uses the Adam update rule, which incorporates moving averages of both the
gradient and its square and a bias correction term.
config format:
- learning_rate: Scalar learning rate.
- beta1: Decay rate for moving average of first moment of gradient.
- beta2: Decay rate for moving average of second moment of gradient.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- m: Moving average of gradient.
- v: Moving average of squared gradient.
- t: Iteration number.
"""
if config is None:
config = {}
config.setdefault("learning_rate", 1e-3)
config.setdefault("beta1", 0.9)
config.setdefault("beta2", 0.999)
config.setdefault("epsilon", 1e-8)
config.setdefault("m", np.zeros_like(w))
config.setdefault("v", np.zeros_like(w))
config.setdefault("t", 0)
next_w = None
###########################################################################
# TODO: Implement the Adam update formula, storing the next value of w in #
# the next_w variable. Don't forget to update the m, v, and t variables #
# stored in config. #
# #
# NOTE: In order to match the reference output, please modify t _before_ #
# using it in any calculations. #
###########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
config["t"] += 1
# Momentum
config["m"] = config["beta1"] * config["m"] + (1 - config["beta1"]) * dw
m_unbias = config["m"] / (1 - config["beta1"] ** config["t"])
# RMSProp / ADAGRAD
config["v"] = config["beta2"] * config["v"] + (1 - config["beta2"]) * dw ** 2
v_unbias = config["v"] / (1 - config["beta2"] ** config["t"])
next_w = w - config["learning_rate"] * m_unbias / (np.sqrt(v_unbias) + config["epsilon"])
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###########################################################################
# END OF YOUR CODE #
###########################################################################
return next_w, config
| 39.98913
| 102
| 0.534792
|
import numpy as np
def sgd(w, dw, config=None):
if config is None:
config = {}
config.setdefault("learning_rate", 1e-2)
w -= config["learning_rate"] * dw
return w, config
def sgd_momentum(w, dw, config=None):
if config is None:
config = {}
config.setdefault("learning_rate", 1e-2)
config.setdefault("momentum", 0.9)
v = config.get("velocity", np.zeros_like(w))
next_w = None
| true
| true
|
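A minimal sketch of the update-rule interface documented in the record above, driving the vanilla sgd rule on a toy quadratic loss; the loss and the training loop are assumptions for illustration, not part of the assignment code:
import numpy as np
def sgd(w, dw, config=None):
    # Same contract as in the record: returns (next_w, config).
    if config is None:
        config = {}
    config.setdefault("learning_rate", 1e-2)
    w -= config["learning_rate"] * dw
    return w, config
# Toy loss 0.5*||w - target||^2 has gradient dw = w - target.
target = np.array([1.0, -2.0, 3.0])
w = np.zeros(3)
config = {"learning_rate": 0.1}
for _ in range(200):
    dw = w - target
    w, config = sgd(w, dw, config)
print(np.round(w, 3))  # converges to [ 1. -2.  3.]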