code
stringlengths 1
5.19M
| package
stringlengths 1
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
# Copyright 2022 Guillaume Belanger
# See LICENSE file for licensing details.
class EpsBearerId(int):
    """EPS Bearer Identity value type — a thin wrapper around ``int``."""
| 5g-core-common-schemas | /5g_core_common_schemas-1.0.9-py3-none-any.whl/fiveg_core_common_schemas/EpsBearerId.py | EpsBearerId.py |
# Copyright 2022 Guillaume Belanger
# See LICENSE file for licensing details.
class AmfSetId(str):
    """AMF Set Identifier value type — a thin wrapper around ``str``."""
| 5g-core-common-schemas | /5g_core_common_schemas-1.0.9-py3-none-any.whl/fiveg_core_common_schemas/AmfSetId.py | AmfSetId.py |
# Copyright 2022 Guillaume Belanger
# See LICENSE file for licensing details.
from typing import List, Optional

from pydantic import BaseModel
from fiveg_core_common_schemas.Ecgi import Ecgi
from fiveg_core_common_schemas.GlobalRanNodeId import GlobalRanNodeId
from fiveg_core_common_schemas.Ncgi import Ncgi
from fiveg_core_common_schemas.PresenceState import PresenceState
from fiveg_core_common_schemas.Tai import Tai
class PresenceInfo(BaseModel):
    """Presence-area information schema.

    All fields are optional; ``None`` means "not provided".
    Fields were previously annotated as non-optional with a ``None``
    default, which is rejected by strict type checking (and pydantic v2);
    they are now explicitly ``Optional``.
    """
    praId: Optional[str] = None
    presenceState: Optional[PresenceState] = None
    trackingAreaList: Optional[List[Tai]] = None
    ecgiList: Optional[List[Ecgi]] = None
    ncgiList: Optional[List[Ncgi]] = None
    globalRanNodeIdList: Optional[List[GlobalRanNodeId]] = None
| 5g-core-common-schemas | /5g_core_common_schemas-1.0.9-py3-none-any.whl/fiveg_core_common_schemas/PresenceInfo.py | PresenceInfo.py |
# Copyright 2022 Guillaume Belanger
# See LICENSE file for licensing details.
class Autn(str):
    """Authentication token (AUTN) value type — a thin wrapper around ``str``."""
| 5g-core-common-schemas | /5g_core_common_schemas-1.0.9-py3-none-any.whl/fiveg_core_common_schemas/Autn.py | Autn.py |
# Copyright 2022 Guillaume Belanger
# See LICENSE file for licensing details.
class NgRanIdentifier(str):
    """NG-RAN node identifier value type — a thin wrapper around ``str``."""
| 5g-core-common-schemas | /5g_core_common_schemas-1.0.9-py3-none-any.whl/fiveg_core_common_schemas/NgRanIdentifier.py | NgRanIdentifier.py |
# Copyright 2022 Guillaume Belanger
# See LICENSE file for licensing details.
class Gli(bytes):
    """Global Line Identifier value type — a thin wrapper around ``bytes``."""
| 5g-core-common-schemas | /5g_core_common_schemas-1.0.9-py3-none-any.whl/fiveg_core_common_schemas/Gli.py | Gli.py |
# Copyright 2022 Guillaume Belanger
# See LICENSE file for licensing details.
class NsiId(str):
    """Network Slice Instance identifier value type — a thin wrapper around ``str``."""
| 5g-core-common-schemas | /5g_core_common_schemas-1.0.9-py3-none-any.whl/fiveg_core_common_schemas/NsiId.py | NsiId.py |
# 5GASP CLI
## How to run
You can find the code inside the */5gasp-cli/src/* directory.
To list all CLI commands, run:
```
5gasp-cli --help
```
To list all parameters of a command, run:
```
5gasp-cli COMMAND --help
```
### CLI Commands
#### List all testbeds
```
5gasp-cli list-testbeds
```
#### List all available tests
```
5gasp-cli list-available-tests
```
#### Generate a testing descriptor:
```
5gasp-cli create-testing-descriptor
```
This command has the following options:
* One or more NSDs (Network Service Descriptors) can be passed to infer connection point tags from, using the following command:
```
5gasp-cli create-testing-descriptor --infer-tags-from-nsd <nsd_location>
```
* The path of the generated descriptor can be passed using:
```
5gasp-cli create-testing-descriptor --output-filepath <path_to_file>
```
> **_NOTE:_** Both options can be used simultaneously | 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/README.md | README.md |
# -*- coding: utf-8 -*-
# @Author: Eduardo Santos
# @Date: 2023-02-01 16:31:36
# @Last Modified by: Eduardo Santos
# @Last Modified time: 2023-05-16 16:39:14
from typing import List, Optional
from .helpers.beatiful_prints import PrintAsTable, PrintAsPanelColumns
from rich.prompt import Prompt, Confirm
from rich.text import Text
from rich.console import Console
import typer
from .CICDManagerAPIClient import apli_client as CICD_API_Client
from .DescriptorParser.parser import ConnectionPointsParser
from .TestingDescriptorGenerator.descriptor_generator import \
TestingDescriptorGenerator
from .helpers import constants as Constants
from .helpers import prompts
# Typer application instance that exposes the CLI commands defined below.
app = typer.Typer()
# Global CLI state shared across commands (set in the `main` callback).
state = {"verbose": False}
def _list_testbeds(api_client, print_info=False, centered=False):
testbeds = api_client.get_all_testbeds()
# Print table with the available testbeds
if print_info:
table = PrintAsTable(
header=["ID", "Name", "Description"],
rows=[
[t["id"], t["name"], t["description"]]
for t
in testbeds
]
)
table.print(centered=centered)
return testbeds
def _list_tests(api_client, testbed_id, print_info=False):
tests = api_client.get_tests_per_testbed(testbed_id)
if print_info:
panels = PrintAsPanelColumns(
panels=[t.to_panel() for t in tests]
)
panels.print()
return tests
@app.command()
def create_testing_descriptor(
    output_filepath: str = typer.Option(
        default="testing-descriptor.yaml",
        help="Output filepath"
    ),
    infer_tags_from_nsd: Optional[List[str]] = typer.Option(
        default=None
    )
):
    """Interactively create a Testing Descriptor.

    Optionally infers connection-point template tags from one or more
    NSD files (``--infer-tags-from-nsd``) and writes the resulting
    descriptor to ``--output-filepath``.
    """
    console = Console()
    text = Text()
    # 1. Check if the developer wants to infer tags from an NSD
    if infer_tags_from_nsd:
        # Information Prompt
        prompts.connection_points_information_prompt()
        # Parse connection points information
        tags_parser = ConnectionPointsParser(infer_tags_from_nsd)
        existing_connect_points = tags_parser.connection_points
        print("\nThe following NSDs can be used for inferring connection " +
              "points:"
              )
        table = PrintAsTable(
            header=["NSD's File Path", "NSD ID", "Inferred Connection Points"],
            rows=[
                [
                    nsd_file_path,
                    nsd_info["ns_id"],
                    "\n".join(nsd_info["connection_points"])
                ]
                for nsd_file_path, nsd_info
                in existing_connect_points.items()
            ]
        )
        table.print()
        # Explain the available template-tag keys using the first
        # inferred connection point as a concrete example.
        prompts.connection_point_keys(
            list(existing_connect_points.values())[0]["connection_points"][0]
        )
    # 2. Ask the developer if he wishes to proceed
    proceed = Confirm.ask(
        "\nDo you wish to proceed with the Test Descriptor's creation?"
    )
    # Exit if the developer does not want to proceed
    if not proceed:
        return
    # 3. Ask for the Testing Descriptor initial information
    netapp_name = input("\n" + Constants.USER_PROMPTS.NETAPP_NAME.value)
    ns_name = input(Constants.USER_PROMPTS.NS_NAME.value)
    api_client = CICD_API_Client.CICDManagerAPIClient()
    # Print table with the available testbeds
    # List Testbeds
    testbeds = _list_testbeds(
        api_client=api_client,
        print_info=True,
        centered=True
    )
    # Prompt to choose a testbed
    testbed_id = Prompt.ask(
        "\nIn which testbed do you want to validate your Network " +
        "Application?",
        choices=[t["id"] for t in testbeds]
    )
    # Tests are fetched silently here; the generator presents them later.
    tests = _list_tests(
        api_client=api_client,
        testbed_id=testbed_id,
        print_info=False
    )
    if not infer_tags_from_nsd:
        text = Text("\nAs there was no NSD passed, there are no connection " +
                    "points to be inferred. You can enter them manually."
                    , style="bold")
        console.print(text)
    generator = TestingDescriptorGenerator(
        connection_points=existing_connect_points if infer_tags_from_nsd else None,
        netapp_name=netapp_name,
        ns_name=ns_name,
        testbed_id=testbed_id,
        tests=tests,
        output_filepath=output_filepath
    )
    generator.create_testing_descriptor()
@app.command()
def list_testbeds():
    '''
    List available testbeds
    '''
    api_client = CICD_API_Client.CICDManagerAPIClient()
    # Fetch and display all known testbeds.
    testbeds = _list_testbeds(
        api_client=api_client,
        print_info=True
    )
    # Offer to drill down into the tests of one of the listed testbeds.
    should_list_tests = Confirm.ask(
        "\nDo you wish to list the available tests for one of these testbeds?",
    )
    if not should_list_tests:
        return
    testbed_id = Prompt.ask(
        "\nFor which testbed do you wish to list the available tests",
        choices=[t["id"] for t in testbeds]
    )
    print(f"\nAvailable tests in testbed '{testbed_id}':\n")
    _list_tests(
        api_client=api_client,
        testbed_id=testbed_id,
        print_info=True
    )
@app.command()
def list_available_tests():
    '''
    List available tests to developer
    '''
    prompts.tests_per_testbed_prompt()
    # Print all the available testbeds
    prompts.tests_testbeds_list_prompt()
    api_client = CICD_API_Client.CICDManagerAPIClient()
    testbeds = _list_testbeds(
        api_client=api_client,
        print_info=True,
        centered=True
    )
    # Prompt to choose a testbed
    testbed_id = Prompt.ask(
        "\nFor which testbed do you wish to list the available tests",
        choices=[t["id"] for t in testbeds]
    )
    # Fetch the testbed's tests silently; they are printed in the loop below.
    tests = _list_tests(
        api_client=api_client,
        testbed_id=testbed_id,
        print_info=False
    )
    # Interactive loop: keep showing the tests table and offering
    # per-test details until the user types 'exit'.
    while True:
        table_rows = [
            [str(index + 1), test.name, test.description]
            for index, test in enumerate(tests)
        ]
        table = PrintAsTable(
            header=["ID", "Test Name", "Test Description"],
            rows=table_rows
        )
        prompts.display_tests_for_testbed(testbed_id)
        table.print()
        prompts.do_you_wish_to_see_test_information_prompt()
        choice = Prompt.ask(
            "For which test do you wish to see additional information? ",
            choices=[str(i) for i in range(1, len(tests) + 1)] + ["exit"]
        )
        if choice == "exit":
            break
        PrintAsPanelColumns(
            panels=[tests[int(choice) - 1].to_panel(expand=True)]
        ).print()
@app.callback()
def main(
    verbose: bool = False,
    ci_cd_manager_url: str = typer.Option(
        default=Constants.CI_CD_SERVICE_URL,
        help="CI/CD Manager URL to override the default one."
    )
):
    """Global CLI callback: handles verbosity and CI/CD Manager URL override."""
    if verbose:
        print("Will write verbose output")
        state["verbose"] = True
    # Set the ci_cd_manager_url
    Constants.CI_CD_SERVICE_URL = ci_cd_manager_url
# Script entry point: run the Typer CLI application.
if __name__ == "__main__":
    app()
| 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/main.py | main.py |
# -*- coding: utf-8 -*-
# @Author: Eduardo Santos
# @Date: 2023-04-13 15:17:29
# @Last Modified by: Rafael Direito
# @Last Modified time: 2023-04-24 18:15:34
from enum import Enum
# Default base URL of the 5GASP CI/CD Manager service (may be overridden
# at runtime via the CLI's --ci-cd-manager-url option).
CI_CD_SERVICE_URL = "https://ci-cd-service.5gasp.eu/manager"
class CI_CD_SERVICE_URL_ENDPOINTS(Enum):
    """REST endpoints exposed by the CI/CD Manager (paths relative to the base URL)."""
    ALL_TESTS = "/tests/all"
    ALL_TESTBEDS = "/testbeds/all"
# User Prompts
class USER_PROMPTS(Enum):
    """Prompt strings shown to the user when gathering descriptor information."""
    NETAPP_NAME = "Network Application's name: "
    NS_NAME = "Network Service's name: "
| 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/helpers/constants.py | constants.py |
# -*- coding: utf-8 -*-
# @Author: Rafael Direito
# @Date: 2023-04-24 15:36:07
# @Last Modified by: Rafael Direito
# @Last Modified time: 2023-04-24 16:28:13
# Template-tag keys that can be appended to an inferred connection point
# (e.g. {{<ns>|<vnf>|<cp>|ip-address}}). Each entry maps the key to a
# human-readable description and an example of the rendered value.
# NOTE(review): the mac-address example ("fa:16:3e:b7:48:f") looks
# truncated — confirm the intended example value.
CONNECTION_POINT_TAGS = {
    "ip-address": {
        "description": "Gather Interface's IP Address",
        "example": "10.10.10.121"
    },
    "mac-address": {
        "description": "Gather Interface's MAC Address",
        "example": "fa:16:3e:b7:48:f"
    },
    "vlan": {
        "description": "Gather Interface's VLAN ID",
        "example": 704
    },
    "type": {
        "description": "Gather Interface's Type",
        "example": "VIRTIO"
    },
    "name": {
        "description": "Gather Interface's Host Name",
        "example": "eth0"
    },
    "mgmt-interface": {
        "description": "Gather if the Interface is a mgmt interface",
        "example": True
    },
}
# -*- coding: utf-8 -*-
# @Author: Rafael Direito
# @Date: 2023-04-26 09:33:16
# @Last Modified by: Rafael Direito
# @Last Modified time: 2023-04-26 10:01:15
# Skeleton of a 5GASP Testing Descriptor. The generator fills in the
# test_info fields, the setup testcases list, and the ordered
# testcase_ids of the single predefined-tests execution batch.
BASE_TESTING_DESCRIPTOR = {
    "test_info": {
        "netapp_id": None,
        "network_service_id": None,
        "testbed_id": None,
        "description": None
    },
    "test_phases": {
        "setup": {
            "deployments": [],
            "testcases": []
        },
        "execution": [
            {
                "batch_id": 1,
                "scope": "predefined_tests",
                "executions": [
                    {
                        "execution_id": 1,
                        "name": "predefined_test",
                        "testcase_ids": None
                    }
                ]
            }
        ]
    }
}
# -*- coding: utf-8 -*-
# @Author: Rafael Direito
# @Date: 2023-04-20 10:43:22
# @Last Modified by: Rafael Direito
# @Last Modified time: 2023-04-25 18:06:12
from rich.console import Console
from rich.table import Table
from rich.columns import Columns
from rich.align import Align
class PrintAsTable:
    """Render a header plus rows as a rich table on the console."""

    table = None

    def __init__(self, header, rows):
        """Store header/rows and pre-build the rich Table once."""
        self.header = header
        self.rows = rows
        self.__process_table()

    def __process_table(self):
        # Build the Table up front so print() can be called repeatedly.
        built_table = Table(*self.header)
        for current_row in self.rows:
            built_table.add_row(*current_row)
        self.table = built_table

    def print(self, centered=False):
        """Print the table; center it horizontally when `centered` is set."""
        console = Console()
        if centered:
            console.print(Align.center(self.table))
        else:
            console.print(self.table)
class PrintAsPanelColumns:
    """Render a collection of rich panels side-by-side as columns."""

    def __init__(self, panels):
        self.panels = panels

    def print(self):
        """Print the panels, ordered by content length (longest first)."""
        ordered_panels = sorted(
            self.panels,
            key=lambda panel: len(panel.renderable),
            reverse=True,
        )
        Console().print(Columns(ordered_panels))
# -*- coding: utf-8 -*-
# @Author: Eduardo Santos
# @Date: 2023-04-04 16:39:57
# @Last Modified by: Eduardo Santos
# @Last Modified time: 2023-05-15 22:51:18
from rich.console import Console
from rich.text import Text
from rich.panel import Panel
from rich.align import Align
from rich.console import Group
from ..helpers.connection_point_tags import CONNECTION_POINT_TAGS
from ..helpers.beatiful_prints import PrintAsTable
from rich.prompt import Prompt
def test_cases_operation():
    """Show the Test Cases menu and ask the user which operation to run.

    Returns one of: "add", "info", "show", "edit", "finish".
    """
    console = Console()
    menu = Text()
    menu.append("\nWhich Operation do you want to perform?\n")
    for option, explanation in (
        ("(add) ", "Add new Test Case\n"),
        ("(info) ", "Get more information regarding a Test\n"),
        ("(show) ", "Show already configured Test Cases\n"),
        ("(edit) ", "Edit Test Cases\n"),
        ("(finish) ", "Finish the Test Cases Configuration\n"),
    ):
        menu.append(option, style="bold")
        menu.append(explanation)
    console.print(menu)
    return Prompt.ask(
        "Which Operation do you want to perform? ",
        choices=["add", "info", "show", "edit", "finish"]
    )
def tests_per_testbed_prompt():
    """Display an informational panel explaining that tests are testbed-specific."""
    console = Console()
    # Typos fixed in the displayed text: "and overall" -> "an overall",
    # "yourNetApp" -> "your NetApp", "valdiated" -> "validated".
    group = Group(
        Align.center("[b]In 5GASP, each testbed has its own specific " +
                     "tests.[/b]"),
        Align.center(" "),
        Align.center("Thus, we don't provide an overall view of the tests " +
                     "we have in our ecosystem, but rather a testbed-level " +
                     "view of the tests."),
        Align.center("[b]This way, you must first choose a testbed on where " +
                     "your NetApp shall be deployed, validated and " +
                     "certified.[/b]"),
        Align.center("Only after choosing the testbed you may list the " +
                     "tests available in that facility."),
    )
    console.print(
        Align.center(
            Panel(
                renderable=group,
                title="5GASP's Tests",
                expand=True
            )
        )
    )
def tests_testbeds_list_prompt():
    """Print the centered heading for the testbeds listing."""
    Console().print(
        Align.center(
            "\n[b]Testbeds Available for Network Applications Testing:[/b]\n"
        )
    )
def display_tests_for_testbed(testbed):
    """Print a title-cased heading announcing the tests of `testbed`."""
    heading = f"The Testbed '{testbed}' provides the following tests:".title()
    Console().print("\n[b]" + heading + "[/b]\n")
def do_you_wish_to_see_test_information_prompt():
    """Tell the user they may inspect each test, or type 'exit' to stop."""
    console = Console()
    # Fixed rich markup: the closing bold tag was written "[b]" instead
    # of "[/b]", leaving the bold style unterminated.
    console.print(
        "\n[b]You can see additional information about each of the tests.\n" +
        "If you don't want to do so, just type 'exit'.[/b]"
    )
def connection_points_information_prompt():
    """Explain that connection-point inference only supports VNF-based NSDs."""
    console = Console()
    # Typo fixed in the displayed text:
    # "support your though the development" -> "support you through the development".
    group = Group(
        Align.center("[b]5GASP's CLI only supports inferring " +
                     "connection points when they refer to a VNF.[/b]"),
        Align.center(" "),
        Align.center("We currently do not support CNF-related connection " +
                     "points."),
        Align.center("If you want to create a Testing Descriptor for a " +
                     "CNF-based Network Application, please contact us at " +
                     "[b]contact@5gasp.eu[/b], and we will support you " +
                     "through the development of your Testing Descriptor."
                     ),
    )
    console.print(
        Align.center(
            Panel(
                renderable=group,
                title="Connection Points",
                expand=True
            )
        )
    )
def connection_point_keys(example_connection_point):
    """Explain the template-tag keys and show an example per key.

    `example_connection_point` is one inferred connection-point tag
    (e.g. "{{deployment_info|ns|vnf|cp}}"); its closing braces are
    stripped so each key can be appended as "...|<key>}}".
    """
    console = Console()
    # Typo fixed in the displayed text: "devoloper" -> "developer".
    group = Group(
        Align.center("[b]From the previously presented Connection Points it " +
                     "is possible to define several template tags that " +
                     "shall be rendered after the deployment of the Network " +
                     "Application.[/b]"),
        Align.center(" "),
        Align.center("For instance, if a developer wishes to perform a " +
                     "test that requires information on the IPs of the " +
                     "Network Application VNFs, the developer may define a " +
                     "template tag, which will be rendered to the IP of a " +
                     "certain VNF " +
                     "({{<ns_id>|<vnf_id>|<connection_point>|ip-address}})."),
        Align.center(" "),
    )
    print()
    console.print(
        Align.center(
            Panel(
                renderable=group,
                title="Connection Points - Template Tags",
                expand=True
            )
        )
    )
    print("\nThe available template tags are the following:")
    # Drop the trailing "}}" so a "|<key>}}" suffix can be appended.
    tmp_example_connection_point = example_connection_point[:-2]
    header = ["Connection Point Key", "Description", "Example",
              "Example Value"]
    rows = []
    for tag, info in CONNECTION_POINT_TAGS.items():
        rows.append(
            [
                tag,
                info["description"],
                tmp_example_connection_point + "|" + tag + "}}",
                str(info["example"])
            ]
        )
    print_as_table = PrintAsTable(header=header, rows=rows)
    print_as_table.print()
| 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/helpers/prompts.py | prompts.py |
# -*- coding: utf-8 -*-
# @Author: Eduardo Santos
# @Date: 2023-02-18 15:26:20
# @Last Modified by: Rafael Direito
# @Last Modified time: 2023-05-18 14:53:36
import yaml
from typing import List
class ConnectionPointsParser:
    """Parses NSD files and infers connection-point template tags."""

    # Mapping of NSD file path -> {"ns_id": ..., "connection_points": [...]}
    validated_connection_points = None
    _interfaces = None

    def __init__(self, nsd_filepaths: List[str]):
        """Store the (de-duplicated) NSD paths and immediately parse them."""
        self.base_nsd_filepaths = set(nsd_filepaths)
        self.validated_connection_points = {}
        self._interfaces = []
        self.infer_connection_points()

    def infer_connection_points(self):
        """Parse every NSD file provided to the constructor."""
        for nsd_path in self.base_nsd_filepaths:
            self.parse_descriptor(nsd_path)

    def parse_descriptor(self, nsd_filepath):
        """Extract all connection-point tags from a single NSD file.

        On any failure (missing file, bad YAML, unexpected schema) an
        error message is printed and the file is skipped.
        """
        try:
            inferred_tags = []
            with open(nsd_filepath, "r") as nsd_file:
                descriptor = yaml.safe_load(nsd_file)
            for network_service in descriptor['nsd']['nsd']:
                ns_id = network_service['id']
                for df in network_service['df']:
                    inferred_tags += self.infer_connection_points_from_df(
                        ns_id=ns_id,
                        df=df,
                    )
            # Record the results for this NSD (ns_id is the last NS seen).
            self.validated_connection_points[nsd_filepath] = {
                "ns_id": ns_id,
                "connection_points": inferred_tags
            }
        except Exception as e:
            print("\nThe following exception occurred when trying to infer " +
                  f"connection points for the NSD '{nsd_filepath}': {e}.")

    def infer_connection_points_from_df(self, ns_id, df):
        """Build '{{deployment_info|ns|vnf|cpd}}' tags for one deployment flavour."""
        tags = []
        for vnf_profile in df['vnf-profile']:
            vnf_id = vnf_profile['id']
            for link in vnf_profile['virtual-link-connectivity']:
                for cpd in link["constituent-cpd-id"]:
                    interface_id = cpd['constituent-cpd-id']
                    tags.append(
                        "{{deployment_info|" + f"{ns_id}|{vnf_id}|" +
                        f"{interface_id}" + "}}"
                    )
        return tags

    @property
    def connection_points(self):
        """Mapping of NSD file path to its inferred connection points."""
        return self.validated_connection_points
| 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/DescriptorParser/parser.py | parser.py |
# -*- coding: utf-8 -*-
# @Author: Eduardo Santos
# @Date: 2023-04-03 23:41:36
# @Last Modified by: Eduardo Santos
# @Last Modified time: 2023-05-18 17:57:17
# OS
import os
from ..helpers.beatiful_prints import PrintAsTable, PrintAsPanelColumns
from ..helpers import prompts
import yaml
from rich.prompt import Prompt, FloatPrompt, IntPrompt, Confirm
from rich.text import Text
from rich.console import Console
from ..CICDManagerAPIClient.test_classes import TestCase
from ..helpers.connection_point_tags import CONNECTION_POINT_TAGS
from ..helpers.base_testing_descriptor import BASE_TESTING_DESCRIPTOR
class TestingDescriptorGenerator:
    """Interactive generator of 5GASP Testing Descriptors.

    Drives the Test Cases configuration menu, collects test-variable
    values (optionally helped by inferred connection points), lets the
    user set an execution order, and finally serializes the Testing
    Descriptor to a YAML file.
    """

    def __init__(self, netapp_name, ns_name, testbed_id, tests,
                 output_filepath, connection_points=None):
        # Network Application / Network Service identity and target testbed.
        self.netapp_name = netapp_name
        self.ns_name = ns_name
        self.testbed_id = testbed_id
        # Tests available in the chosen testbed.
        self.tests = tests
        self.output_filepath = output_filepath
        # Inferred connection points (None when no NSD was provided).
        self.connection_points = connection_points
        # Configured Test Cases and their user-chosen execution order.
        self.test_cases = []
        self.tests_cases_ids_ordered_by_user = []
        self.last_test_id = 1

    def _show_test_info(self):
        """Ask for a test number and print its full information panel."""
        test_id = Prompt.ask(
            "For which test do you wish to see additional information? ",
            choices=[str(i) for i in range(1, len(self.tests)+1)]
        )
        panels = PrintAsPanelColumns(
            panels=[self.tests[int(test_id)-1].to_panel(expand=True)]
        )
        panels.print()

    def __test_variable_input(self, test_variable):
        """Prompt the developer for a single test variable's value.

        If the variable can be injected by the NODS and connection
        points were inferred, the connection points and template-tag
        keys are shown first. The prompt type follows the variable's
        declared type (str/float/int) or its fixed list of options.
        """
        value = None
        prompt = "Which value would you like to assign to the variable "\
            f"'{test_variable.name}'?"
        if test_variable.can_be_injected_by_the_nods and self.connection_points:
            connection_points = []
            connection_point_keys = list(CONNECTION_POINT_TAGS.keys())
            for cps in self.connection_points.values():
                connection_points += cps["connection_points"]
            # Prepare table printing
            # Pad the shorter of the two columns so both have equal length.
            tmp_smaller_list = connection_points \
                if len(connection_points) < len(connection_point_keys) \
                else connection_point_keys
            diff = abs(len(connection_points) - len(connection_point_keys))
            tmp_smaller_list += [" "]*diff
            # Print Connection Points
            panels = PrintAsTable(
                header=["Connection Points", "Connection Point Keys"],
                rows=[
                    [connection_points[i], connection_point_keys[i]]
                    for i
                    in range(len(connection_points))
                ]
            )
            panels.print()
        # Ask for user's input
        # If there are possible values, ask for one of them
        if len(test_variable.possible_options) != 0:
            value = Prompt.ask(prompt, choices=test_variable.possible_options)
        elif test_variable.type == "str":
            value = Prompt.ask(prompt)
        elif test_variable.type == "float":
            value = FloatPrompt.ask(prompt)
        elif test_variable.type == "int":
            value = IntPrompt.ask(prompt)
        console = Console()
        variable_value_text = Text(f"{test_variable.name} = {value}\n",
                                   style="red")
        console.print(variable_value_text)
        return value

    def _add_test(self):
        """Configure a new Test Case from one of the available tests."""
        console = Console()
        test_id = Prompt.ask(
            "Which test do you want to add to your Testing Descriptor? ",
            choices=[str(i) for i in range(1, len(self.tests)+1)]
        )
        test_id = int(test_id) - 1
        test = self.tests[test_id]
        test_info = Text()
        test_info.append(f"Configuring test '{test.name}'...\n", style="bold")
        test_info.append("Test name: ", style="bold")
        test_info.append(test.name + "\n")
        test_info.append("Test Description: ", style="bold")
        test_info.append(test.description + "\n")
        test_info.append("\nConfiguring Test Variables...\n", style="bold")
        console.print(test_info)
        # NOTE(review): test_id is decremented a second time here but is
        # not used afterwards — confirm this line is intentional.
        test_id = int(test_id) - 1
        # Save Test Case Definition
        test_case = TestCase(test=test, test_case_id=self.last_test_id)
        for test_variable in test.test_variables:
            console.print(
                test_variable.to_panel(test.name)
            )
            # NOTE(review): the else-branch message claims the variable is
            # NODS-injectable even when it is not — confirm the wording.
            if test_variable.can_be_injected_by_the_nods and self.connection_points:
                text = Text("This variable can be injected by the " +
                            "NODS. You may rely on the inferred " +
                            "connection points..", style="bold")
                console.print(text)
            else:
                text = Text("This variable can be injected by the " +
                            "NODS, but no NSD was passed. You can inject the" +
                            " values mannualy, or you can pass a descriptor" +
                            " to the CLI.", style="bold")
                console.print(text)
            value = self.__test_variable_input(test_variable)
            # Save Test Case Definition
            test_case.add_test_variable(
                key=test_variable.name,
                value=value
            )
        description = Prompt.ask("How would you describe this Test Case")
        test_case.description = description
        console.print(test_case.to_panel(show_configured=True))
        self.test_cases.append(test_case)
        self.last_test_id += 1

    def _show_test_cases(self):
        """Print every Test Case configured so far as panel columns."""
        # Print Header
        console = Console()
        header = Text("\nYou already configured the following Test Cases:",
                      style="bold")
        console.print(header)
        # Print all configured Test Cases
        panels = [tc.to_panel(expand=False) for tc in self.test_cases]
        panel_columns = PrintAsPanelColumns(panels)
        panel_columns.print()

    def _finish_test_cases_definition(self):
        """Close the configuration phase and settle the execution order."""
        console = Console()
        info = Text("\nYou have finished the Test Cases Definition.\n")
        info.append("You can now choose if your Test Cases should be " +
                    "executed in a specific order, or if the execution " +
                    "order is irrelevant.", style="bold")
        console.print(info)
        execution_order_is_required = Confirm.ask(
            "\nDo you wish to execute the defined Test Cases in a specific " +
            "order?"
        )
        if execution_order_is_required:
            self._set_tests_execution_order()
        else:
            # Default order: the order in which the Test Cases were created.
            self.tests_cases_ids_ordered_by_user = [
                tc.test_case_id
                for tc
                in self.test_cases
            ]

    def _set_tests_execution_order(self):
        """Let the user pick the Test Cases' execution order, one at a time."""
        self._show_test_cases()
        # Print Header
        console = Console()
        header = Text("\nYou can now define the execution order of the " +
                      "configured Test Cases.\nTo do so, please keep " +
                      "choosing the next test that shall be executed, until " +
                      "you have chosen all Test Cases.", style="bold"
                      )
        console.print(header)
        # Initial Test Cases IDs
        test_cases_ids = sorted([tc.test_case_id for tc in self.test_cases])
        tests_cases_ids_ordered_by_user = []
        # Repeatedly ask for the next ID until all IDs have been chosen.
        while len(test_cases_ids) > 0:
            test_case_id = Prompt.ask(
                "Which is the next Test Case to execute? ",
                choices=[str(i) for i in test_cases_ids]
            )
            test_case_id = int(test_case_id)
            tests_cases_ids_ordered_by_user.append(test_case_id)
            test_cases_ids.remove(test_case_id)
            test_cases_ids = sorted(test_cases_ids)
        # Present Test Cases Execution Order to the User
        order_info = Text("\nThe Test Cases will be performed according " +
                          "to the following order: ", style="bold")
        order_info.append(str(tests_cases_ids_ordered_by_user), style="red")
        console.print(order_info)
        self.tests_cases_ids_ordered_by_user = tests_cases_ids_ordered_by_user
        return tests_cases_ids_ordered_by_user

    def _edit_test_cases_delete(self):
        """Ask for a Test Case ID and delete it (after confirmation)."""
        test_id = Prompt.ask(
            "Which Test Case do you want to delete ('back' to go " +
            "back to the previous menu)?",
            choices=[str(tc.test_case_id) for tc in self.test_cases] +
            ["back"],
        )
        if test_id == "back":
            return
        delete = Confirm.ask("Are you sure you want to delete the " +
                             f"Test Case with the ID {test_id}?"
                             )
        # Delete the Test Case
        if delete:
            for tc in self.test_cases:
                if str(tc.test_case_id) == test_id:
                    del self.test_cases[self.test_cases.index(tc)]
                    break

    def _edit_test_cases_edit(self):
        """Interactively edit the variables of one configured Test Case."""
        console = Console()
        test_id = Prompt.ask(
            "Which Test Case do you want to edit ('back' to go " +
            "back to the previous menu)?",
            choices=[str(tc.test_case_id) for tc in self.test_cases] +
            ["back"],
        )
        if test_id == "back":
            return
        # gather the test case
        test_case = None
        for tc in self.test_cases:
            if str(tc.test_case_id) == test_id:
                test_case = tc
                break
        console.print(Text("\nTest Case Information:", style="bold"))
        panels = PrintAsPanelColumns(panels=[test_case.test.to_panel()])
        panels.print()
        console.print(Text("\nCurrent Test Case Definition:", style="bold"))
        panels = PrintAsPanelColumns(
            panels=[test_case.to_panel()]
        )
        panels.print()
        # Offer to re-enter each variable of the Test Case in turn.
        for variable, value in test_case.test_variables.items():
            info = Text()
            info.append("\nTest Variable: ", style="bold")
            info.append(variable + "\n")
            info.append("Current Value: ", style="bold")
            info.append(str(value) + "\n")
            console.print(info)
            edit = Confirm.ask("Do you want to edit this variable " +
                               f"({variable})?")
            if edit:
                # print Test Information
                new_value = Prompt.ask("New Value")
                test_case.add_test_variable(variable, new_value)

    def _edit_test_cases(self):
        """Loop offering edit/delete operations over the configured Test Cases."""
        # Print Header
        self._show_test_cases()
        show_test_cases = False
        op = ""
        while op != 'back':
            op = Prompt.ask(
                "Do you want to edit or delete a Test Case ('back' "
                "to go back to the main menu)? ",
                choices=["edit", "delete", "back"],
            )
            if op == "back":
                break
            elif op == "delete":
                if show_test_cases:
                    self._show_test_cases()
                self._edit_test_cases_delete()
            elif op == "edit":
                self._edit_test_cases_edit()
            show_test_cases = True

    def _test_cases_prompt(self):
        """Print the table of tests available in the chosen testbed."""
        panels = PrintAsTable(
            header=["ID", "Test Name", "Test Description"],
            rows=[
                [str(i+1), self.tests[i].name, self.tests[i].description]
                for i
                in range(len(self.tests))
            ]
        )
        prompts.display_tests_for_testbed(self.testbed_id)
        panels.print()

    def _confirm_testing_descriptor_output_file(self):
        """Confirm (or change) the output file path for the descriptor.

        Accepts an existing file path, or a directory (in which case
        'testing-descriptor.yaml' is appended). Always returns True.
        """
        console = Console()
        location_ok = False
        while not location_ok:
            info = Text()
            info.append("\nThe Testing Descriptor will be saved in the " +
                        "following file: ", style="bold")
            info.append(self.output_filepath + "\n")
            console.print(info)
            change_filepath = Confirm.ask(
                "Do you wish to save the Testing Descriptor in a different " +
                "file?")
            if not change_filepath:
                location_ok = True
            else:
                file_path = Prompt.ask(
                    "Provide the file path where the Testing Descriptor " +
                    "should be saved ('back' to go back to the main menu)?")
                if file_path == "back":
                    continue
                elif os.path.isfile(file_path):
                    location_ok = True
                    self.output_filepath = file_path
                elif os.path.isdir(file_path):
                    self.output_filepath = os.path.join(
                        file_path,
                        "testing-descriptor.yaml"
                    )
                    location_ok = True
                else:
                    info = Text("\nImpossible to save the Testing Descriptor " +
                                "in the specified location " +
                                f"{file_path}! File or directory does not exist!",
                                style="red")
                    console.print(info)
        #info = Text()
        #info.append("\nThe Testing Descriptor will be saved in the " +
        #            "following file: ", style="bold")
        #info.append(self.output_filepath + "\n")
        #console.print(info)
        return True

    def _save_testing_decritptor(self):
        """Fill the base descriptor template and dump it to the output file.

        NOTE(review): the method name carries the original typo
        ('decritptor'); it is kept to preserve the existing interface.
        """
        # NOTE(review): BASE_TESTING_DESCRIPTOR is mutated in place (no
        # copy is taken) — confirm this is acceptable for repeated calls.
        testing_descriptor = BASE_TESTING_DESCRIPTOR
        testing_descriptor["test_info"]["netapp_id"] = self.netapp_name
        testing_descriptor["test_info"]["network_service_id"] = self.ns_name
        testing_descriptor["test_info"]["testbed_id"] = self.testbed_id
        testing_descriptor["test_info"]["description"] = "Testing "\
            f"Descriptor for the {self.netapp_name} Network Application"
        testcases = []
        for tc in self.test_cases:
            tc_dict = {
                "testcase_id": tc.test_case_id,
                "type": tc.test.test_type,
                "scope": tc.test.test_type,
                "name": tc.test.id,
                "description": tc.description,
                "parameters": []
            }
            for key, value in tc.test_variables.items():
                tc_dict["parameters"].append(
                    {
                        "key": key,
                        "value": value
                    }
                )
            testcases.append(tc_dict)
        testing_descriptor["test_phases"]["setup"]["testcases"] = testcases
        testing_descriptor["test_phases"]["execution"][0]["executions"]\
            [0]["testcase_ids"] = self.tests_cases_ids_ordered_by_user
        # Write the YAML file, then echo it to the console.
        with open(self.output_filepath, 'w') as output_file:
            yaml.dump(
                testing_descriptor,
                output_file,
                default_flow_style=False,
                sort_keys=False
            )
        console = Console()
        console.print(Text("\nGenerated Testing Descriptor:", style="bold"))
        print(
            yaml.dump(
                testing_descriptor,
                default_flow_style=False,
                sort_keys=False
            )
        )
        info = Text()
        info.append("\nThe Testing Descriptor was saved in the " +
                    "following file: ", style="bold")
        info.append(self.output_filepath)
        console.print(info)

    def _test_cases_menu(self):
        """Main interactive loop; exits once 'finish' completes successfully."""
        while True:
            # Show testcases
            self._test_cases_prompt()
            # Present the Menu to the developer
            op = prompts.test_cases_operation()
            if op == "add":
                self._add_test()
            if op == "show":
                self._show_test_cases()
            if op == "info":
                self._show_test_info()
            if op == "edit":
                self._edit_test_cases()
            if op == "finish":
                self._finish_test_cases_definition()
                if self._confirm_testing_descriptor_output_file():
                    break

    def create_testing_descriptor(self):
        """Run the interactive menu and then save the resulting descriptor."""
        self._test_cases_menu()
        self._save_testing_decritptor()
| 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/TestingDescriptorGenerator/descriptor_generator.py | descriptor_generator.py |
# -*- coding: utf-8 -*-
# @Author: Eduardo Santos
# @Date: 2023-02-10 17:15:58
# @Last Modified by: Eduardo Santos
# @Last Modified time: 2023-03-14 17:41:45
import typer
from typer.testing import CliRunner
from main import app
from main import infer_tags
runner = CliRunner()
#def test_cli():
# '''
# Test CLI
# '''
# result = runner.invoke(app, [
# "create-tests",
# "--config-file",
# "../../resources/config.yaml",
# "--infer-tags-from-nsd",
# "../../resources/hackfest_multivdu_nsd.yaml"]
# )
#
# tags = ["{{hackfest_multivdu-ns|hackfest_multivdu-vnf|vnf-mgmt-ext}}",
# "{{hackfest_multivdu-ns|hackfest_multivdu-vnf|vnf-data-ext}}",
# "{{hackfest_multivdu-ns|hackfest_multivdu-vnf|vnf-mgmt-ext}}",
# "{{hackfest_multivdu-ns|hackfest_multivdu-vnf|vnf-data-ext}}"]
#
# #assert result.exit_code == 0
# for tag in tags:
# assert tag in result.stdout
def test_infer_tags():
    """Infer tags from a sample NSD and check the expected tags are present.

    NOTE(review): `infer_tags` is imported from `main`, but no function
    of that name is visible in main.py — confirm it still exists.
    """
    output = infer_tags(["tests/resources/hackfest_multivdu_nsd.yaml"])
    tags = set(["{{hackfest_multivdu-ns|hackfest_multivdu-vnf|vnf-mgmt-ext}}",
                "{{hackfest_multivdu-ns|hackfest_multivdu-vnf|vnf-data-ext}}",
                "{{hackfest_multivdu-ns|hackfest_multivdu-vnf|vnf-mgmt-ext}}",
                "{{hackfest_multivdu-ns|hackfest_multivdu-vnf|vnf-data-ext}}"])
    #assert result.exit_code == 0
    for tag in tags:
        assert tag in output
# -*- coding: utf-8 -*-
# @Author: Rafael Direito
# @Date: 2023-04-20 13:03:58
# @Last Modified by: Rafael Direito
# @Last Modified time: 2023-04-26 23:24:28
from rich.panel import Panel
class Test:
    """A test available in a testbed, as returned by the CI/CD Manager API."""

    def __init__(self, id=None, name=None, description=None, mandatory=None,
                 test_variables=None):
        self.id = id
        self.name = name
        self.description = description
        self.mandatory = mandatory
        self.test_variables = test_variables
        self.test_type = None

    def load_from_dict(self, test_dict):
        """Populate this Test from an API response dictionary.

        `test_variables` is optional in the input; missing means the
        test takes no parameters.
        """
        self.id = test_dict["id"]
        self.name = test_dict["name"]
        self.description = test_dict["description"]
        self.mandatory = test_dict["mandatory"]
        self.test_type = "predefined"
        self.test_variables = []
        if "test_variables" in test_dict:
            for test_variable in test_dict["test_variables"]:
                self.test_variables.append(
                    TestVariable(
                        name=test_variable["variable_name"],
                        description=test_variable["description"],
                        mandatory=test_variable["mandatory"],
                        possible_options=test_variable["possible_options"],
                        type=test_variable["type"],
                        can_be_injected_by_the_nods=test_variable[
                            "can_be_injected_by_the_nods"
                        ]
                    )
                )

    def __dict__(self):
        # NOTE(review): defining __dict__ as a method shadows the normal
        # instance-dict descriptor; kept as-is for backward compatibility.
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
            "mandatory": self.mandatory,
            "test_variable": self.test_variables
        }

    def __str__(self):
        return str(self.__dict__())

    def to_panel(self, expand=None, width=None):
        """Render this test as a rich Panel.

        `expand=True` -> full-width panel; `width` -> fixed width;
        neither -> default width of 65.
        """
        panel_str = f"""
[b]{self.name.title()} Test[/b]
[yellow]Test ID:[/yellow] {self.id}
[yellow]Test Description:[/yellow] {self.description}
[blue][b]Test Variables: [/b]
"""
        if len(self.test_variables) == 0:
            panel_str += """
[blue]This test requires no parameters[/blue]
"""
        for tv in self.test_variables:
            panel_str += f"""
[blue]◉ {tv.name}:
\t[blue]○ Description:[/blue][white] {tv.description}
\t[blue]○ Mandatory:[/blue] {tv.mandatory}
\t[blue]○ Type:[/blue] {tv.type}
"""
            if len(tv.possible_options) != 0:
                panel_str += "\t[blue]○ Possible Options:[/blue] "
                panel_str += str(tv.possible_options) + "\n"
        if expand:
            # Removed stray debug output (print("dddd")) left over from
            # development.
            return Panel(renderable=panel_str, expand=True)
        if width:
            return Panel(renderable=panel_str, expand=False, width=width)
        if not expand and not width:
            return Panel(renderable=panel_str, expand=False, width=65)
class TestVariable:
    """A single configurable variable belonging to a Test definition."""

    def __init__(self, name, description, mandatory, possible_options, type,
                 can_be_injected_by_the_nods):
        self.name = name
        self.description = description
        self.mandatory = mandatory
        self.possible_options = possible_options
        self.type = type
        self.can_be_injected_by_the_nods = can_be_injected_by_the_nods

    def to_panel(self, test_name, expand=None, width=None):
        """Render this variable as a rich Panel for terminal display.

        expand=True wins over width; with neither, a fixed width of 65 is used.
        """
        if self.possible_options:
            shown_options = self.possible_options
        else:
            shown_options = "Not Applicable"
        nods_injectable = "Yes" if self.can_be_injected_by_the_nods else "No"
        body = f"""
[blue]Test Name:[/blue] {test_name}
[b]Test Variable:[/b] {self.name}
[yellow]Test Variable Name:[/yellow] {self.name}
[yellow]Test Variable Description:[/yellow] {self.description}
[yellow]Test Variable Possible Values:[/yellow] {shown_options}
[yellow]Test Variable Type:[/yellow] {self.type}
[yellow]Can Test Variable Be Injected By The NODS:[/yellow] {nods_injectable}
"""
        if expand:
            return Panel(renderable=body, expand=True)
        if width:
            return Panel(renderable=body, expand=False, width=width)
        return Panel(renderable=body, expand=False, width=65)
class TestCase:
    """A concrete, parameterised instance of a Test inside a descriptor."""

    def __init__(self, test, description=None, test_case_id=None):
        self.test = test
        self.test_variables = {}
        self.description = description
        self.test_case_id = test_case_id

    def add_test_variable(self, key, value):
        """Record one variable value for this test case."""
        self.test_variables[key] = value

    def to_panel(self, show_configured=False, expand=None, width=None):
        """Render the test case as a rich Panel, optionally announcing that
        its configuration has been completed."""
        parts = []
        if show_configured:
            parts.append(f"""
[b]Test '{self.test.name}' has been configured![/b]
""")
        # Always include the general test info.
        parts.append(f"""
[blue]Test Name:[/blue] {self.test.name}
[blue]Test Case ID:[/blue] {self.test_case_id}
""")
        for var_name, var_value in self.test_variables.items():
            parts.append(f"""
[yellow]{var_name}:[/yellow] {var_value}""")
        body = "".join(parts)
        if expand:
            return Panel(renderable=body, expand=True)
        if expand is False:
            return Panel(renderable=body, expand=False)
        if width:
            return Panel(renderable=body, expand=False, width=width)
        return Panel(renderable=body, expand=False, width=65)
| 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/CICDManagerAPIClient/test_classes.py | test_classes.py |
# -*- coding: utf-8 -*-
# @Author: Eduardo Santos
# @Date: 2023-04-06 14:55:17
# @Last Modified by: Eduardo Santos
# @Last Modified time: 2023-05-15 17:40:28
import requests
from ..helpers import constants as Constants
from ..CICDManagerAPIClient.test_classes import Test
class CICDManagerAPIClient:
    """Small HTTP client wrapping the 5GASP CI/CD Manager REST API."""

    def __init__(self):
        # Base endpoint of the CI/CD Manager service.
        self.base_url = Constants.CI_CD_SERVICE_URL

    def get_all_testbeds(self):
        '''
        Retrieves testbeds from the CI/CD Manager API.

        Returns
        -------
        List of testbeds that have at least one test defined.
        '''
        # 1. List only the testbeds that have tests
        response = self.__make_get_request(
            Constants.CI_CD_SERVICE_URL +
            Constants.CI_CD_SERVICE_URL_ENDPOINTS.ALL_TESTS.value
        )
        response_data = response.json()["data"]
        testbeds_with_tests = response_data["tests"].keys()
        # 2. Gather the testbeds description
        response = self.__make_get_request(
            Constants.CI_CD_SERVICE_URL +
            Constants.CI_CD_SERVICE_URL_ENDPOINTS.ALL_TESTBEDS.value
        )
        response_data = response.json()["data"]
        return [
            testbed
            for testbed
            in response_data["testbeds"]
            if testbed["id"] in testbeds_with_tests
        ]

    def get_all_tests(self):
        '''
        Retrieves all tests from the CI/CD Manager API.

        Returns
        -------
        List of all tests, or None when the request failed.
        '''
        # DRY fix: delegate to the shared request helper instead of
        # duplicating its whole try/except error-handling block here.
        response = self.__make_get_request(
            f"{self.base_url}/{Constants.ALL_TESTS_PATH}"
        )
        if response is None:
            return None
        return response.json()['data']['tests']

    def get_tests_per_testbed(self, testbed: str):
        '''
        Retrieves all tests available on one testbed.

        Parameters
        ----------
        testbed : str
            Testbed identifier.

        Returns
        -------
        List of Test objects defined for that testbed.
        '''
        response = self.__make_get_request(
            endpoint=Constants.CI_CD_SERVICE_URL +
            Constants.CI_CD_SERVICE_URL_ENDPOINTS.ALL_TESTS.value,
            params={"testbed": testbed}
        )
        tests = []
        for test_info in response.json()['data']['tests'][testbed].values():
            t = Test()
            t.load_from_dict(test_info)
            tests.append(t)
        return tests

    def __make_get_request(self, endpoint, params=None):
        # Shared GET helper: returns the Response on success; prints the
        # error category and returns None on any requests failure.
        try:
            response = requests.get(
                url=endpoint,
                params=params
            )
            response.raise_for_status()
        except requests.exceptions.HTTPError as errh:
            print(f"HTTP Error: {errh}")
            return None
        except requests.exceptions.ConnectionError as errc:
            print(f"Connection Error: {errc}")
            return None
        except requests.exceptions.Timeout as errt:
            print(f"Timeout Error: {errt}")
            return None
        except requests.exceptions.RequestException as err:
            print(f"Unknown Error: {err}")
            return None
        else:
            return response
| 5gasp-cli | /5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/CICDManagerAPIClient/apli_client.py | apli_client.py |
from setuptools import setup

# Minimal packaging metadata for the `distributions` probability package.
setup(name='5kodds_distribution',
      version='0.1',
      description='Gaussian distributions',
      packages=['distributions'],
      zip_safe=False)
| 5kodds-distribution | /5kodds_distribution-0.1.tar.gz/5kodds_distribution-0.1/setup.py | setup.py |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n-1 for a sample, n for a population.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev
        min_range = min(self.data)
        max_range = max(self.data)
        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces
        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))
        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUGFIX: label the second subplot; this line previously re-labelled
        # axes[0] and left the pdf plot without a y-axis label.
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances add for independent Gaussians, hence the quadrature sum.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
class Distribution:

    def __init__(self, mu=0, sigma=1):
        """ Generic distribution class for calculating and
        visualizing a probability distribution.

        Attributes:
            mean (float) representing the mean value of the distribution
            stdev (float) representing the standard deviation of the distribution
            data_list (list of floats) a list of floats extracted from the data file
        """
        self.mean = mu
        self.stdev = sigma
        self.data = []

    def read_data_file(self, file_name):
        """Function to read in data from a txt file. The txt file should have
        one number (float) per line. The numbers are stored in the data attribute.

        Args:
            file_name (string): name of a file to read from

        Returns:
            None
        """
        # BUGFIX: parse with float() (not int()) to honour the documented
        # "one number (float) per line" contract -- int("2.5") raises.
        # Blank lines are skipped instead of crashing, and the redundant
        # explicit close() inside the ``with`` block was removed.
        with open(file_name) as data_file:
            self.data = [float(line) for line in data_file if line.strip()]
| 5kodds-distribution | /5kodds_distribution-0.1.tar.gz/5kodds_distribution-0.1/distributions/Generaldistribution.py | Generaldistribution.py |
from .Gaussiandistribution import Gaussian
from .Binomialdistribution import Binomial
| 5kodds-distribution | /5kodds_distribution-0.1.tar.gz/5kodds_distribution-0.1/distributions/__init__.py | __init__.py |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats to be extracted from the data file
        p (float) representing the probability of an event occurring
        n (int) number of trials
    """
    def __init__(self, prob=.5, size=20):
        self.n = size
        self.p = prob
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self):
        """Function to calculate the mean from p and n

        Args:
            None

        Returns:
            float: mean of the data set
        """
        self.mean = self.p * self.n
        return self.mean

    def calculate_stdev(self):
        """Function to calculate the standard deviation from p and n.

        Args:
            None

        Returns:
            float: standard deviation of the data set
        """
        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
        return self.stdev

    def replace_stats_with_data(self):
        """Function to calculate p and n from the data set, updating the
        stored mean and standard deviation accordingly.

        Args:
            None

        Returns:
            None
        """
        self.n = len(self.data)
        # Data is expected to be 0/1 outcomes, so the mean of the data is p.
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()

    def plot_bar(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.bar(x=['0', '1'], height=[(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')

    def pdf(self, k):
        """Probability density function calculator for the binomial distribution.

        Args:
            k (int): number of successes for which to evaluate the function

        Returns:
            float: probability density function output
        """
        # Binomial pmf: C(n, k) * p^k * (1-p)^(n-k)
        a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
        b = (self.p ** k) * (1 - self.p) ** (self.n - k)
        return a * b

    def plot_bar_pdf(self):
        """Function to plot the pdf of the binomial distribution

        Args:
            None

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        x = []
        y = []
        # calculate the x values to visualize
        for i in range(self.n + 1):
            x.append(i)
            y.append(self.pdf(i))
        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Binomial distributions with equal p

        Args:
            other (Binomial): Binomial instance

        Returns:
            Binomial: Binomial distribution
        """
        # The sum of two binomials is binomial only when both share the same
        # p; the old try/except around this assert re-raised the error
        # unchanged and was removed as dead code.
        assert self.p == other.p, 'p values are not equal'
        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()
        return result

    def __repr__(self):
        """Function to output the characteristics of the Binomial instance

        Args:
            None

        Returns:
            string: characteristics of the Binomial
        """
        return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
5minute
=======
Give me an instance of my image on OpenStack. Hurry!
QuickStart
----------
To run 5minute you need to install following libs:
::
python-keystoneclient
python-cinderclient
python-heatclient
python-neutronclient
python-novaclient
python-xmltodict
python-prettytable
To install them from RPMs (Fedora), please do
``dnf -y install $( cat requirement-rpms.txt )``.
If you have installed 5minute using pip, they were installed as
dependencies. Otherwise, you have to install them manually.
Get config file:
1. Login into your OpenStack instance WebUI
2. Navigate to Access & Security -> API Access
3. Save file from "Download OpenStack RC File" to ~/.5minute/config
Get started:
Show help:
::
$ 5minute help
Upload your SSH public key:
::
$ 5minute key ~/.ssh/id_rsa.pub
Show images we can work with:
::
$ 5minute images
Boot your machine (consider adding '--name' or '--flavor' to the
command):
::
$ 5minute boot <image_name_or_id>
When the boot is finished, you should be able to ssh to your new machine
::
$ ssh root@<machine_ip_address>
You can list your current machines:
::
$ 5minute list
When you are done, kill the machine (you can do this via OpenStack webUI
as well):
::
$ 5minute delete <machine_name_or_id>
To list available OpenStack scenarios:
::
$ 5minute scenario templates
Run scenario:
::
$ 5minute scenario boot <scenario_template_name>
When finished with the scenario, you should delete it:
::
$ 5minute scenario delete <scenario_name_or_id>
| 5minute | /5minute-0.2.1.tar.gz/5minute-0.2.1/README | README |
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Use the README as the long description displayed on PyPI.
with open(path.join(here, 'README'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='5minute',
    version='0.2.1',
    description='A tool for quick creation and deployment of Openstack machines used for QA testing.',
    long_description=long_description,
    url='https://github.com/SatelliteQE/5minute',
    author='Martin Korbel',
    author_email='mkorbel@redhat.com',
    license='GNU General Public License v2 (GPLv2)',

    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Information Technology',
        'Topic :: Software Development :: Quality Assurance',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
    ],
    keywords='openstack testing deployment',
    # Runtime requirements: the OpenStack client stack plus table/XML helpers.
    install_requires=['python-keystoneclient',
                      'python-cinderclient',
                      'python-heatclient',
                      'python-neutronclient',
                      'python-novaclient',
                      'xmltodict',
                      'prettytable'],
    packages=find_packages(),
    # Ship the bundled scenario README inside the installed package.
    package_data={
        'vminute': ['scenarios/README']
    },
    # Expose the ``5minute`` console command.
    entry_points={
        'console_scripts': [
            '5minute=vminute:main_main'
        ]
    }
)
| 5minute | /5minute-0.2.1.tar.gz/5minute-0.2.1/setup.py | setup.py |
#!/usr/bin/python
# -*- coding: utf8 -*-
import getopt
import os
import sys
import re
import termios
import fcntl
import subprocess
import urllib2
import random
import time
import math
import traceback
import urllib
from prettytable import PrettyTable
import socket
try:
from keystoneclient.v2_0 import client as keystone_client
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exceptions
from heatclient import client as heat_client
from heatclient import exc as heat_exceptions
from neutronclient.neutron import client as neutron_client
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from keystoneclient.auth.identity import v2 as keystoneIdentity
from keystoneclient import session as keystoneSession
import xmltodict
except ImportError, ie:
sys.stderr.write(ie.message+"\n")
sys.exit(1)
try:
# Python 2.7
from functools import wraps
except:
# Python 2.4
from backports.functools import wraps
CONF_DIR = '~/.5minute'      # per-user configuration directory
USER = os.environ["USER"]    # login name, used to tag keys and instances
DEBUG = False                # enables traceback printing in die()
DISABLE_CATCH = False        # when True, catch_exception re-raises instead
PROGRESS = None              # progress-bar tick counter; None = no bar active
# -----------------------------------------------------------
# Helpers functions
# -----------------------------------------------------------
def die(message, excode=1, exception=None):
    """
    Print error message into stdErr and terminate the process.
    :param message: message
    :param excode: exitcode
    :param exception: exception for debugging mode
    """
    # ``global`` declarations are only required for assignment; this
    # function merely reads PROGRESS and DEBUG, so they were removed.
    if PROGRESS is not None:
        # Close any running progress bar with a red FAIL marker first.
        progress(result="\x1b[31;01mFAIL\x1b[39;49;00m")
    if exception and DEBUG:
        # In debug mode, dump the traceback of the triggering exception.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        sys.stderr.write("\n\x1b[92;01m")
        traceback.print_tb(exc_traceback)
        sys.stderr.write("\x1b[39;49;00m\n")
    sys.stderr.write("\n\x1b[31;01m%s\x1b[39;49;00m\n\n" % message)
    sys.exit(excode)
def warning(message, answer=None):
    """
    Print a warning message into stdErr and optionally wait for a one-key
    answer.
    :param message: message
    :param answer: list of supported options. Default is the first item.
    """
    c = ""
    sys.stderr.write("\n\x1b[92;01m%s " % message)
    if answer:
        # Switch the terminal to raw, non-blocking mode so a single key
        # press can be read without waiting for Enter.
        fd = sys.stdin.fileno()
        oldterm = termios.tcgetattr(fd)
        newattr = termios.tcgetattr(fd)
        newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, newattr)
        oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
        try:
            # Busy-wait for a character: reads raise IOError while no input
            # is available in non-blocking mode.
            while 1:
                try:
                    c = sys.stdin.read(1)
                    break
                except IOError:
                    pass
        finally:
            # Always restore the previous terminal settings.
            termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
            fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
        c = (u"%s" % c).lower()
    sys.stderr.write(" %s\x1b[39;49;00m\n\n" % c)
    if answer:
        # Return the pressed option if recognised; otherwise fall back to
        # the first (default) option -- note pop(0) mutates the caller's list.
        for it in answer:
            if c in it:
                return c
        return answer.pop(0)
def progress(title=None, result=None):
    """
    Function for displaying of a progress bar.

    Example of using:
        progress(title="Name of action")
        for i in range(0, 30):
            progress()
        progress(result="GOOD")
    """
    # Spinner characters cycled through as the bar advances.
    CHARS = ('.', '-', '=', '_')
    global PROGRESS
    if title:
        # Start a new bar: reset the counter and print the padded title.
        PROGRESS = 0
        sys.stdout.write("%s" % title.ljust(40, " "))
    if result:
        # Finish the bar: rewind over the drawn cells and print the result.
        sys.stdout.write("%s\x1b[92;01m%s\x1b[39;49;00m\n" %
                         ("\b" * (PROGRESS % 20), result.ljust(20, " ")))
        PROGRESS = None
    if title is None and result is None:
        # A plain tick: advance the counter and draw the next character.
        PROGRESS += 1
        if PROGRESS % 20 == 0:
            # Every 20 ticks rewind the 19 drawn cells so the row is redrawn
            # with the next CHARS character; the extra increment steps past
            # the multiple of 20.
            sys.stdout.write("\b" * 19)
            PROGRESS += 1
        # NOTE(review): under Python 2, PROGRESS / 20 is integer division,
        # so math.ceil() is effectively a no-op here.
        sys.stdout.write(CHARS[int(math.ceil(PROGRESS / 20)) % len(CHARS)])
        sys.stdout.flush()
def catch_exception(text=None, type=Exception):
    """Decorator factory: abort via die() when the wrapped callable raises
    an exception of *type*, unless catching is globally disabled.

    :param text: message passed to die(); falls back to the exception's own
                 message when None
    :param type: exception class (or tuple) to intercept
    """
    def _decorator(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except type as ex:
                # Inside a disable_catch_exception block the original
                # exception is propagated untouched.
                if DISABLE_CATCH:
                    raise ex
                die(text if text is not None else ex.message, exception=ex)
        return _wrapped
    return _decorator
class disable_catch_exception:
    """ Disable the catch_exception decorator within a ``with`` block. """
    def __enter__(self):
        global DISABLE_CATCH
        DISABLE_CATCH = True

    def __exit__(self, type, value, traceback):
        # Re-enable catching even when the block exits via an exception.
        global DISABLE_CATCH
        DISABLE_CATCH = False
def get_FQDN_from_IP(ip):
    """Map a floating IP address to its predictable lab FQDN."""
    # If we want to support old version of OpenStack, we have to update this
    # function and solve it via serviceman.
    # NOTE(review): placeholders {1}-{3} deliberately use only the last
    # three octets of the address -- confirm this matches the lab scheme.
    octets = ip.split(".")
    return "host-{1}-{2}-{3}.host.centralci.eng.rdu2.redhat.com".format(*octets)
# -----------------------------------------------------------
# Classes
# -----------------------------------------------------------
class BaseClass(object):
__nova = None
__keystone = None
__cinder = None
__heat = None
__token = None
__neutron = None
__first_check = False
__tmpconf = "/tmp/5minute.conf"
__profiles = "profiles/"
_scenarios = "./vminute/scenarios/"
__check_env_done = False
@catch_exception(
"The configuration file ~/.5minute/config does not exist.\n"
"Please download the OpenStack RC file from OpenStack WebUI (Access & Security > API Access "
"> Download OpenStack RC file) and save it to ~/.5minute/config.\n")
def __load_configuration(self):
if not os.path.isfile(self.__tmpconf):
subprocess.check_call("source {config_loc}/config; env | grep OS_ >> {tmpfile}"
.format(config_loc=CONF_DIR, tmpfile=self.__tmpconf), shell=True)
lines = []
with open(os.path.expanduser(self.__tmpconf), "r") as fd:
lines = fd.readlines()
rx2 = re.compile(r'^\s*([A-z_]*)="?([^"]*)"?\s*$')
for it in lines:
res = rx2.search(it)
if res:
key, value = res.groups()
os.environ[key] = value.strip()
def __checkenv(self):
if self.__check_env_done:
return
if not os.environ.get('OS_AUTH_URL') or \
not os.environ.get('OS_TENANT_NAME') or \
not os.environ.get('OS_USERNAME') or \
not os.environ.get('OS_PASSWORD'):
if not self.__first_check:
self.__load_configuration()
self.__first_check = True
self.__checkenv()
else:
die("The configuration file %s/config doesn't contain all important variables.\n" % CONF_DIR)
self.__profiles = "%s/%s" % (CONF_DIR, self.__profiles)
if not os.path.isdir(os.path.expanduser(self.__profiles)):
try:
os.makedirs(os.path.expanduser(self.__profiles))
except OSError:
die("The problem with creating of folder '%s'." % self.__profiles)
self.__scenarios = "%s/%s" % (CONF_DIR, self.__scenarios)
if not os.path.isdir(os.path.expanduser(self.__scenarios)):
try:
os.makedirs(os.path.expanduser(self.__scenarios))
except OSError:
die("The problem with creating of folder '%s'." % self.__scenarios)
self.__check_env_done = True
@catch_exception("Your SSL pub-key is not yet uploaded on the server. "
"Please use: 5minute key ~/.ssh/id_dsa.pub")
def _check_key(self):
self.nova.keypairs.get(USER)
@catch_exception("Problem with connection to OpenStack. Please, check the configuration file "
"~/.5minute/config. (maybe OS_PASSWORD is not explicite value or is not set up in env)")
def __check_connection(self):
try:
self.__nova.authenticate()
except Exception as ex:
os.remove(self.__tmpconf)
raise ex
def __get_cinder(self):
if not self.__cinder:
self.__checkenv()
self.__cinder = cinder_client.Client(1,
os.environ.get('OS_USERNAME'),
os.environ.get('OS_PASSWORD'),
os.environ.get('OS_TENANT_NAME'),
os.environ.get('OS_AUTH_URL'))
return self.__cinder
def __get_heat(self):
if not self.__heat:
self.__checkenv()
endpoint = self.__get_endpoint('orchestration')
self.__heat = heat_client.Client(1, endpoint=endpoint, token=self.token)
return self.__heat
def __get_keystone(self):
if not self.__keystone:
self.__checkenv()
self.__keystone = keystone_client.Client(username=os.environ.get('OS_USERNAME'),
password=os.environ.get('OS_PASSWORD'),
tenant_name=os.environ.get('OS_TENANT_NAME'),
auth_url=os.environ.get('OS_AUTH_URL'))
return self.__keystone
def __get_nova(self):
if self.__nova:
return self.__nova
self.__checkenv()
self.__nova = nova_client.Client(2,
username=os.environ.get('OS_USERNAME'),
api_key=os.environ.get('OS_PASSWORD'),
project_id=os.environ.get('OS_TENANT_NAME'),
auth_url=os.environ.get('OS_AUTH_URL'))
self.__check_connection()
return self.__nova
def __get_token(self):
if not self.__token:
self.__checkenv()
auth = keystoneIdentity.Password(username=os.environ.get('OS_USERNAME'),
password=os.environ.get('OS_PASSWORD'),
tenant_name=os.environ.get('OS_TENANT_NAME'),
auth_url=os.environ.get('OS_AUTH_URL'))
session = keystoneSession.Session(auth=auth)
self.__token = auth.get_token(session)
return self.__token
def __get_neutron(self):
if not self.__neutron:
self.__checkenv()
endpoint = self.__get_endpoint('network')
self.__neutron = neutron_client.Client('2.0', endpoint_url=endpoint, token=self.token)
return self.__neutron
def __get_endpoint(self, name):
endpoints = self.keystone.service_catalog.get_endpoints()
if name not in endpoints:
die("This endpoint '%s' is not known" % name)
return endpoints.get(name)[0]['publicURL']
def __getattr__(self, name):
if name == 'cinder':
return self.__get_cinder()
elif name == 'heat':
return self.__get_heat()
elif name == 'nova':
return self.__get_nova()
elif name == 'keystone':
return self.__get_keystone()
elif name == 'token':
return self.__get_token()
elif name == 'neutron':
return self.__get_neutron()
return None
@catch_exception("The problem with parsing of profile XML file. ")
def __get_scenario(self, filename):
xml = None
try:
xml = urllib2.urlopen('https://example.com/scenarios/%s' % filename).read()
except:
warning("This profile '%s' doesn't exist." % filename)
return dict()
return xmltodict.parse(xml)
def cmd(self, argv):
self.help()
def help(self):
print """
Usage: 5minute <-d|--debug> [COMMAND]
Manager for your openstack machines.
OPTIONS:
-d, --debug - enable debugging mode.
COMMANDS:
help - this help
key - upload your SSL key on the server
images - the list of accessible images
flavor - the list of flavors
list - the list of instances
delete - delete a quest
boot - create a new quest
scenario - working with scenarios
Examples:
5minute help
5minute key ~/.ssh/id_dsa.pub
5minute images
5minute images -h
5minute images --all
5minute images satellite
5minute flavor
5minute list
5minute list --all
5minute list satellite
5minute boot --help
5minute boot 5minute-RHEL6
5minute boot --name myRHEL6 5minute-RHEL6
5minute scenarios --help
"""
class KeyClass(BaseClass):
@catch_exception("The problem with uploading of public key.")
def __upload_key(self, key):
if not os.access(key, os.R_OK):
die("SSL key '%s' is not readable." % key)
with open(key) as fd:
self.nova.keypairs.create(USER, fd.read())
print "The key %s was successfully uploaded." % key
def cmd(self, argv):
if len(argv) == 0 or argv[0] in ('help', '--help', '-h'):
self.help()
else:
self.__upload_key(argv[0])
def help(self):
print """
Usage: 5minute key <SSL-PUB-KEY>
Upload your SSL key on the OpenStack server.
Examples:
5minute key ~/.ssh/id_dsa.pub
"""
class ImagesClass(BaseClass):
__filter = "5minute-"
@catch_exception("The problem getting list of images.")
def __images(self):
images = self.nova.images.list()
x = PrettyTable(["Name", "ID", "Status"])
x.align["Name"] = "l"
rx = re.compile(self.__filter, re.IGNORECASE)
for img in images:
if rx.search(img.name):
row = [img.name, img.id, img.status]
x.add_row(row)
print x.get_string(sortby="Name")
def cmd(self, argv):
if len(argv) > 0:
if argv[0] in ('help', '--help', '-h'):
self.help()
return 0
elif argv[0] in ('--all', '-a'):
self.__filter = ""
else:
self.__filter = argv[0]
self.__images()
def help(self):
print """
Usage: 5minute images [PARAM]
Show the list of accessible images. By default, it shows only 5minute images.
PARAM:
-a, --all show all accessible images
<REGEXP> we can use a regular expression for the filtering of the result
Examples:
5minute images
5minute images --all
5minute images satellite
5minute images fedora
"""
class FlavorClass(BaseClass):
@catch_exception("The problem getting list of flavors.")
def __flavors(self):
flavors = self.nova.flavors.list()
x = PrettyTable(["Name", "CPU", "RAM", "HDD", "ephemeral", "swap"])
x.align["Name"] = "l"
for flav in flavors:
row = [flav.name, flav.vcpus,
"%s MB" % flav.ram,
"%s GB" % flav.disk,
"%s GB" % flav.ephemeral,
"%s MB" % flav.swap if flav.swap else ""]
x.add_row(row)
print x
def cmd(self, argv):
if len(argv) > 0:
if argv[0] in ('help', '--help', '-h'):
self.help()
return 0
self.__flavors()
def help(self):
print """
Usage: 5minute flavors
Show the list of accessible flavors.
Examples:
5minute flavors
"""
class ServerClass(BaseClass):
    """Shared lookup and network-selection helpers for the commands that
    operate on instances (list/boot/delete/...)."""

    # Values matching this pattern are resolved as OpenStack IDs, anything
    # else as a human-readable name.  Hoisted here because the identical
    # regex literal used to be repeated in every get_* method below.
    _ID_RE = re.compile(r'^[0-9a-f\-]+$')

    def _is_id(self, value):
        # True when *value* should be resolved via get(); False -> find().
        return self._ID_RE.match(value) is not None

    @catch_exception("The instance doesn't exist.", nova_exceptions.NotFound)
    @catch_exception("The name of the instance is ambiguous, please use ID.", nova_exceptions.NoUniqueMatch)
    def get_instances(self, id):
        """Return a server looked up by ID or by unique name."""
        if self._is_id(id):
            return self.nova.servers.get(id)
        return self.nova.servers.find(name=id)

    @catch_exception("The image doesn't exist.", nova_exceptions.NotFound)
    @catch_exception("The name of the image is ambiguous, please use ID.", nova_exceptions.NoUniqueMatch)
    def get_image(self, id):
        """Return an image looked up by ID or by unique name."""
        if self._is_id(id):
            return self.nova.images.get(id)
        return self.nova.images.find(name=id)

    @catch_exception("The volume doesn't exist.", cinder_exceptions.NotFound)
    @catch_exception("The name of the volume is ambiguous, please use ID.", cinder_exceptions.NoUniqueMatch)
    def get_volume(self, id):
        """Return a volume looked up by ID or by unique name."""
        if self._is_id(id):
            return self.cinder.volumes.get(id)
        return self.cinder.volumes.find(name=id)

    @catch_exception("The snapshot doesn't exist.")
    def get_snapshot(self, id):
        """Return a volume snapshot looked up by ID or by unique name."""
        if self._is_id(id):
            return self.cinder.volume_snapshots.get(id)
        return self.cinder.volume_snapshots.find(display_name=id)

    @catch_exception("The flavor doesn't exist.", nova_exceptions.NotFound)
    @catch_exception("The flavor is ambiguous, please use ID.", nova_exceptions.NoUniqueMatch)
    def get_flavor(self, id):
        """Return a flavor looked up by ID or by unique name."""
        if self._is_id(id):
            return self.nova.flavors.get(id)
        return self.nova.flavors.find(name=id)

    @catch_exception("The problem with getting of the list of networks.")
    def get_networks(self, filter=None):
        """Return all networks that have at least one subnet and match
        *filter* (a dict of attribute -> regexp for strings, or exact value
        otherwise)."""
        def test_net(net, filter):
            if filter is None:
                return True
            for key, val in filter.items():
                if isinstance(val, str):
                    if re.search(val, net.get(key, "")) is None:
                        return False
                elif val != net.get(key):
                    return False
            return True
        res = list()
        for net in self.neutron.list_networks()['networks']:
            if test_net(net, filter) and len(net.get('subnets')) > 0:
                res.append(net)
        return res

    def get_stable_private_network(self):
        """Pick (at random) one of the 'default-*' private networks with the
        most free floating IPs, together with its external network."""
        def get_count_free_ip(cidr, flist):
            # Usable pool size (minus network/broadcast) less the addresses
            # already bound to an instance.
            address_size = 32
            ip_pool_mask = int(cidr.split("/")[1])
            ip_pool_bit_size = address_size - ip_pool_mask
            max_pool_size = 2 ** ip_pool_bit_size - 2
            return max_pool_size - len([ip_addr for ip_addr in flist if
                                        ip_addr.pool == cidr and ip_addr.instance_id])
        nets = self.get_networks(filter={'name': "^default-", "router:external": False})
        max_network_space = 0
        flist = self.nova.floating_ips.list()
        res = list()
        for net in nets:
            pub_net = self.__get_external_for_private_network(net)
            if pub_net:
                sub = self.neutron.list_subnets(id=net['subnets'].pop(0))
                if len(sub.get('subnets')) > 0:
                    cidr = sub['subnets'][0]['cidr']
                    network_free_space = get_count_free_ip(cidr, flist)
                    if network_free_space > max_network_space:
                        # A strictly better network resets the candidate list.
                        max_network_space = network_free_space
                        res = list()
                        res.append({'private': net, 'free_ip': network_free_space, 'public': pub_net})
                    elif network_free_space > 0 and network_free_space == max_network_space:
                        res.append({'private': net, 'free_ip': network_free_space, 'public': pub_net})
        return random.choice(res)

    def __get_external_for_private_network(self, pnet):
        """
        This function returns public network for private network,
        if the router is present between these nets.
        """
        ports = self.neutron.list_ports(network_id=pnet['id'], device_owner="network:router_interface").get('ports')
        if len(ports) == 0:
            return None
        router = self.neutron.show_router(ports.pop(0)['device_id'])
        return self.neutron.show_network(router['router']['external_gateway_info']['network_id'])['network']

    def cmd(self, argv):
        # Abstract hook: concrete commands override this.
        pass

    def help(self):
        # Abstract hook: concrete commands override this.
        pass
class ListInstancesClass(ServerClass):
    """
    This is only view on the ServerClass for getting of list of instances.
    """
    def cmd(self, argv):
        # Without arguments only the current user's instances are shown
        # (they are all prefixed with "<USER>-").
        filter = None
        if len(argv) == 0:
            filter = "%s-" % USER
        else:
            if argv[0] in ('help', '--help', '-h'):
                self.help()
                return 0
            elif argv[0] not in ('--all', '-a'):
                # Any other argument is used as a name filter.
                filter = argv[0]
        self.list_instances(filter)

    @catch_exception("The problem with getting of the list of instances.")
    def list_instances(self, filter):
        """Print a table of instances matching the name filter
        (``None`` means all accessible instances)."""
        instances = self.nova.servers.list(search_opts={"name": filter})
        x = PrettyTable(["Name", "ID", "Status", "FQDN"])
        x.align["Name"] = "l"
        x.align["FQDN"] = "l"
        for ins in instances:
            row = [ins.name, ins.id, ins.status, ins.metadata.get('fqdn', "")]
            x.add_row(row)
        print x.get_string(sortby="Name")

    def help(self):
        print """
         Usage: 5minute list [PARAM]
         Show the list of instances. By default, it shows only your instances.
         PARAM:
             -a, --all      show all accessible instances
             <REGEXP>       we can use a regular expression for the filtering of the result
         Examples:
             5minute list
             5minute list --all
             5minute list satellite
             5minute list fedora
         """
class DeleteInstanceClass(ServerClass):
    """
    This is only view on the ServerClass for deleting of an instance.
    """
    def cmd(self, argv):
        if len(argv) == 0:
            die("Missing parameter. Please try 5minute delete <name|id>.")
        else:
            if argv[0] in ('help', '--help', '-h'):
                self.help()
                return 0
            else:
                self.kill_instances(argv[0])

    # @catch_exception("The problem deleting of the instances.")
    def kill_instances(self, id):
        """Delete the instance and release its floating IPs and volumes.

        Floating IPs are detached first; if deletion fails (e.g. the server
        is locked) the IPs are re-attached and volume detaching rolled back.
        """
        server = self.get_instances(id)
        progress(title="Release floating IP:")
        # This is stupid method for checking of lock, if it is activated
        fips = self.nova.floating_ips.findall(instance_id=server.id)
        for fip in fips:
            server.remove_floating_ip(fip.ip)
        progress(result="DONE")
        vols = self.nova.volumes.get_server_volumes(server.id)
        if len(vols) > 0:
            progress(title="Release volumes:")
            for vol in vols:
                progress()
                cvol = self.cinder.volumes.get(vol.id)
                self.cinder.volumes.begin_detaching(cvol)
            progress(result="DONE")
        progress(title="Delete instance:")
        done = False
        try:
            server.delete()
            done = True
            # Poll until the instance disappears from the server list.
            while len(self.nova.servers.findall(id=server.id)) > 0:
                time.sleep(1)
                progress()
            progress(result="DONE")
        except Exception as e:
            if 'locked' in e.message:
                progress(result="\x1b[31;01mLOCKED\x1b[39;49;00m")
            else:
                progress(result="FAIL")
        for fip in fips:
            if done:
                self.nova.floating_ips.delete(fip.id)
            else:
                # Deletion failed: give the floating IP back to the server.
                server.add_floating_ip(fip.ip)
        for vol in vols:
            cvol = self.cinder.volumes.get(vol.id)
            if done:
                progress(title="Delete volume:")
                cvol.delete()
                while len(self.cinder.volumes.findall(id=cvol.id)) > 0:
                    time.sleep(1)
                    progress()
                progress(result="DONE")
            else:
                # Deletion failed: cancel the pending detach.
                self.cinder.volumes.roll_detaching(cvol)

    def help(self):
        print """
         Usage: 5minute (del|kill|delete) <NAME|ID>
         Delete instance.
         PARAM:
             <NAME|ID>   Name or ID of instance
         Examples:
             5minute delete 5minute-RHEL6
             5minute kill 5minute-RHEL6
         """
class BootInstanceClass(ServerClass):
    """
    This is only view on the ServerClass for booting of instance.
    """
    # Default values; `variables` holds the substitutions for userdata
    # templates, `created_volume` marks a volume we have to clean up.
    ufile = ""
    default_flavor = "m1.medium"
    variables = None
    created_volume = False

    def __parse_params(self, opts, argv):
        """Translate getopt results into the ``params`` dict; the single
        remaining positional argument is resolved to a glance image."""
        params = {}
        for key, val in opts:
            if key in ('--help', '-h') or 'help' in argv:
                params['help'] = True
                return params
            elif key in ('--flavor', '-f'):
                params['flavor'] = self.get_flavor(val)
            elif key in ('--console', '-c'):
                params['console'] = True
            elif key in ('--name', '-n'):
                params['name'] = "%s-%s" % (USER, val)
            elif key in ('--volume', '-v'):
                params['volume'] = val
            elif key in ('--profile', '-p'):
                # NOTE(review): cmd() below does not list 'profile=' in the
                # getopt long options, so '--profile' is rejected there and
                # only '-p' can reach this branch — confirm intent.
                params['profile'] = val
            elif key == '--novolume':
                params['novolume'] = True
            elif key == '--noip':
                params['noip'] = True
            elif key == '--userdata':
                params['userdata'] = val
            else:
                die("Bad parameter '%s'. Please try 5minute boot --help." % key)
        if len(argv) != 1:
            die("The name of image is ambiguous or empty.")
        params['image'] = self.get_image(argv.pop(0))
        self.add_variable('image', params['image'].name)
        self.add_variable('image_id', params['image'].id)
        if 'name' not in params:
            # Default instance name: <user>-<image name>.
            params['name'] = "%s-%s" % (USER, params['image'].name)
        self.add_variable('name', params['name'])
        return params

    @catch_exception("Bad parameter. Please try 5minute boot --help.")
    def cmd(self, argv):
        opts, argv = \
            getopt.getopt(argv, "hcf:n:v:p:",
                          ['help', 'console', 'flavor=', 'name=', 'volume=', 'userdata=',
                           'novolume', 'noip'])
        self.params = self.__parse_params(opts, argv)
        if 'help' in self.params:
            self.help()
            return 0
        self.boot_instance()

    def add_variable(self, key, val):
        # Lazily create the dict of template variables for userdata scripts.
        if not self.variables:
            self.variables = dict()
        self.variables[key] = val

    def __release_resources(self):
        """Undo partial setup (floating IP, freshly created volume) after
        a failure during booting."""
        if "floating-ip" in self.variables and \
           self.variables.get("floating-ip"):
            self.nova.floating_ips.delete(self.variables['floating-ip'])
        if self.created_volume:
            cvol = self.cinder.volumes.get(self.volume.id)
            cvol.detach()
            cvol.delete()

    @catch_exception()
    def boot_instance(self):
        """Run the whole boot pipeline; release the allocated resources and
        die with the original error when any step fails."""
        self._check_key()
        with disable_catch_exception():
            try:
                self.__setup_networking()
                self.__setup_volume(self.params['image'])
                self.__setup_userdata_script(self.params['image'])
                self.__choose_flavor(self.params['image'])
                self.__create_instance(self.params['image'])
            except Exception, ex:
                self.__release_resources()
                die(str(ex), exception=ex)

    def help(self):
        print """
         Usage: 5minute boot [PARAM] <IMAGE-NAME|IMAGE-ID>
         Boot new instance.
         PARAM:
              -n, --name      name of the instance
              -f, --flavor    name of flavor
              -v, --volume    the volume snapshot (default: 5minute-satellite5-rpms)
              --novolume      no voluume snapshot
              -c, --console   display the console output during booting
              --userdata      the paths or URLs to cloud-init scripts
         Examples:
             5minute boot 5minute-RHEL6
         """

    def __setup_networking(self):
        """Pick a private network with free capacity, allocate a floating
        IP on its public side and resolve the matching DNS name."""
        progress(title='Chossing the private network:')
        network = self.get_stable_private_network()
        progress(result=network['private']['name'])
        progress(title='Obtaining a floating IP:')
        floating_ip = self.nova.floating_ips.create(network['public']['id'])
        if not floating_ip:
            raise Exception("The problem with getting of IP address.")
        self.add_variable('floating-ip', floating_ip)
        self.add_variable('private-net', network['private']['id'])
        progress(result=floating_ip.ip)
        progress(title='Obtaining a domain name:')
        # Reverse DNS of the floating IP gives the instance's FQDN.
        hostname = get_FQDN_from_IP(floating_ip.ip)
        if not hostname:
            raise Exception("The problem with getting of DNS record.")
        self.add_variable('hostname', hostname)
        progress(result=hostname)

    # @catch_exception("The problem with downloading of the userdata script for this image")
    def __setup_userdata_script(self, image):
        """Download the cloud-init script(s) given on the command line or in
        the image metadata and expand ``{variable}`` placeholders."""
        res = None
        filenames = None
        if "userdata" in self.params:
            filenames = self.params['userdata']
        elif "cscripts" in image.metadata:
            filenames = image.metadata['cscripts']
        if filenames:
            progress(title='Loading the userdata script:')
            self.params['cscript'] = ""
            for filename in filenames.split():
                cscript = urllib.urlopen(filename).read()
                # str.format substitutes the collected variables
                # (name, image, hostname, ...) into the script.
                self.params['cscript'] += cscript.format(**self.variables)
                self.params['cscript'] += "\n"
            progress(result="DONE")

    def __setup_volume(self, image):
        """Attach an existing volume or create one from a snapshot, unless
        --novolume was requested."""
        self.volume = None
        if not self.params.get('novolume', False):
            volume_name = self.params.get('volume')
            if volume_name is None:
                volume_name = image.metadata.get('volumes')
            if volume_name:
                # Is the volume_name name/id of existing volume?
                try:
                    self.volume = self.get_volume(volume_name)
                except cinder_exceptions.NotFound as ex:
                    pass
                if self.volume is None:
                    # The volume_name is name of snapshot,
                    # we create new volume from it
                    self.volume = self.__create_new_volume(volume_name, image)

    def __create_new_volume(self, volume_name, image):
        """Create a new cinder volume from the snapshot *volume_name* and
        wait until it is ready."""
        progress(title="Creating a new volume:")
        snap = self.get_snapshot(volume_name)
        name = self.params.get('name', "%s-%s" % (USER, image.name))
        vol = self.cinder.volumes.create(size=snap.size, snapshot_id=snap.id,
                                         display_name=name)
        # Poll until cinder finishes creating the volume.
        while vol.status == 'creating':
            progress()
            time.sleep(1)
            vol = self.get_volume(vol.id)
        if vol.status == 'error':
            raise Exception("The problem with creating of the volume.")
        progress(result="DONE")
        # Remember that we created it, so a failed boot can clean it up.
        self.created_volume = True
        return vol

    def __choose_flavor(self, image):
        # Flavor priority: --flavor option, image metadata, class default.
        progress(title="Used flavor:")
        if 'flavor' not in self.params:
            if 'default_flavor' in image.metadata:
                self.params['flavor'] =\
                    self.get_flavor(image.metadata.get('default_flavor'))
            if self.params.get('flavor') is None:
                self.params['flavor'] =\
                    self.get_flavor(self.default_flavor)
        flavor = ("{name} (RAM: {ram} MB, vCPU: {vcpus}, disk: {disk} GB)")\
            .format(**self.params['flavor'].__dict__)
        progress(result=flavor)

    def __create_instance(self, image):
        """Create the nova server, wait for it to leave BUILD, attach the
        floating IP and follow the console output."""
        progress(title="Instance name:", result=self.params.get('name'))
        progress("Creating a new instance:")
        param_dict = {'name': self.params.get('name'),
                      'image': image.id,
                      'flavor': self.params.get('flavor').id,
                      'key_name': USER,
                      'nics': [{'net-id': self.variables['private-net']}],
                      'meta': {'fqdn': self.variables["hostname"]},
                      'security_group': ['satellite5'],
                      'config_drive': True}
        if self.volume:
            param_dict['block_device_mapping'] = {'vdb': self.volume.id}
        # print(param_dict)
        if "cscript" in self.params:
            param_dict['userdata'] = self.params['cscript']
        server = self.nova.servers.create(**param_dict)
        status = server.status
        # Poll nova until the instance leaves the BUILD state.
        while status == 'BUILD':
            time.sleep(1)
            progress()
            status = self.nova.servers.get(server.id).status
        # print server.progress
        if status == 'ACTIVE':
            progress(result="DONE")
        else:
            progress(result="FAIL")
        if "floating-ip" in self.variables:
            server.add_floating_ip(self.variables['floating-ip'])
        self.__check_console_output(server)

    def __check_console_output(self, server):
        """Follow the console log until a login prompt appears or the output
        stalls (60s without new lines); highlight warnings and errors.

        Returns True on a detected login prompt, False otherwise.
        """
        lindex = 0
        show_output = self.params.get('console')
        exit_status = None
        exit_message = "DONE"
        counter = 60
        reg_login = re.compile(r".*login:\s*$")
        reg_warning = re.compile(r"(warning)", re.I)
        reg_error = re.compile(r"(error)", re.I)
        if show_output:
            print "Booting of the instance:"
        else:
            progress(title="Booting of the instance:")
        output = server.get_console_output().splitlines()
        while counter > 0 and exit_status is None:
            nindex = len(output) - 1
            if lindex >= nindex:
                # No new console lines: count down towards the timeout.
                counter -= 1
            else:
                counter = 60
            for line in output[lindex:]:
                patern = "%s\n"
                if reg_login.match(line):
                    # A login prompt means the boot finished successfully.
                    counter = 0
                    if exit_status is None:
                        exit_status = True
                    break
                if reg_warning.search(line):
                    patern = "\x1b[92;01m%s\x1b[39;49;00m\n"
                if reg_error.search(line):
                    patern = "\x1b[31;01m%s\x1b[39;49;00m\n"
                    exit_message = "Errors in the userdata script"
                if show_output:
                    sys.stdout.write(patern % line)
                else:
                    progress()
            time.sleep(1)
            lindex = nindex + 1
            if exit_status is None:
                # Re-fetch the tail of the console log for the next round.
                output = server.get_console_output(30).splitlines()
        if not show_output:
            progress(result=exit_message)
        if exit_status is None:
            exit_status = False
        return exit_status
class ScenarioClass(ServerClass):
    """
    This is class for scenarios
    """
    @staticmethod
    def getInstance(subcmd):
        # Factory: map the sub-command name to the class implementing it.
        if subcmd == 'list':
            return ListScenarioClass()
        elif subcmd == 'templates':
            return TemplateScenarioClass()
        elif subcmd == 'boot':
            return BootScenarioClass()
        elif subcmd in ('del', 'delete', 'kill'):
            return DeleteScenarioClass()
        else:
            # Unknown/missing sub-command: fall back to showing the help.
            return ScenarioClass()

    def cmd(self, argv):
        self.help()
        return 0

    @catch_exception("The scenario doesn't exist.", heat_exceptions.NotFound)
    def get_scenario(self, id):
        # Look a heat stack (scenario) up by its name or ID.
        return self.heat.stacks.get(id)

    def help(self):
        print """
         Usage: 5minute scenarios <COMMAND> [PARAM]
         Managing scenaros
         COMMAND:
             help - show this help
             templates - show the list of templates
             list - show the list of scenarios
             boot - create new scenario/stack
             del|kill - delete scenario
         Examples:
             5minute scenarios help
             5minute scenarios templates
             5minute scenarios list
             5minute scenarios boot template1
             5minute scenarios boot --name myscenario template1
             5minute scenarios del myscenario
         """
class TemplateScenarioClass(ScenarioClass):
def __get_list_templates(self):
templates = list()
folder = os.path.expanduser(self._scenarios)
for file in os.listdir(folder):
if file.endswith(".yaml"):
templates.append(re.sub(r'\.yaml$', '', file))
return templates
def cmd(self, argv):
if len(argv) > 0 and argv.pop(0) in ('help', '--help', '-h'):
self.help()
return 0
else:
x = PrettyTable(["Name", ])
x.align["Name"] = "l"
for row in self.__get_list_templates():
print row
x.add_row([row, ])
print x.get_string(sortby="Name")
def help(self):
print """
Usage: 5minute scenarios templates
Show the list of available templates
Examples:
5minute scenarios templates
"""
class BootScenarioClass(ScenarioClass):
    """Sub-command that boots a new scenario (heat stack) from a template."""

    @catch_exception("Bad parameter. Please try 5minute scenario boot --help.")
    def cmd(self, argv):
        params = dict()
        opts, argv2 = getopt.getopt(argv, "n:h", ['name=', 'help'])
        for key, val in opts:
            if key in ('--help', '-h'):
                self.help()
                return
            elif key in ('--name', '-n'):
                params['name'] = val
            else:
                die("Bad parameter '%s'. Please try 5minute scenario boot --help." % key)
        if len(argv2) != 1:
            die("You have to set name of template. Please try 5minute scenario boot --help.")
        template_name = argv2.pop(0)
        if template_name == 'help':
            self.help()
            return
        params['template_name'] = template_name
        params['template'] = self.__get_template(template_name)
        self._check_key()
        self.__crate_stack(params)

    @catch_exception("Error: Problem with the loading of the template.")
    def __get_template(self, name):
        # Read the template file from the scenarios folder.
        template = None
        with open(os.path.expanduser("{folder}/{template}.yaml".format(folder=self._scenarios,
                                                                       template=name)), 'r') as tmd:
            template = tmd.read()
        return template

    def __crate_stack(self, params):
        """Create the heat stack and poll it until it leaves the
        CREATE_IN_PROGRESS state, printing its outputs on success."""
        progress(title="Creating of scenario:")
        # Stack name: <user>-<template name> unless --name was given.
        params['name'] = "%s-%s" % (USER, params['template_name'] if 'name' not in params else params['name'])
        current_biggest_network, free_ips = self.get_network()
        stack = self.heat.stacks.create(stack_name=params['name'], template=params['template'], parameters={
            'key_name': USER,
            'image': 'RHEL-6.5-Server-x86_64-released',
            'flavor': 'm1.medium',
            'public_net': current_biggest_network['id'],
            'prefix_name': params['name'],
            'private_net_cidr': '192.168.250.0/24',
            'private_net_gateway': '192.168.250.1',
            'private_net_pool_start': '192.168.250.10',
            'private_net_pool_end': '192.168.250.250'
        })
        uid = stack['stack']['id']
        stack = self.heat.stacks.get(stack_id=uid).to_dict()
        while stack['stack_status'] == 'CREATE_IN_PROGRESS':
            progress()
            stack = self.heat.stacks.get(stack_id=uid).to_dict()
            time.sleep(3)
        if stack['stack_status'] == 'CREATE_COMPLETE':
            progress(result="DONE")
            for it in stack['outputs']:
                print "{key}: {val}".format(key=it['output_key'], val=it['output_value'])
            print "Stack succesfully created."
        else:
            progress(result="FAIL")
            die("Stack fall to unknow status: {}".format(stack))

    def __get_count_free_ip(self, net, flist):
        """Return the number of unassigned floating IPs left in the pool
        of the external network *net*."""
        address_size = 32
        ip_pool_mask = int(net['name'].split("/")[1])
        ip_pool_bit_size = address_size - ip_pool_mask
        max_pool_size = 2 ** ip_pool_bit_size - 2
        return max_pool_size - len([ip_addr for ip_addr in flist if
                                    ip_addr.pool == net['name'] and ip_addr.instance_id])

    def get_network(self):
        """Return a tuple (external network with the most free floating
        IPs, size of that free space)."""
        max_network_space = 0
        current_biggest_network = None
        flist = self.nova.floating_ips.list()
        for net in self.neutron.list_networks()['networks']:
            if net.get('router:external') and len(net.get('subnets')) > 0:
                network_free_space = self.__get_count_free_ip(net, flist)
                if network_free_space > max_network_space:
                    max_network_space = network_free_space
                    current_biggest_network = net
        return (current_biggest_network, max_network_space)

    def help(self):
        print """
         Usage: 5minute scenarios boot [PARAM] <TEMPLATE-NAME>
         Boot new scenaro
         PARAM:
             -n, --name      Name of scenario
             <TEMPLATE-NAME> The name of template
         Examples:
             5minute scenarios boot template1
             5minute scenarios boot --name myscenario template1
         """
class ListScenarioClass(ScenarioClass):
    """Sub-command that lists the existing scenarios (heat stacks)."""

    def cmd(self, argv):
        # Without arguments only the current user's stacks are listed
        # (they are all prefixed with "<USER>-").
        filter = None
        if len(argv) == 0:
            filter = "%s-" % USER
        else:
            if argv[0] in ('help', '--help', '-h'):
                self.help()
                return 0
            elif argv[0] not in ('--all', '-a'):
                filter = argv[0]
        self.list_scenarios(filter)

    @catch_exception("The problem with getting of the list of scenarios.")
    def list_scenarios(self, filter):
        """Print a table of stacks matching the name filter
        (``None`` means all accessible stacks)."""
        scenarios = self.heat.stacks.list(search_opts={"name": filter})
        x = PrettyTable(["Name", "ID", "Status", "Template"])
        x.align["Name"] = "l"
        x.align["Template"] = "l"
        for ins in scenarios:
            # Show only the first 20 chars of the description's first line.
            row = [ins.stack_name, ins.id, ins.stack_status, ins.description.split("\n", 1)[0][0:20]]
            x.add_row(row)
        print x.get_string(sortby="Name")

    def help(self):
        print """
         Usage: 5minute scenarios list [PARAM]
         Show the list of scenarios. By default, it shows only your scenarios.
         PARAM:
             -a, --all      show all accessible scenarios
             <REGEXP>       we can use a regular expression for the filtering of the result
         Examples:
             5minute scenarios list
             5minute scenarios list --all
             5minute scenarios list satellite-infrastructure
         """
class DeleteScenarioClass(ScenarioClass):
    """
    This is only view on the ScenarioClass for deleting of a scenario.
    """
    def cmd(self, argv):
        if len(argv) == 0:
            die("Missing parameter. Please try 5minute scenario delete <name|id>.")
        else:
            if argv[0] in ('help', '--help', '-h'):
                self.help()
                return 0
            else:
                self.kill_scenario(argv[0])

    @catch_exception("The problem with deleting of the scenario.")
    def kill_scenario(self, id):
        # Resolve the stack by name/ID and ask heat to delete it.
        scenario = self.get_scenario(id)
        scenario.delete()

    def help(self):
        print """
         Usage: 5minute scenarios (del|kill|delete) <NAME|ID>
         Delete scenario.
         PARAM:
             <NAME|ID> The name of the scenario
         Examples:
             5minute scenarios delete 5minute-RHEL6
             5minute scenarios kill 5minute-RHEL6
         """
# -----------------------------------------------------------
# Manuals
# -----------------------------------------------------------
def main(argv):
    """Command-line entry point: dispatch argv[0] to a sub-command class."""
    # Fail early when the required OpenStack client modules are missing.
    if 'novaclient' not in sys.modules:
        die("Please install python-novaclient (maybe 'yum -y install python-novaclient'?)")
    if 'xmltodict' not in sys.modules:
        die("Please install python-xmltodict (maybe 'yum -y install python-xmltodict'?)")
    cmd = None
    if len(argv) > 0:
        cmd = argv.pop(0)
        if cmd in ('--debug', '-d'):
            # The global debug flag may precede the actual sub-command.
            global DEBUG
            DEBUG = True
            if len(argv) > 0:
                cmd = argv.pop(0)
    if cmd is None or cmd in ('help', '--help', '-h'):
        BaseClass().cmd(argv)
    elif cmd == 'key':
        KeyClass().cmd(argv)
    elif cmd == 'images':
        ImagesClass().cmd(argv)
    elif cmd == 'flavors':
        FlavorClass().cmd(argv)
    elif cmd == 'list':
        ListInstancesClass().cmd(argv)
    elif cmd in ('del', 'delete', 'kill'):
        DeleteInstanceClass().cmd(argv)
    elif cmd == 'boot':
        BootInstanceClass().cmd(argv)
    elif cmd in ('scenario', 'scenarios'):
        # Scenario sub-commands have their own second-level dispatch.
        scmd = None
        if len(argv) > 0:
            scmd = argv.pop(0)
        ScenarioClass.getInstance(scmd).cmd(argv)


if __name__ == "__main__":
    main(sys.argv[1:])
| 5minute | /5minute-0.2.1.tar.gz/5minute-0.2.1/vminute/vminute.py | vminute.py |
#!/usr/bin/python
from vminute import main
import sys
def main_main():
    # Console-script entry point: forward the CLI arguments (without the
    # program name) to vminute.main().
    main(sys.argv[1:])
| 5minute | /5minute-0.2.1.tar.gz/5minute-0.2.1/vminute/__init__.py | __init__.py |
Put your OpenStack scenarios here.
| 5minute | /5minute-0.2.1.tar.gz/5minute-0.2.1/vminute/scenarios/README | README |
from setuptools import find_packages, setup

# Package metadata for the 5o4drel5mk distribution.
setup(
    name='5o4drel5mk',
    packages=find_packages(),
    version='0.1',
    description='',
    author='',
    author_email='',
    url='',
    keywords=[],
    classifiers=[],
)
import requests
import json
from fivesim.errors import *
class FiveSim:
    """Minimal client for the 5sim.net HTTP API (v1).

    :param api_key: bearer token used for the authenticated endpoints
    :param proxy: ``requests``-style proxies mapping, or ``None``
    """

    def __init__(self, api_key, proxy):
        self.__api_key = api_key
        self.__proxy = proxy
        self.__session = requests.Session()
        # Base URLs of the three API groups.
        self.__auth_url: str = "https://5sim.net/v1/user/"
        self.__guest_url: str = "https://5sim.net/v1/guest/"
        self.__vendor_url: str = "https://5sim.net/v1/vendor/"
        self.__session.headers = {
            "Authorization": f"Bearer {self.__api_key}",
            "Accept": "application/json"
        }

    def __request(self, method, url):
        """Perform an API call and map error responses to exceptions.

        :returns: the parsed JSON body, or the raw text when the body is
            not valid JSON
        :raises ValueError: for an unsupported HTTP method
        :raises ApiKeyInvalidError: on HTTP 401
        :raises BadRequests: on HTTP 400 (carries the response text)
        :raises NoPhoneNumberError: when no number is in stock
        :raises LowBalanceError: when the account balance is too low
        """
        if method != "GET":
            # Previously an unsupported method crashed with a NameError on
            # the undefined response object; fail explicitly instead.
            raise ValueError(f"Unsupported HTTP method: {method!r}")
        resp = self.__session.get(url, proxies=self.__proxy)
        if resp.status_code == 401:
            raise ApiKeyInvalidError
        if resp.status_code == 400:
            raise BadRequests(resp.text)
        # 5sim reports some error conditions as plain-text bodies.
        if resp.text == "no free phones":
            raise NoPhoneNumberError("No number in stock")
        if resp.text == "not enough user balance":
            raise LowBalanceError("Not enough balance")
        try:
            return json.loads(resp.text)
        except json.JSONDecodeError:
            # Some endpoints answer with non-JSON text; return it verbatim.
            return resp.text

    def get_country_list(self) -> dict:
        """Return the countries supported by 5sim."""
        return self.__request("GET", f"{self.__guest_url}countries")

    def product_requests(self, country: str, operator: str) -> dict:
        """Return the products available for a country/operator pair."""
        return self.__request("GET", f"{self.__guest_url}products/{country}/{operator}")

    def price_requests(self) -> dict:
        """Return the full price list."""
        return self.__request("GET", f"{self.__guest_url}prices")

    def price_requests_by_country(self, country: str) -> dict:
        """Return prices filtered by country."""
        return self.__request("GET", f"{self.__guest_url}prices?country={country}")

    def price_requests_by_product(self, product: str) -> dict:
        """Return prices filtered by product."""
        return self.__request("GET", f"{self.__guest_url}prices?product={product}")

    def price_requests_by_country_and_product(self, country: str, product: str) -> dict:
        """Return prices filtered by both country and product."""
        return self.__request("GET", f"{self.__guest_url}prices?country={country}&product={product}")

    def get_balance(self) -> dict:
        """Return the authenticated user's profile (incl. balance)."""
        return self.__request("GET", f"{self.__auth_url}profile")

    def buy_number(self, country: str, operator: str, product: str) -> dict:
        """Buy an activation number; the order details are returned."""
        return self.__request("GET", f"{self.__auth_url}buy/activation/{country}/{operator}/{product}?ref=3b612d3c")

    def buy_hosting_number(self, country: str, operator: str, product: str) -> dict:
        """Buy a hosting number."""
        return self.__request("GET", f"{self.__auth_url}buy/hosting/{country}/{operator}/{product}")

    def rebuy_number(self, product: str, number: str) -> dict:
        """Re-buy (reuse) a previously used number."""
        return self.__request("GET", f"{self.__auth_url}reuse/{product}/{number}")

    def check_order(self, order_id: str) -> dict:
        """Check an order's state and received SMS."""
        return self.__request("GET", f"{self.__auth_url}check/{order_id}")

    def finish_order(self, order_id: str) -> dict:
        """Mark an order as finished."""
        return self.__request("GET", f"{self.__auth_url}finish/{order_id}")

    def cancel_order(self, order_id: str) -> dict:
        """Cancel an order."""
        return self.__request("GET", f"{self.__auth_url}cancel/{order_id}")

    def ban_order(self, order_id: str) -> dict:
        """Report the order's number as banned."""
        return self.__request("GET", f"{self.__auth_url}ban/{order_id}")

    def sms_inbox_list(self, order_id: str) -> dict:
        """Return the SMS inbox of an order."""
        return self.__request("GET", f"{self.__auth_url}sms/inbox/{order_id}")

    def btc_and_ltc_rates(self, currency: str) -> dict:
        """Return crypto exchange rates for *currency*."""
        return self.__request("GET", f"{self.__auth_url}payment/crypto/rates?currency={currency}")

    def address_payment(self, currency: str) -> dict:
        """Return a deposit address for *currency*."""
        return self.__request("GET", f"{self.__auth_url}payment/crypto/getaddress?currency={currency}")

    def get_notifications(self, lang: str) -> dict:
        """Return the flash notification in the given language."""
        return self.__request("GET", f"{self.__guest_url}flash/{lang}")

    def vendor_statics(self) -> dict:
        """Return vendor statistics for the authenticated user."""
        return self.__request("GET", f"{self.__auth_url}vendor")

    def wallet_reverse(self) -> dict:
        """Return the vendor wallets / reserves."""
        return self.__request("GET", f"{self.__vendor_url}wallets")
| 5sim-python | /5sim_python-1.0.3-py3-none-any.whl/fivesim/client.py | client.py |
class NoPhoneNumberError(Exception):
    """
    Raised when 5sim has no free phone number in stock
    (the API answers with "no free phones").
    """
    pass
class ApiKeyInvalidError(Exception):
    """
    Raised when the API responds with HTTP 401, i.e. the provided
    API key was rejected.
    """
    pass
class BadRequests(Exception):
    """
    Raised on HTTP 400 responses; carries the raw response text
    describing what was wrong with the request.
    """
    pass
class LowBalanceError(Exception):
    """
    Raised when the account balance is too low for the purchase
    (the API answers with "not enough user balance").
    """
    pass
| 5sim-python | /5sim_python-1.0.3-py3-none-any.whl/fivesim/errors.py | errors.py |
from fivesim.client import FiveSim | 5sim-python | /5sim_python-1.0.3-py3-none-any.whl/fivesim/__init__.py | __init__.py |
"""
Helper routines for catkin. These are distributed inside of rosdep2
to protect catkin against future rosdep2 API updates. These helper
routines are assumed to run in an interactive mode with an end-user
and thus return end-user oriented error messages.
Errors are returned as arguments to raised :exc:`ValidationFailed`
exceptions.
Workflow::
installer = get_installer(APT_INSTALLER)
view = get_catkin_view(rosdistro_name, 'ubuntu', 'lucid')
resolve_for_os(rosdep_key, view, installer, 'ubuntu', 'lucid')
"""
from __future__ import print_function
import os
from subprocess import Popen, PIPE, CalledProcessError
from . import create_default_installer_context
from .lookup import RosdepLookup
from .platforms.debian import APT_INSTALLER
from .platforms.osx import BREW_INSTALLER
from .platforms.pip import PIP_INSTALLER
from .platforms.redhat import YUM_INSTALLER
from .platforms.freebsd import PKG_INSTALLER
from .rep3 import download_targets_data
from .rosdistrohelper import get_targets
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import get_sources_list_dir, DataSourceMatcher, SourcesListLoader
class ValidationFailed(Exception):
    """Raised with an end-user oriented error message as its argument when
    a rosdep setup/validation check fails."""
    pass
def call(command, pipe=None):
    """Run *command* in the current directory, mimicking the output handling
    of catkin-generate-debian's call().

    :param pipe: passed as stdout/stderr to Popen (e.g. ``subprocess.PIPE``)
    :returns: the captured stdout when *pipe* is given, otherwise ``None``
    :raises: :exc:`CalledProcessError` if the command exits non-zero
    """
    child = Popen(command, stdout=pipe, stderr=pipe, cwd='.')
    captured, _ = child.communicate()
    if child.poll():
        raise CalledProcessError(child.returncode, command)
    return captured if pipe else None
def get_ubuntu_targets(rosdistro):
    """
    Get a list of Ubuntu distro codenames for the specified ROS
    distribution. This method blocks on an HTTP download.

    :raises: :exc:`ValidationFailed`
    """
    targets_data = get_targets()
    legacy_targets = download_targets_data()
    # Fold the pre-REP137 (legacy) targets for fuerte/electric into the
    # rosdistro-index based data so the old distributions keep working.
    if 'fuerte' in legacy_targets:
        targets_data['fuerte'] = {'ubuntu': legacy_targets['fuerte']}
    if 'electric' in legacy_targets:
        targets_data['electric'] = {'ubuntu': legacy_targets['electric']}
    return targets_data[rosdistro]['ubuntu']
def get_installer(installer_name):
    """ Expected installers APT_INSTALLER, YUM_INSTALLER, ..."""
    # A fresh default context is cheap enough for interactive use.
    installer_context = create_default_installer_context()
    return installer_context.get_installer(installer_name)
def resolve_for_os(rosdep_key, view, installer, os_name, os_version):
    """
    Resolve rosdep key to dependencies.

    :param os_name: OS name, e.g. 'ubuntu'
    :raises: :exc:`rosdep2.ResolutionError`
    """
    d = view.lookup(rosdep_key)
    ctx = create_default_installer_context()
    os_installers = ctx.get_os_installer_keys(os_name)
    default_os_installer = ctx.get_default_os_installer_key(os_name)
    inst_key, rule = d.get_rule_for_platform(os_name, os_version, os_installers, default_os_installer)
    # The selected rule must come from an installer the OS supports.
    assert inst_key in os_installers
    return installer.resolve(rule)
def update_rosdep():
    """Refresh the local rosdep database (output suppressed via PIPE)."""
    call(('rosdep', 'update'), pipe=PIPE)
def get_catkin_view(rosdistro_name, os_name, os_version, update=True):
    """Load the rosdep view for ``DEFAULT_VIEW_KEY`` restricted to the
    given rosdistro/OS/version combination.

    :param update: when True, run ``rosdep update`` first
    :raises: :exc:`ValidationFailed`
    """
    sources_list_dir = get_sources_list_dir()
    if not os.path.exists(sources_list_dir):
        raise ValidationFailed("""rosdep database is not initialized, please run:
\tsudo rosdep init
""")
    if update:
        update_rosdep()
    # Restrict the loaded sources to those tagged for this platform.
    sources_matcher = DataSourceMatcher([rosdistro_name, os_name, os_version])
    sources_loader = SourcesListLoader.create_default(matcher=sources_matcher)
    if not (sources_loader.sources):
        raise ValidationFailed("""rosdep database does not have any sources.
Please make sure you have a valid configuration in:
\t%s
""" % (sources_list_dir))
    # for vestigial reasons, using the roskg loader, but we're only
    # actually using the backend db as resolution is not resource-name based
    lookup = RosdepLookup.create_from_rospkg(sources_loader=sources_loader)
    return lookup.get_rosdep_view(DEFAULT_VIEW_KEY)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/catkin_support.py | catkin_support.py |
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import yaml
try:
import urlparse
except ImportError:
import urllib.parse as urlparse # py3k
import os
from rospkg.os_detect import OS_DEBIAN
from rospkg.os_detect import OS_FEDORA
from rospkg.os_detect import OS_OSX
from rospkg.os_detect import OS_UBUNTU
from .core import InvalidData, DownloadFailure
from .platforms.debian import APT_INSTALLER
from .platforms.osx import BREW_INSTALLER
from .platforms.redhat import YUM_INSTALLER
from .rosdistrohelper import get_targets, get_release_file, PreRep137Warning
from .rep3 import download_targets_data # deprecated, will output warning
import warnings
# Placeholder for the installer-context factory.
# NOTE(review): if nothing rebinds this at runtime, calling it in
# get_gbprepo_as_rosdep_data() would fail with a TypeError — confirm
# how this module is wired up.
create_default_installer_context = None

# py3k compatibility aliases
try:
    unicode
except NameError:
    basestring = unicode = str

# location of an example gbpdistro file for reference and testing
# (fixed: the URL previously contained a doubled 'https://' scheme and was
# missing the '/' before 'master', yielding an unfetchable address)
FUERTE_GBPDISTRO_URL = 'https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/' \
    'master/releases/fuerte.yaml'

# seconds to wait before aborting download of gbpdistro data
DOWNLOAD_TIMEOUT = 15.0
def get_owner_name(url):
    """
    Given a gbpdistro url, returns the name of the github user in the url.
    If the url is not a valid github url it returns the default `ros`.

    This information is used to set the homebrew tap name, see:
    https://github.com/ros-infrastructure/rosdep/pull/17
    :returns: The github account in the given gbpdistro url
    """
    try:
        pieces = urlparse.urlparse(url)
        if pieces.netloc == 'github.com':
            # First path component after the leading '/' is the account.
            return pieces.path.split('/')[1]
    except (ValueError, IndexError):
        pass
    return 'ros'
# For compatability url defaults to ''
def gbprepo_to_rosdep_data(gbpdistro_data, targets_data, url=''):
    """
    DEPRECATED: the rosdistro file format has changed according to REP137
    this function will yield a deprecation warning

    :raises: :exc:`InvalidData`
    """
    warnings.warn('deprecated: see REP137 and rosdistro', PreRep137Warning)
    # Error reporting for this isn't nearly as good as it could be
    # (e.g. doesn't separate gbpdistro vs. targets, nor provide
    # origin), but rushing this implementation a bit.
    try:
        if not type(targets_data) == dict:
            raise InvalidData('targets data must be a dict')
        if not type(gbpdistro_data) == dict:
            raise InvalidData('gbpdistro data must be a dictionary')
        if gbpdistro_data['type'] != 'gbp':
            raise InvalidData('gbpdistro must be of type "gbp"')
        # compute the default target data for the release_name
        release_name = gbpdistro_data['release-name']
        if release_name not in targets_data:
            raise InvalidData('targets file does not contain information '
                              'for release [%s]' % (release_name))
        else:
            # take the first match
            target_data = targets_data[release_name]
        # compute the rosdep data for each repo
        rosdep_data = {}
        gbp_repos = gbpdistro_data['repositories']
        # Ensure gbp_repos is a dict
        if type(gbp_repos) != dict:
            raise InvalidData('invalid repo spec in gbpdistro data: ' + str(gbp_repos) +
                              '. Invalid repositories entry, must be dict.')
        for rosdep_key, repo in gbp_repos.items():
            if type(repo) != dict:
                raise InvalidData('invalid repo spec in gbpdistro data: ' +
                                  str(repo))
            # When no explicit package list is given, the repo provides a
            # single package named after the rosdep key.
            for pkg in repo.get('packages', {rosdep_key: None}):
                rosdep_data[pkg] = {}
                # for pkg in repo['packages']: indent the rest of the lines here.
                # Do generation for ubuntu
                rosdep_data[pkg][OS_UBUNTU] = {}
                # Do generation for empty OS X entries
                homebrew_name = '%s/%s/%s' % (get_owner_name(url),
                                              release_name, rosdep_key)
                rosdep_data[pkg][OS_OSX] = {
                    BREW_INSTALLER: {'packages': [homebrew_name]}
                }
                # - debian package name: underscores must be dashes
                deb_package_name = 'ros-%s-%s' % (release_name, pkg)
                deb_package_name = deb_package_name.replace('_', '-')
                repo_targets = repo['target'] if 'target' in repo else 'all'
                if repo_targets == 'all':
                    repo_targets = target_data
                for t in repo_targets:
                    if not isinstance(t, basestring):
                        raise InvalidData('invalid target spec: %s' % (t))
                    # rosdep_data[pkg][OS_UBUNTU][t] = {
                    rosdep_data[pkg][OS_UBUNTU][t] = {
                        APT_INSTALLER: {'packages': [deb_package_name]}
                    }
                rosdep_data[pkg]['_is_ros'] = True
        return rosdep_data
    except KeyError as e:
        raise InvalidData('Invalid GBP-distro/targets format: missing key: ' +
                          str(e))
# REP137 compliant
def get_gbprepo_as_rosdep_data(gbpdistro):
    """
    Build rosdep-style resolution data for every package released in
    *gbpdistro*, using the REP 137 release file for that distribution.

    :param gbpdistro: name of the ROS distribution (also used as the release name)
    :returns: rosdep data dictionary keyed by package name
    :raises: :exc:`InvalidData`
    """
    release_name = gbpdistro
    distro_file = get_release_file(gbpdistro)
    ctx = create_default_installer_context()
    # per-OS default installer keys, looked up lazily and memoized
    default_installers = {}
    rosdep_data = {}
    for rosdep_key, repo in distro_file.repositories.items():
        for pkg in repo.package_names:
            entry = {}
            rosdep_data[pkg] = entry
            # following rosdep pull #17, use env var instead of github organization name
            tap = os.environ.get('ROSDEP_HOMEBREW_TAP', 'ros')
            # Do generation for empty OS X entries
            homebrew_name = '%s/%s/%s' % (tap, release_name, rosdep_key)
            entry[OS_OSX] = {
                BREW_INSTALLER: {'packages': [homebrew_name]}
            }
            # - package name: underscores must be dashes
            package_name = ('ros-%s-%s' % (release_name, pkg)).replace('_', '-')
            for os_name in distro_file.platforms:
                if os_name not in entry:
                    entry[os_name] = {}
                if os_name not in default_installers:
                    default_installers[os_name] = ctx.get_default_os_installer_key(os_name)
                for os_code_name in distro_file.platforms[os_name]:
                    entry[os_name][os_code_name] = {
                        default_installers[os_name]: {'packages': [package_name]}
                    }
            entry['_is_ros'] = True
    return rosdep_data
def download_gbpdistro_as_rosdep_data(gbpdistro_url, targets_url=None):
    """
    Download gbpdistro file from web and convert format to rosdep distro data.
    DEPRECATED: see REP137. This function will output
    (at least) one deprecation warning
    :param gbpdistro_url: url of gbpdistro file, ``str``
    :param targets_url: override URL of platform targets file
    :raises: :exc:`DownloadFailure`
    :raises: :exc:`InvalidData` If targets file does not pass cursory
      validation checks.
    """
    # we can convert a gbpdistro file into rosdep data by following a
    # couple rules
    # will output a warning
    targets_data = download_targets_data(targets_url=targets_url)
    try:
        response = urlopen(gbpdistro_url, timeout=DOWNLOAD_TIMEOUT)
        contents = response.read()
        response.close()
        gbpdistro_data = yaml.safe_load(contents)
        # will output a warning
        return gbprepo_to_rosdep_data(gbpdistro_data,
                                      targets_data,
                                      gbpdistro_url)
    except Exception as e:
        raise DownloadFailure('Failed to download target platform data '
                              'for gbpdistro:\n\t' + str(e))
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/gbpdistro_support.py | gbpdistro_support.py |
from __future__ import print_function
import os
import sys
try:
from catkin_pkg.packages import find_packages
except ImportError:
print('catkin_pkg was not detected, please install it.',
file=sys.stderr)
sys.exit(1)
# Package names in the user's catkin workspace, registered via
# set_workspace_packages(); resolution code skips these keys.
_catkin_workspace_packages = []
# Maps an absolute directory path to the list of package names found beneath
# it, so repeated crawls of the same tree are free.
_catkin_packages_cache = {}
# package.xml dependency categories this tool recognizes (presumably the
# values accepted by a --dependency-types option; confirm against callers).
VALID_DEPENDENCY_TYPES = {'build', 'buildtool', 'build_export', 'buildtool_export', 'exec', 'test', 'doc'}
def find_catkin_packages_in(path, verbose=False):
    """
    Find catkin packages beneath a directory, memoizing results per
    absolute path in ``_catkin_packages_cache``.

    :param path: directory to crawl for catkin packages
    :param verbose: if True, print progress information to stderr
    :returns: a list of package names in the given directory
    :raises: OSError if the path doesn't exist
    """
    if not os.path.exists(path):
        raise OSError("given path '{0}' does not exist".format(path))
    if verbose:
        print("Looking for packages in '{0}'... ".format(path),
              end='', file=sys.stderr)
    path = os.path.abspath(path)
    if path in _catkin_packages_cache:
        if verbose:
            print('found in cache.', file=sys.stderr)
        return _catkin_packages_cache[path]
    packages = find_packages(path)
    if isinstance(packages, dict) and packages:
        package_names = [package.name for package in packages.values()]
        if verbose:
            # route all verbose output to stderr, consistent with the
            # other diagnostics printed by this function
            print('found ' + str(len(packages)) + ' packages.', file=sys.stderr)
            for package in package_names:
                print(' {0}'.format(package), file=sys.stderr)
        _catkin_packages_cache[path] = package_names
        return package_names
    if verbose:
        print('failed to find packages.', file=sys.stderr)
    return []
def set_workspace_packages(packages):
    """Record the catkin workspace package names to skip during resolution."""
    global _catkin_workspace_packages
    # store a defensive copy; an empty/None argument clears the registry
    _catkin_workspace_packages = list(packages) if packages else []
def get_workspace_packages():
    """Return the catkin workspace package names registered via set_workspace_packages()."""
    return _catkin_workspace_packages
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/catkin_packages.py | catkin_packages.py |
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import yaml
import warnings
from .core import DownloadFailure
from .rosdistrohelper import PreRep137Warning
# location of targets file for processing gbpdistro files
# NOTE: the URL previously had a duplicated scheme ('https://https://...'),
# which made every download of the targets file fail; fixed here.
REP3_TARGETS_URL = 'https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/master/releases/targets.yaml'
# seconds to wait before aborting download of gbpdistro data
DOWNLOAD_TIMEOUT = 15.0
def download_targets_data(targets_url=None):
    """
    Download REP 3 targets file and unmarshal from YAML.
    DEPRECATED: this function is deprecated. List of targets should be obtained
    from the rosdistro module.
    The body of this function is an example.
    :param targets_url: override URL of platform targets file. Defaults
      to ``REP3_TARGETS_URL``.
    :returns: targets data as a dictionary mapping platform name to targets
    :raises: :exc:`DownloadFailure`
    :raises: :exc:`InvalidData` If targets file does not pass cursory validation checks.
    """
    warnings.warn('deprecated, use rosdistro instead', PreRep137Warning)
    if targets_url is None:
        targets_url = REP3_TARGETS_URL
    try:
        f = urlopen(targets_url, timeout=DOWNLOAD_TIMEOUT)
        text = f.read()
        f.close()
        targets_data = yaml.safe_load(text)
    except Exception as e:
        raise DownloadFailure('Failed to download target platform data for gbpdistro:\n\t%s' % (str(e)))
    # legacy targets files are a list of single-key dicts; normalize to one dict
    if isinstance(targets_data, list):
        new_targets_data = {}
        for t in targets_data:
            platform = list(t.keys())[0]
            new_targets_data[platform] = t[platform]
        targets_data = new_targets_data
    return targets_data
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/rep3.py | rep3.py |
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import hashlib
import os
import tempfile
from .core import CachePermissionError
try:
import cPickle as pickle
except ImportError:
import pickle
PICKLE_CACHE_EXT = '.pickle'
def compute_filename_hash(key_filenames):
    """Return a hex SHA-1 digest identifying a filename or list of filenames."""
    digest = hashlib.sha1()
    # a single filename is treated like a one-element list
    names = key_filenames if isinstance(key_filenames, list) else [key_filenames]
    for name in names:
        digest.update(name.encode())
    return digest.hexdigest()
def write_cache_file(source_cache_d, key_filenames, rosdep_data):
    """
    Pickle *rosdep_data* into the cache directory under a filename derived
    from a hash of *key_filenames*.

    :param source_cache_d: directory to write cache file to
    :param key_filenames: filename (or list of filenames) to be used in hashing
    :param rosdep_data: dictionary of data to serialize
    :returns: name of file where cache is stored (without the pickle extension)
    :raises: :exc:`OSError` if cannot write to cache file/directory
    :raises: :exc:`IOError` if cannot write to cache file/directory
    """
    if not os.path.exists(source_cache_d):
        os.makedirs(source_cache_d)
    cache_path = os.path.join(source_cache_d, compute_filename_hash(key_filenames))
    try:
        write_atomic(cache_path + PICKLE_CACHE_EXT, pickle.dumps(rosdep_data, 2), True)
    except OSError as e:
        raise CachePermissionError('Failed to write cache file: ' + str(e))
    # remove any stale extension-less cache file left by older versions
    try:
        os.unlink(cache_path)
    except OSError:
        pass
    return cache_path
def write_atomic(filepath, data, binary=False):
    """
    Write *data* to *filepath* as atomically as the platform allows.

    The data is first written to a uniquely-named temporary file in the same
    directory (so the final rename stays on one filesystem) and then moved
    into place with ``os.rename``, so readers never see a partial file.

    :param filepath: destination path
    :param data: contents to write (``str``, or ``bytes`` when *binary* is True)
    :param binary: if True, write the file in binary mode
    """
    # write data to new file; mkstemp returns an already-open descriptor
    fd, filepath_tmp = tempfile.mkstemp(prefix=os.path.basename(filepath) + '.tmp.', dir=os.path.dirname(filepath))
    fmode = 'wb' if binary else 'w'
    with os.fdopen(fd, fmode) as f:
        # the context manager closes the file; no explicit close() needed
        f.write(data)
    try:
        # switch file atomically (if supported)
        os.rename(filepath_tmp, filepath)
    except OSError:
        # fall back to non-atomic operation (e.g. on Windows, where rename
        # does not overwrite an existing destination)
        try:
            os.unlink(filepath)
        except OSError:
            pass
        try:
            os.rename(filepath_tmp, filepath)
        except OSError:
            os.unlink(filepath_tmp)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/cache_tools.py | cache_tools.py |
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com, Ken Conley/kwc@willowgarage.com
from __future__ import print_function
import sys
import yaml
from collections import defaultdict
from rospkg import RosPack, RosStack, ResourceNotFound
from .core import RosdepInternalError, InvalidData, rd_debug
from .model import RosdepDatabase
from .rospkg_loader import RosPkgLoader
from .dependency_graph import DependencyGraph
from .sources_list import SourcesListLoader
from . import catkin_packages
class RosdepDefinition(object):
    """
    Single rosdep dependency definition. This data is stored as the
    raw dictionary definition for the dependency.
    See REP 111, 'Multiple Package Manager Support for Rosdep' for a
    discussion of this raw format.
    """
    def __init__(self, rosdep_key, data, origin='<dynamic>'):
        """
        :param rosdep_key: key/name of rosdep dependency
        :param data: raw rosdep data for a single rosdep dependency, ``dict``
        :param origin: string that indicates where data originates from (e.g. filename)
        :raises: :exc:`InvalidData` if *data* is not a dictionary
        """
        self.rosdep_key = rosdep_key
        if not isinstance(data, dict):
            raise InvalidData('rosdep data for [%s] must be a dictionary' % (self.rosdep_key), origin=origin)
        self.data = data
        self.origin = origin
    def reverse_merge(self, new_data, origin='<dynamic>', verbose=False):
        """
        Merge two definitions together, with existing rules taking precedence.
        Definitions are merged at the os_name level, meaning that if two rules
        exist with the same os_name, the first one wins.
        :param new_data: raw rosdep data for a single rosdep dependency, ``dict``
        :param origin: string that indicates where this new data comes from (e.g. filename)
        :param verbose: if True, report each add/ignore decision on stderr
        """
        for os_name, rules in new_data.items():
            # only fill in OS entries that are not already defined
            if os_name not in self.data:
                if verbose:
                    print('[%s] adding rules for os [%s] to [%s]' % (origin, os_name, self.rosdep_key), file=sys.stderr)
                self.data[os_name] = rules
            elif verbose:
                print('[%s] ignoring [%s] for os [%s], already loaded' % (origin, self.rosdep_key, os_name), file=sys.stderr)
    def get_rule_for_platform(self, os_name, os_version, installer_keys, default_installer_key):
        """
        Get installer_key and rule for the specified rule. See REP 111 for precedence rules.
        :param os_name: OS name to get rule for
        :param os_version: OS version to get rule for
        :param installer_keys: Keys of installers for platform, ``[str]``
        :param default_installer_key: Default installer key for platform, ``[str]``
        :returns: (installer_key, rosdep_args_dict), ``(str, dict)``
        :raises: :exc:`ResolutionError` If no rule is available
        :raises: :exc:`InvalidData` If rule data is not valid
        """
        rosdep_key = self.rosdep_key
        data = self.data
        if type(data) != dict:
            raise InvalidData('rosdep value for [%s] must be a dictionary' % (self.rosdep_key), origin=self.origin)
        if os_name not in data:
            raise ResolutionError(rosdep_key, data, os_name, os_version, 'No definition of [%s] for OS [%s]' % (rosdep_key, os_name))
        data = data[os_name]
        return_key = default_installer_key
        # REP 111: rosdep first interprets the key as a
        # PACKAGE_MANAGER. If this test fails, it will be interpreted
        # as an OS_VERSION_CODENAME.
        if type(data) == dict:
            for installer_key in installer_keys:
                if installer_key in data:
                    data = data[installer_key]
                    return_key = installer_key
                    break
            else:
                # for/else: no PACKAGE_MANAGER key matched, so the keys here
                # are OS version codenames instead.
                # data must be a dictionary, string, or list
                if type(data) == dict:
                    # check for
                    # hardy:
                    #   apt:
                    #     stuff
                    # we've already checked for PACKAGE_MANAGER_KEY, so
                    # version key must be present here for data to be valid
                    # dictionary value.
                    # if the os_version is not defined and there is no wildcard
                    if os_version not in data and '*' not in data:
                        raise ResolutionError(rosdep_key, self.data, os_name, os_version, 'No definition of [%s] for OS version [%s]' % (rosdep_key, os_version))
                    # if the os_version has the value None
                    if os_version in data and data[os_version] is None:
                        raise ResolutionError(rosdep_key, self.data, os_name, os_version, '[%s] defined as "not available" for OS version [%s]' % (rosdep_key, os_version))
                    # if os version is not defined (and there is a wildcard) fallback to the wildcard
                    if os_version not in data:
                        os_version = '*'
                    data = data[os_version]
                    # the version entry may itself select a PACKAGE_MANAGER
                    if type(data) == dict:
                        for installer_key in installer_keys:
                            if installer_key in data:
                                data = data[installer_key]
                                return_key = installer_key
                                break
        # Check if the rule is null
        if data is None:
            raise ResolutionError(rosdep_key, self.data, os_name, os_version, '[%s] defined as "not available" for OS version [%s]' % (rosdep_key, os_version))
        if type(data) not in (dict, list, type('str')):
            raise InvalidData('rosdep OS definition for [%s:%s] must be a dictionary, string, or list: %s' % (self.rosdep_key, os_name, data), origin=self.origin)
        return return_key, data
    def __str__(self):
        return '%s:\n%s' % (self.origin, yaml.dump(self.data, default_flow_style=False))
class ResolutionError(Exception):
    """Raised when a rosdep key cannot be resolved for a given OS name/version."""
    def __init__(self, rosdep_key, rosdep_data, os_name, os_version, message):
        """
        :param rosdep_key: the rosdep key that failed to resolve
        :param rosdep_data: raw rule data for the key, possibly ``None``
        :param os_name: OS name resolution was attempted for
        :param os_version: OS version resolution was attempted for
        :param message: human-readable description of the failure
        """
        self.rosdep_key = rosdep_key
        self.rosdep_data = rosdep_data
        self.os_name = os_name
        self.os_version = os_version
        super(ResolutionError, self).__init__(message)
    def __str__(self):
        # pretty-print the rule data, indented under the 'Data:' label below
        if self.rosdep_data:
            pretty_data = yaml.dump(self.rosdep_data, default_flow_style=False)
        else:
            pretty_data = '<no data>'
        return """%s
\trosdep key : %s
\tOS name : %s
\tOS version : %s
\tData:\n%s""" % (self.args[0], self.rosdep_key, self.os_name, self.os_version, pretty_data.replace('\n', '\n\t\t'))
class RosdepView(object):
    """
    View of :class:`RosdepDatabase`. Unlike :class:`RosdepDatabase`,
    which stores :class:`RosdepDatabaseEntry` data for all stacks, a
    view merges entries for a particular stack. This view can then be
    queried to lookup and resolve individual rosdep dependencies.
    """
    def __init__(self, name):
        """Create an empty view identified by *name*."""
        self.name = name
        # maps rosdep name -> merged RosdepDefinition
        self.rosdep_defs = {}
    def __str__(self):
        lines = ['%s: %s' % item for item in self.rosdep_defs.items()]
        return '\n'.join(lines)
    def lookup(self, rosdep_name):
        """
        :returns: :class:`RosdepDefinition`
        :raises: :exc:`KeyError` If *rosdep_name* is not declared
        """
        return self.rosdep_defs[rosdep_name]
    def keys(self):
        """
        :returns: list of rosdep names in this view
        """
        return self.rosdep_defs.keys()
    def merge(self, update_entry, override=False, verbose=False):
        """
        Merge a rosdep database entry into this view. Merge rules are
        first entry to declare a key wins; there are no conflicts. This
        rule logic is modelled after the apt sources list.
        :param update_entry: database entry providing rosdep_data and origin
        :param override: Ignore first-one-wins rules and instead
          always use rules from update_entry
        :param verbose: print merge progress to stdout
        """
        if verbose:
            print('view[%s]: merging from cache of [%s]' % (self.name, update_entry.origin))
        definitions = self.rosdep_defs
        for dep_name, dep_data in update_entry.rosdep_data.items():
            # wrap the raw data in the RosdepDefinition model
            incoming = RosdepDefinition(dep_name, dep_data, update_entry.origin)
            if override or dep_name not in definitions:
                # first entry wins (unless override is requested)
                definitions[dep_name] = incoming
            else:
                # existing key: only fill in OS entries it lacks
                definitions[dep_name].reverse_merge(dep_data, update_entry.origin, verbose=verbose)
def prune_catkin_packages(rosdep_keys, verbose=False):
    """Remove, in place, any keys naming packages in the local catkin workspace.

    :returns: the (mutated) *rosdep_keys* list
    """
    workspace_pkgs = catkin_packages.get_workspace_packages()
    if not workspace_pkgs:
        return rosdep_keys
    # walk backwards so in-place deletion does not disturb remaining indices
    for index in reversed(range(len(rosdep_keys))):
        key = rosdep_keys[index]
        if key not in workspace_pkgs:
            continue
        # If workspace packages listed (--catkin-workspace)
        # and if the rosdep_key is a package in that
        # workspace, then skip it rather than resolve it
        if verbose:
            print("rosdep key '{0}'".format(key) +
                  ' is in the catkin workspace, skipping.',
                  file=sys.stderr)
        del rosdep_keys[index]
    return rosdep_keys
def prune_skipped_packages(rosdep_keys, skipped_keys, verbose=False):
    """Remove, in place, any keys the user explicitly asked to skip.

    :returns: the (mutated) *rosdep_keys* list
    """
    if not skipped_keys:
        return rosdep_keys
    # iterate in reverse so in-place deletion keeps earlier indices valid
    for index in reversed(range(len(rosdep_keys))):
        key = rosdep_keys[index]
        if key not in skipped_keys:
            continue
        if verbose:
            print("rosdep key '{0}'".format(key) +
                  ' was listed in the skipped packages, skipping.',
                  file=sys.stderr)
        del rosdep_keys[index]
    return rosdep_keys
class RosdepLookup(object):
    """
    Lookup rosdep definitions. Provides API for most
    non-install-related commands for rosdep.
    :class:`RosdepLookup` caches data as it is loaded, so changes made
    on the filesystem will not be reflected if the rosdep information
    has already been loaded.
    """
    def __init__(self, rosdep_db, loader):
        """
        :param loader: Loader to use for loading rosdep data by stack
          name, ``RosdepLoader``
        :param rosdep_db: Database to load definitions into, :class:`RosdepDatabase`
        """
        self.rosdep_db = rosdep_db
        self.loader = loader
        self._view_cache = {}  # {str: {RosdepView}}
        self._resolve_cache = {}  # {str : (os_name, os_version, installer_key, resolution, dependencies)}
        # some APIs that deal with the entire environment save errors
        # in to self.errors instead of raising them in order to be
        # robust to single-stack faults.
        self.errors = []
        # flag for turning on printing to console
        self.verbose = False
        # rosdep keys the user asked to skip; honored by resolve_all()
        self.skipped_keys = []
    def get_loader(self):
        """:returns: the loader this lookup was constructed with"""
        return self.loader
def get_errors(self):
"""
Retrieve error state for API calls that do not directly report
error state. This is the case for APIs like
:meth:`RosdepLookup.where_defined` that are meant to be
fault-tolerant to single-stack failures.
:returns: List of exceptions, ``[Exception]``
"""
return self.errors[:]
    def get_rosdeps(self, resource_name, implicit=True):
        """
        Get rosdeps that *resource_name* (e.g. package) requires.
        :param resource_name: name of the resource to look up
        :param implicit: If ``True``, include implicit rosdep
          dependencies. Default: ``True``.
        :returns: list of rosdep names, ``[str]``
        """
        # pure delegation: the loader knows how to read the resource's manifest
        return self.loader.get_rosdeps(resource_name, implicit=implicit)
def get_resources_that_need(self, rosdep_name):
"""
:param rosdep_name: name of rosdep dependency
:returns: list of package names that require rosdep, ``[str]``
"""
return [k for k in self.loader.get_loadable_resources() if rosdep_name in self.get_rosdeps(k, implicit=False)]
    @staticmethod
    def create_from_rospkg(rospack=None, rosstack=None,
                           sources_loader=None,
                           verbose=False, dependency_types=None):
        """
        Create :class:`RosdepLookup` based on current ROS package
        environment.
        :param rospack: (optional) Override :class:`rospkg.RosPack`
          instance used to crawl ROS packages.
        :param rosstack: (optional) Override :class:`rospkg.RosStack`
          instance used to crawl ROS stacks.
        :param sources_loader: (optional) Override SourcesLoader used
          for managing sources.list data sources.
        :param dependency_types: (optional) List of dependency types.
          Allowed: {'build', 'buildtool', 'build_export', 'buildtool_export', 'exec', 'test', 'doc'}
        :returns: a fully-initialized :class:`RosdepLookup`
        """
        # initialize the loader
        if rospack is None:
            rospack = RosPack()
        if rosstack is None:
            rosstack = RosStack()
        if sources_loader is None:
            sources_loader = SourcesListLoader.create_default(verbose=verbose)
        if dependency_types is None:
            dependency_types = []
        rosdep_db = RosdepDatabase()
        # Use sources list to initialize rosdep_db. Underlay has no
        # notion of specific resources, and its view keys are just the
        # individual sources it can load from. SourcesListLoader
        # cannot do delayed evaluation of OS setting due to matcher.
        underlay_key = SourcesListLoader.ALL_VIEW_KEY
        # Create the rospkg loader on top of the underlay
        loader = RosPkgLoader(rospack=rospack, rosstack=rosstack,
                              underlay_key=underlay_key, dependency_types=dependency_types)
        # create our actual instance
        lookup = RosdepLookup(rosdep_db, loader)
        # load in the underlay
        lookup._load_all_views(loader=sources_loader)
        # use dependencies to implement precedence
        view_dependencies = sources_loader.get_loadable_views()
        # the underlay entry carries no rosdep data of its own ({}); it only
        # records which source views it depends on
        rosdep_db.set_view_data(underlay_key, {}, view_dependencies, underlay_key)
        return lookup
    def resolve_all(self, resources, installer_context, implicit=False):
        """
        Resolve all the rosdep dependencies for *resources* using *installer_context*.
        :param resources: list of resources (e.g. packages), ``[str]``
        :param installer_context: :class:`InstallerContext`
        :param implicit: Install implicit (recursive) dependencies of
          resources. Default ``False``.
        :returns: (resolutions, errors), ``([(str, [str])], {str: ResolutionError})``. resolutions provides
          an ordered list of resolution tuples. A resolution tuple's first element is the installer
          key (e.g.: apt or homebrew) and the second element is a list of opaque resolution values for that
          installer. errors maps package names to an :exc:`ResolutionError` or :exc:`KeyError` exception.
        :raises: :exc:`RosdepInternalError` if unexpected error in constructing dependency graph
        :raises: :exc:`InvalidData` if a cycle occurs in constructing dependency graph
        """
        depend_graph = DependencyGraph()
        errors = {}
        # TODO: resolutions dictionary should be replaced with resolution model instead of mapping (undefined) keys.
        for resource_name in resources:
            try:
                rosdep_keys = self.get_rosdeps(resource_name, implicit=implicit)
                if self.verbose:
                    print('resolve_all: resource [%s] requires rosdep keys [%s]' % (resource_name, ', '.join(rosdep_keys)), file=sys.stderr)
                # drop keys that are workspace packages or explicitly skipped
                rosdep_keys = prune_catkin_packages(rosdep_keys, self.verbose)
                rosdep_keys = prune_skipped_packages(rosdep_keys, self.skipped_keys, self.verbose)
                for rosdep_key in rosdep_keys:
                    try:
                        installer_key, resolution, dependencies = \
                            self.resolve(rosdep_key, resource_name, installer_context)
                        depend_graph[rosdep_key]['installer_key'] = installer_key
                        depend_graph[rosdep_key]['install_keys'] = list(resolution)
                        depend_graph[rosdep_key]['dependencies'] = list(dependencies)
                        # transitively resolve every dependency of this key
                        while dependencies:
                            depend_rosdep_key = dependencies.pop()
                            # prevent infinite loop
                            if depend_rosdep_key in depend_graph:
                                continue
                            installer_key, resolution, more_dependencies = \
                                self.resolve(depend_rosdep_key, resource_name, installer_context)
                            dependencies.extend(more_dependencies)
                            depend_graph[depend_rosdep_key]['installer_key'] = installer_key
                            depend_graph[depend_rosdep_key]['install_keys'] = list(resolution)
                            depend_graph[depend_rosdep_key]['dependencies'] = list(more_dependencies)
                    except ResolutionError as e:
                        # record per-resource failure; keep resolving the rest
                        errors[resource_name] = e
            except ResourceNotFound as e:
                errors[resource_name] = e
        try:
            # TODO: I really don't like AssertionErrors here; this should be modeled as 'CyclicGraphError'
            # or something more explicit. No need to continue if this API errors.
            resolutions_flat = depend_graph.get_ordered_dependency_list()
        except AssertionError as e:
            raise InvalidData('cycle in dependency graph detected: %s' % (e))
        except KeyError as e:
            raise RosdepInternalError(e)
        return resolutions_flat, errors
    def resolve(self, rosdep_key, resource_name, installer_context):
        """
        Resolve a :class:`RosdepDefinition` for a particular
        os/version spec.
        :param rosdep_key: rosdep key to resolve
        :param resource_name: resource (e.g. ROS package) to resolve key within
        :param installer_context: :class:`InstallerContext` providing the
          detected OS name/version and the available installers
        :returns: *(installer_key, resolution, dependencies)*, ``(str,
          [opaque], [str])``. *resolution* are the system
          dependencies for the specified installer. The value is an
          opaque list and meant to be interpreted by the
          installer. *dependencies* is a list of rosdep keys that the
          definition depends on.
        :raises: :exc:`ResolutionError` If *rosdep_key* cannot be resolved for *resource_name* in *installer_context*
        :raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be located
        """
        os_name, os_version = installer_context.get_os_name_and_version()
        view = self.get_rosdep_view_for_resource(resource_name)
        if view is None:
            raise ResolutionError(rosdep_key, None, os_name, os_version, '[%s] does not have a rosdep view' % (resource_name))
        try:
            # print("KEYS", view.rosdep_defs.keys())
            definition = view.lookup(rosdep_key)
        except KeyError:
            rd_debug(view)
            raise ResolutionError(rosdep_key, None, os_name, os_version, 'Cannot locate rosdep definition for [%s]' % (rosdep_key))
        # check cache: the main motivation for the cache is that
        # source rosdeps are expensive to resolve
        # cache entries are tuples laid out as:
        #   (os_name, os_version, view_name, installer_key, resolution, dependencies)
        if rosdep_key in self._resolve_cache:
            cache_value = self._resolve_cache[rosdep_key]
            cache_os_name = cache_value[0]
            cache_os_version = cache_value[1]
            cache_view_name = cache_value[2]
            # only reuse the cached result if it was computed for the same
            # platform and the same merged view
            if (
                cache_os_name == os_name and
                cache_os_version == os_version and
                cache_view_name == view.name
            ):
                return cache_value[3:]
        # get the rosdep data for the platform
        try:
            installer_keys = installer_context.get_os_installer_keys(os_name)
            default_key = installer_context.get_default_os_installer_key(os_name)
        except KeyError:
            raise ResolutionError(rosdep_key, definition.data, os_name, os_version, 'Unsupported OS [%s]' % (os_name))
        installer_key, rosdep_args_dict = definition.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
        # resolve the rosdep data for the platform
        try:
            installer = installer_context.get_installer(installer_key)
        except KeyError:
            raise ResolutionError(rosdep_key, definition.data, os_name, os_version, 'Unsupported installer [%s]' % (installer_key))
        resolution = installer.resolve(rosdep_args_dict)
        dependencies = installer.get_depends(rosdep_args_dict)
        # cache value
        # the dependencies list is copied to prevent mutation before next cache hit
        self._resolve_cache[rosdep_key] = os_name, os_version, view.name, installer_key, resolution, list(dependencies)
        return installer_key, resolution, dependencies
    def _load_all_views(self, loader):
        """
        Load all available view keys. In general, this is equivalent
        to loading all stacks on the package path. If
        :exc:`InvalidData` errors occur while loading a view,
        they will be saved in the *errors* field.
        :param loader: override self.loader
        :raises: :exc:`RosdepInternalError`
        """
        for resource_name in loader.get_loadable_views():
            try:
                self._load_view_dependencies(resource_name, loader)
            except ResourceNotFound as e:
                # single-view failures are recorded, not raised (see class docs)
                self.errors.append(e)
            except InvalidData as e:
                self.errors.append(e)
    def _load_view_dependencies(self, view_key, loader):
        """
        Initialize internal :exc:`RosdepDatabase` on demand. Not
        thread-safe.
        :param view_key: name of view to load dependencies for.
        :param loader: loader used to fetch the view's data
        :raises: :exc:`rospkg.ResourceNotFound` If view cannot be located
        :raises: :exc:`InvalidData` if view's data is invalid
        :raises: :exc:`RosdepInternalError`
        """
        rd_debug('_load_view_dependencies[%s]' % (view_key))
        db = self.rosdep_db
        if db.is_loaded(view_key):
            # already in the database; loading is idempotent per view
            return
        try:
            loader.load_view(view_key, db, verbose=self.verbose)
            entry = db.get_view_data(view_key)
            rd_debug('_load_view_dependencies[%s]: %s' % (view_key, entry.view_dependencies))
            # recursively pull in everything this view depends on
            for d in entry.view_dependencies:
                self._load_view_dependencies(d, loader)
        except InvalidData:
            # mark view as loaded: as we are caching, the valid
            # behavior is to not attempt loading this view ever
            # again.
            db.mark_loaded(view_key)
            # re-raise
            raise
        except KeyError as e:
            raise RosdepInternalError(e)
def create_rosdep_view(self, view_name, view_keys, verbose=False):
"""
:param view_name: name of view to create
:param view_keys: order list of view names to merge, first one wins
:param verbose: print debugging output
"""
# Create view and initialize with dbs from all of the
# dependencies.
view = RosdepView(view_name)
db = self.rosdep_db
for view_key in view_keys:
db_entry = db.get_view_data(view_key)
view.merge(db_entry, verbose=verbose)
if verbose:
print('View [%s], merged views:\n' % (view_name) + '\n'.join([' * %s' % view_key for view_key in view_keys]), file=sys.stderr)
return view
def get_rosdep_view_for_resource(self, resource_name, verbose=False):
"""
Get a :class:`RosdepView` for a specific ROS resource *resource_name*.
Views can be queries to resolve rosdep keys to
definitions.
:param resource_name: Name of ROS resource (e.g. stack,
package) to create view for, ``str``.
:returns: :class:`RosdepView` for specific ROS resource
*resource_name*, or ``None`` if no view is associated with this resource.
:raises: :exc:`RosdepConflict` if view cannot be created due
to conflict rosdep definitions.
:raises: :exc:`rospkg.ResourceNotFound` if *view_key* cannot be located
:raises: :exc:`RosdepInternalError`
"""
view_key = self.loader.get_view_key(resource_name)
if not view_key:
# NOTE: this may not be the right behavior and this happens
# for packages that are not in a stack.
return None
return self.get_rosdep_view(view_key, verbose=verbose)
def get_rosdep_view(self, view_key, verbose=False):
    """
    Get a :class:`RosdepView` associated with *view_key*.  Views
    can be queried to resolve rosdep keys to definitions.

    :param view_key: Name of rosdep view (e.g. ROS stack name), ``str``

    :raises: :exc:`RosdepConflict` if view cannot be created due
      to conflicting rosdep definitions.
    :raises: :exc:`rospkg.ResourceNotFound` if *view_key* cannot be located
    :raises: :exc:`RosdepInternalError`
    """
    try:
        # fast path: view already built and memoized
        return self._view_cache[view_key]
    except KeyError:
        pass
    # lazy-init: pull this view and everything it depends on into the db
    self._load_view_dependencies(view_key, self.loader)
    try:
        dependencies = self.rosdep_db.get_view_dependencies(view_key)
    except KeyError as e:
        # convert to ResourceNotFound.  This should be decoupled
        # in the future
        raise ResourceNotFound(str(e.args[0]))
    # merge dependency views in order; the view itself is merged last
    view = self.create_rosdep_view(view_key, dependencies + [view_key], verbose=verbose)
    self._view_cache[view_key] = view
    return view
def get_views_that_define(self, rosdep_name):
    """
    Locate all views that directly define *rosdep_name*.  A
    side-effect of this method is that all available rosdep files
    in the configuration will be loaded into memory.

    Error state from single-stack failures
    (e.g. :exc:`InvalidData`, :exc:`ResourceNotFound`) is
    not propagated.  Caller must check
    :meth:`RosdepLookup.get_errors` to check for single-stack
    error state.  Error state does not reset -- it accumulates.

    :param rosdep_name: name of rosdep to lookup
    :returns: list of (stack_name, origin) where rosdep is defined.

    :raises: :exc:`RosdepInternalError`
    """
    # TODOXXX: change this to return errors object so that caller cannot ignore
    self._load_all_views(self.loader)
    db = self.rosdep_db
    # not much abstraction in the entry object: look directly at rosdep_data
    pairs = ((view_name, db.get_view_data(view_name)) for view_name in db.get_view_names())
    return [(view_name, entry.origin) for view_name, entry in pairs
            if rosdep_name in entry.rosdep_data]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/lookup.py | lookup.py |
# Single source of truth for the rosdep package version.
# The same version string must also be kept in sync in:
# - setup.py
# - stdeb.cfg
__version__ = '0.21.0'
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/_version.py | _version.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com, Ken Conley/kwc@willowgarage.com
from __future__ import print_function
import os
import subprocess
import traceback
from rospkg.os_detect import OsDetect
from .core import rd_debug, RosdepInternalError, InstallFailed, print_bold, InvalidData
# kwc: InstallerContext is basically just a bunch of dictionaries with
# defined lookup methods. It really encompasses two facets of a
# rosdep configuration: the pluggable nature of installers and
# platforms, as well as the resolution of the operating system for a
# specific machine. It is possible to decouple those two notions,
# though there are some touch points over how this interfaces with the
# rospkg.os_detect library, i.e. how platforms can tweak these
# detectors and how the higher-level APIs can override them.
class InstallerContext(object):
    """
    :class:`InstallerContext` manages the context of execution for rosdep as it
    relates to the installers, OS detectors, and other extensible
    APIs.
    """

    def __init__(self, os_detect=None):
        """
        :param os_detect: (optional)
            :class:`rospkg.os_detect.OsDetect` instance to use for
            detecting platforms.  If `None`, default instance will be
            used.
        """
        # platform configuration
        self.installers = {}  # installer_key -> Installer instance
        self.os_installers = {}  # os_key -> [installer_key]
        self.default_os_installer = {}  # os_key -> callable(os_detect) -> installer_key
        # stores configuration of which value to use for the OS version key (version number or codename)
        self.os_version_type = {}
        # OS detection and override
        if os_detect is None:
            os_detect = OsDetect()
        self.os_detect = os_detect
        self.os_override = None
        self.verbose = False

    def set_verbose(self, verbose):
        """Enable/disable debug printing for this context."""
        self.verbose = verbose

    def set_os_override(self, os_name, os_version):
        """
        Override the OS detector with *os_name* and *os_version*.  See
        :meth:`InstallerContext.detect_os`.

        :param os_name: OS name value to use, ``str``
        :param os_version: OS version value to use, ``str``
        """
        if self.verbose:
            print('overriding OS to [%s:%s]' % (os_name, os_version))
        self.os_override = os_name, os_version

    def get_os_version_type(self, os_name):
        """
        :returns: the callable used to extract the version key for *os_name*
          (defaults to :meth:`OsDetect.get_version`).
        """
        return self.os_version_type.get(os_name, OsDetect.get_version)

    def set_os_version_type(self, os_name, version_type):
        """
        Configure which value (version number vs. codename) is used as the
        version key for *os_name*.

        :param version_type: callable taking the OsDetect instance
        :raises: :exc:`ValueError` if *version_type* is not callable
        """
        if not hasattr(version_type, '__call__'):
            raise ValueError('version type should be a method')
        self.os_version_type[os_name] = version_type

    def get_os_name_and_version(self):
        """
        Get the OS name and version key to use for resolution and
        installation.  This will be the detected OS name/version
        unless :meth:`InstallerContext.set_os_override()` has been
        called.

        :returns: (os_name, os_version), ``(str, str)``
        """
        if self.os_override:
            return self.os_override
        else:
            os_name = self.os_detect.get_name()
            os_key = self.get_os_version_type(os_name)
            os_version = os_key(self.os_detect)
            return os_name, os_version

    def get_os_detect(self):
        """
        :returns os_detect: :class:`OsDetect` instance used for
          detecting platforms.
        """
        return self.os_detect

    def set_installer(self, installer_key, installer):
        """
        Set the installer to use for *installer_key*.  This will
        replace any existing installer associated with the key.
        *installer_key* should be the same key used for the
        ``rosdep.yaml`` package manager key.  If *installer* is
        ``None``, this will delete any existing associated installer
        from this context.

        :param installer_key: key/name to associate with installer, ``str``
        :param installer: :class:`Installer` implementation, ``class``.
        :raises: :exc:`TypeError` if *installer* is not a subclass of
          :class:`Installer`
        """
        if installer is None:
            del self.installers[installer_key]
            return
        if not isinstance(installer, Installer):
            # FIX: corrected grammar of error message ("a instance")
            raise TypeError('installer must be an instance of Installer')
        if self.verbose:
            print('registering installer [%s]' % (installer_key))
        self.installers[installer_key] = installer

    def get_installer(self, installer_key):
        """
        :returns: :class:`Installer` class associated with *installer_key*.
        :raises: :exc:`KeyError` If not associated installer
        :raises: :exc:`InstallFailed` If installer cannot produce an install command (e.g. if installer is not installed)
        """
        return self.installers[installer_key]

    def get_installer_keys(self):
        """
        :returns: list of registered installer keys
        """
        return self.installers.keys()

    def get_os_keys(self):
        """
        :returns: list of OS keys that have registered with this context, ``[str]``
        """
        return self.os_installers.keys()

    def add_os_installer_key(self, os_key, installer_key):
        """
        Register an installer for the specified OS.  This will fail
        with a :exc:`KeyError` if no :class:`Installer` can be found
        with the associated *installer_key*.

        :param os_key: Key for OS
        :param installer_key: Key for installer to add to OS
        :raises: :exc:`KeyError`: if installer for *installer_key*
          is not set.
        """
        # validate, will throw KeyError
        self.get_installer(installer_key)
        if self.verbose:
            print('add installer [%s] to OS [%s]' % (installer_key, os_key))
        if os_key in self.os_installers:
            self.os_installers[os_key].append(installer_key)
        else:
            self.os_installers[os_key] = [installer_key]

    def get_os_installer_keys(self, os_key):
        """
        Get list of installer keys registered for the specified OS.
        These keys can be resolved by calling
        :meth:`InstallerContext.get_installer`.

        :param os_key: Key for OS
        :raises: :exc:`KeyError`: if no information for OS *os_key* is registered.
        """
        if os_key in self.os_installers:
            # return a copy so callers cannot mutate our registry
            return self.os_installers[os_key][:]
        else:
            raise KeyError(os_key)

    def set_default_os_installer_key(self, os_key, installer_key):
        """
        Set the default OS installer to use for OS.
        :meth:`InstallerContext.add_os_installer` must have previously
        been called with the same arguments.

        :param os_key: Key for OS
        :param installer_key: Callable mapping the OsDetect instance to an
          installer key; its result must be registered for *os_key*.
        :raises: :exc:`KeyError`: if installer for *installer_key*
          is not set or if OS for *os_key* has no associated installers.
        :raises: :exc:`ValueError`: if *installer_key* is not callable
        """
        if os_key not in self.os_installers:
            raise KeyError('unknown OS: %s' % (os_key))
        if not hasattr(installer_key, '__call__'):
            # FIX: message previously said 'version type should be a method',
            # copy-pasted from set_os_version_type()
            raise ValueError('installer_key should be a method')
        if not installer_key(self.os_detect) in self.os_installers[os_key]:
            raise KeyError('installer [%s] is not associated with OS [%s]. call add_os_installer_key() first' % (installer_key(self.os_detect), os_key))
        if self.verbose:
            print('set default installer [%s] for OS [%s]' % (installer_key(self.os_detect), os_key,))
        self.default_os_installer[os_key] = installer_key

    def get_default_os_installer_key(self, os_key):
        """
        Get the default OS installer key to use for OS, or ``None`` if
        there is no default.

        :param os_key: Key for OS
        :returns: :class:`Installer`
        :raises: :exc:`KeyError`: if no information for OS *os_key* is registered.
        """
        if os_key not in self.os_installers:
            raise KeyError('unknown OS: %s' % (os_key))
        try:
            installer_key = self.default_os_installer[os_key](self.os_detect)
            if installer_key not in self.os_installers[os_key]:
                raise KeyError('installer [%s] is not associated with OS [%s]. call add_os_installer_key() first' % (installer_key, os_key))
            # validate, will throw KeyError
            self.get_installer(installer_key)
            return installer_key
        except KeyError:
            return None
class Installer(object):
    """
    Abstract base interface for rosdep installers.

    The :class:`Installer` API is designed around opaque *resolved*
    parameters.  These parameters can be any type of sequence object,
    but they must obey set arithmetic.  They should also implement
    ``__str__()`` methods so they can be pretty printed.
    """

    def resolve(self, rosdep_args_dict):
        """
        :param rosdep_args_dict: argument dictionary to the rosdep rule for this package manager
        :returns: [resolutions].  resolved objects should be printable to a user, but are otherwise opaque.
        """
        raise NotImplementedError('Base class resolve', rosdep_args_dict)

    def unique(self, *resolved_rules):
        """
        Combine the resolved rules into a unique list.  This
        is meant to combine the results of multiple calls to
        :meth:`PackageManagerInstaller.resolve`.

        Example::

            resolved1 = installer.resolve(args1)
            resolved2 = installer.resolve(args2)
            resolved = installer.unique(resolved1, resolved2)

        :param resolved_rules: resolved arguments.  Resolved
          arguments must all be from this :class:`Installer` instance.
        """
        raise NotImplementedError('Base class unique', resolved_rules)

    def is_installed(self, resolved_item):
        """
        :param resolved_item: a single resolved installation item.
          NOTE: unlike the other APIs, this is one item, not a list.
        :returns: ``True`` if the *resolved_item* is installed on
          the local system
        """
        raise NotImplementedError('is_installed', resolved_item)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """
        :param resolved: list of resolved installation items, ``[opaque]``
        :param interactive: if ``False``, disable interactive prompts,
          e.g. pass through ``-y`` or equivalent to the package manager.
        :param reinstall: if ``True``, install everything even if already installed
        """
        raise NotImplementedError('get_package_install_command', resolved, interactive, reinstall, quiet)

    def get_depends(self, rosdep_args):
        """
        :returns: list of dependencies on other rosdep keys.  Only
          necessary if the package manager doesn't handle
          dependencies.
        """
        # by default, assume the package manager resolves dependencies itself
        return []
class PackageManagerInstaller(Installer):
    """
    General form of a package manager :class:`Installer`
    implementation that assumes:

     - installer rosdep args spec is a list of package names stored with the key "packages"
     - a detect function exists that can return a list of packages that are installed

    Also, if *supports_depends* is set to ``True``:

     - installer rosdep args spec can also include dependency specification with the key "depends"
    """

    def __init__(self, detect_fn, supports_depends=False):
        """
        :param detect_fn: function that, for a given list of packages,
          determines the list of installed packages.
        :param supports_depends: package manager supports dependency key
        """
        self.detect_fn = detect_fn
        self.supports_depends = supports_depends
        self.as_root = True
        # no privilege escalation needed when already running as root
        self.sudo_command = '' if os.geteuid() == 0 else 'sudo -H'

    def elevate_priv(self, cmd):
        """
        Prepend *self.sudo_command* to the command if *self.as_root* is ``True``.

        :param list cmd: list of strings comprising the command
        :returns: a list of commands
        """
        prefix = self.sudo_command.split() if self.as_root else []
        return prefix + cmd

    def resolve(self, rosdep_args):
        """
        See :meth:`Installer.resolve()`
        """
        # NOTE: exact type() checks (not isinstance) are intentional for
        # dict/list to preserve strict validation of rule data.
        if type(rosdep_args) == dict:
            pkgs = rosdep_args.get('packages', [])
            return pkgs.split() if isinstance(pkgs, str) else pkgs
        if isinstance(rosdep_args, str):
            return rosdep_args.split(' ')
        if type(rosdep_args) == list:
            return rosdep_args
        raise InvalidData('Invalid rosdep args: %s' % (rosdep_args))

    def unique(self, *resolved_rules):
        """
        See :meth:`Installer.unique()`
        """
        merged = set()
        merged.update(*resolved_rules)
        return sorted(merged)

    def get_packages_to_install(self, resolved, reinstall=False):
        """
        Return a list of packages (out of *resolved*) that still need to get
        installed.
        """
        if reinstall:
            return resolved
        if not resolved:
            return []
        installed = self.detect_fn(resolved)
        return [pkg for pkg in resolved if pkg not in installed]

    def is_installed(self, resolved_item):
        """
        Check if a given package was installed.
        """
        return not self.get_packages_to_install([resolved_item])

    def get_version_strings(self):
        """
        Return a list of version information strings.

        Where each string is of the form "<installer> <version string>".
        For example, ["apt-get x.y.z"] or ["pip x.y.z", "setuptools x.y.z"].
        """
        raise NotImplementedError('subclasses must implement get_version_strings method')

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        # abstract: each concrete package manager builds its own command line
        raise NotImplementedError('subclasses must implement', resolved, interactive, reinstall, quiet)

    def get_depends(self, rosdep_args):
        """
        :returns: list of dependencies on other rosdep keys.  Only
          necessary if the package manager doesn't handle
          dependencies.
        """
        if self.supports_depends and type(rosdep_args) == dict:
            return rosdep_args.get('depends', [])
        return []
def normalize_uninstalled_to_list(uninstalled):
    """
    Flatten (installer_key, resolved) pairs into a flat list of
    package-name strings.

    :param uninstalled: list of ``(installer_key, resolved)`` tuples
      where *resolved* is either a single opaque resolution item or a
      list of them.
    :returns: list of ``str`` package names, in input order.
    """
    uninstalled_dependencies = []
    for pkg_or_list in [v for k, v in uninstalled]:
        if isinstance(pkg_or_list, list):
            for pkg in pkg_or_list:
                uninstalled_dependencies.append(str(pkg))
        else:
            # BUG FIX: previously appended the stale inner-loop variable
            # ``pkg`` instead of the current item, which duplicated the
            # previous entry (or raised NameError if no list came first).
            uninstalled_dependencies.append(str(pkg_or_list))
    return uninstalled_dependencies
class RosdepInstaller(object):
    """
    Drives installation of resolved rosdep dependencies by combining an
    :class:`InstallerContext` (installer/OS registry) with a lookup
    object that resolves rosdep keys to installer rules.
    """

    def __init__(self, installer_context, lookup):
        # installer_context: InstallerContext with registered installers
        # lookup: resolver providing resolve_all() (e.g. RosdepLookup)
        self.installer_context = installer_context
        self.lookup = lookup

    def get_uninstalled(self, resources, implicit=False, verbose=False):
        """
        Get list of system dependencies that have not been installed
        as well as a list of errors from performing the resolution.
        This is a bulk API in order to provide performance
        optimizations in checking install state.

        :param resources: List of resource names (e.g. ROS package names), ``[str]]``
        :param implicit: Install implicit (recursive) dependencies of
            resources.  Default ``False``.

        :returns: (uninstalled, errors), ``({str: [opaque]}, {str: ResolutionError})``.
            Uninstalled is a dictionary with the installer_key as the key.
        :raises: :exc:`RosdepInternalError`
        """
        installer_context = self.installer_context
        # resolutions have been unique()d
        if verbose:
            print('resolving for resources [%s]' % (', '.join(resources)))
        resolutions, errors = self.lookup.resolve_all(resources, installer_context, implicit=implicit)
        # for each installer, figure out what is left to install
        uninstalled = []
        if resolutions == []:
            return uninstalled, errors
        for installer_key, resolved in resolutions:  # py3k
            if verbose:
                print('resolution: %s [%s]' % (installer_key, ', '.join([str(r) for r in resolved])))
            try:
                installer = installer_context.get_installer(installer_key)
            except KeyError as e:  # lookup has to be buggy to cause this
                raise RosdepInternalError(e)
            try:
                packages_to_install = installer.get_packages_to_install(resolved)
            except Exception as e:
                rd_debug(traceback.format_exc())
                raise RosdepInternalError(e, message='Bad installer [%s]: %s' % (installer_key, e))
            # only create key if there is something to do
            if packages_to_install:
                uninstalled.append((installer_key, packages_to_install))
            if verbose:
                print('uninstalled: [%s]' % (', '.join([str(p) for p in packages_to_install])))
        return uninstalled, errors

    def install(self, uninstalled, interactive=True, simulate=False,
                continue_on_error=False, reinstall=False, verbose=False, quiet=False):
        """
        Install the uninstalled rosdeps.  This API is for the bulk
        workflow of rosdep (see example below).  For a more targeted
        install API, see :meth:`RosdepInstaller.install_resolved`.

        :param uninstalled: uninstalled value from
          :meth:`RosdepInstaller.get_uninstalled`.  Value is a
          dictionary mapping installer key to a dictionary with resolution
          data, ``{str: {str: vals}}``
        :param interactive: If ``False``, suppress
          interactive prompts (e.g. by passing '-y' to ``apt``).
        :param simulate: If ``False`` simulate installation
          without actually executing.
        :param continue_on_error: If ``True``, continue installation
          even if an install fails.  Otherwise, stop after first
          installation failure.
        :param reinstall: If ``True``, install dependencies if even
          already installed (default ``False``).

        :raises: :exc:`InstallFailed` if any rosdeps fail to install
          and *continue_on_error* is ``False``.
        :raises: :exc:`KeyError` If *uninstalled* value has invalid
          installer keys

        Example::

            uninstalled, errors = installer.get_uninstalled(packages)
            installer.install(uninstalled)
        """
        if verbose:
            print(
                'install options: reinstall[%s] simulate[%s] interactive[%s]' %
                (reinstall, simulate, interactive)
            )
        uninstalled_list = normalize_uninstalled_to_list(uninstalled)
        print('install: uninstalled keys are %s' % ', '.join(uninstalled_list))
        # Squash uninstalled again, in case some dependencies were already installed
        squashed_uninstalled = []
        previous_installer_key = None
        for installer_key, resolved in uninstalled:
            # merge consecutive entries for the same installer into one batch
            if previous_installer_key != installer_key:
                squashed_uninstalled.append((installer_key, []))
                previous_installer_key = installer_key
            squashed_uninstalled[-1][1].extend(resolved)
        failures = []
        for installer_key, resolved in squashed_uninstalled:
            try:
                self.install_resolved(installer_key, resolved, simulate=simulate,
                                      interactive=interactive, reinstall=reinstall, continue_on_error=continue_on_error,
                                      verbose=verbose, quiet=quiet)
            except InstallFailed as e:
                if not continue_on_error:
                    raise
                else:
                    # accumulate errors
                    failures.extend(e.failures)
        if failures:
            raise InstallFailed(failures=failures)

    def install_resolved(self, installer_key, resolved, simulate=False, interactive=True,
                         reinstall=False, continue_on_error=False, verbose=False, quiet=False):
        """
        Lower-level API for installing a rosdep dependency.  The
        rosdep keys have already been resolved to *installer_key* and
        *resolved* via :exc:`RosdepLookup` or other means.

        :param installer_key: Key for installer to apply to *resolved*, ``str``
        :param resolved: Opaque resolution list from :class:`RosdepLookup`.
        :param interactive: If ``True``, allow interactive prompts (default ``True``)
        :param simulate: If ``True``, don't execute installation commands, just print to screen.
        :param reinstall: If ``True``, install dependencies if even
          already installed (default ``False``).
        :param verbose: If ``True``, print verbose output to screen (default ``False``)
        :param quiet: If ``True``, supress output except for errors (default ``False``)

        :raises: :exc:`InstallFailed` if any of *resolved* fail to install.
        """
        installer_context = self.installer_context
        installer = installer_context.get_installer(installer_key)
        command = installer.get_install_command(resolved, interactive=interactive, reinstall=reinstall, quiet=quiet)
        if not command:
            if verbose:
                print('#No packages to install')
            return
        if simulate:
            print('#[%s] Installation commands:' % (installer_key))
            for sub_command in command:
                # a nested list encodes alternatives: any one of them suffices
                if isinstance(sub_command[0], list):
                    sub_cmd_len = len(sub_command)
                    for i, cmd in enumerate(sub_command):
                        print(" '%s' (alternative %d/%d)" % (' '.join(cmd), i + 1, sub_cmd_len))
                else:
                    print(' ' + ' '.join(sub_command))
        # nothing left to do for simulation
        if simulate:
            return

        def run_command(command, installer_key, failures, verbose):
            # always echo commands to screen
            print_bold('executing command [%s]' % ' '.join(command))
            result = subprocess.call(command)
            if verbose:
                print('command return code [%s]: %s' % (' '.join(command), result))
            if result != 0:
                failures.append((installer_key, 'command [%s] failed' % (' '.join(command))))
            return result

        # run each install command set and collect errors
        failures = []
        for sub_command in command:
            if isinstance(sub_command[0], list):  # list of alternatives
                alt_failures = []
                for alt_command in sub_command:
                    result = run_command(alt_command, installer_key, alt_failures, verbose)
                    if result == 0:  # one successful command is sufficient
                        alt_failures = []  # clear failures from other alternatives
                        break
                failures.extend(alt_failures)
            else:
                result = run_command(sub_command, installer_key, failures, verbose)
                if result != 0:
                    if not continue_on_error:
                        raise InstallFailed(failures=failures)
        # test installation of each
        for r in resolved:
            if not installer.is_installed(r):
                failures.append((installer_key, 'Failed to detect successful installation of [%s]' % (r)))
        # finalize result
        if failures:
            raise InstallFailed(failures=failures)
        elif verbose:
            print('#successfully installed')
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/installers.py | installers.py |
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Script for installing rdmanifest-described resources
"""
from __future__ import print_function
import os
import sys
from optparse import OptionParser
from rosdep2 import InstallFailed
from rosdep2.platforms import source
NAME = 'rosdep-source'
def install_main():
    """Entry point for the ``rosdep-source`` command-line tool.

    Expects exactly two positional arguments: the literal command
    ``install`` and an rdmanifest URL (or local file path).  Exits with
    status 1 if the installation fails.
    """
    parser = OptionParser(usage="usage: %prog install <rdmanifest-url>", prog=NAME)
    _, arguments = parser.parse_args()
    if len(arguments) != 2:
        parser.error("please specify one and only one rdmanifest url")
    command, manifest_location = arguments
    if command != 'install':
        parser.error("currently only support the 'install' command")
    try:
        # local paths are handled directly; everything else is fetched
        if os.path.isfile(manifest_location):
            source.install_from_file(manifest_location)
        else:
            source.install_from_url(manifest_location)
    except InstallFailed as e:
        print("ERROR: installation failed:\n%s" % e, file=sys.stderr)
        sys.exit(1)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/install.py | install.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import sys
import traceback
def rd_debug(s):
    """Print *s* only when the ``ROSDEP_DEBUG`` environment variable is set."""
    debug_enabled = 'ROSDEP_DEBUG' in os.environ
    if debug_enabled:
        print(s)
def print_bold(msg):
    """
    Echo *msg* to stdout with ANSI bold decoration for greater clarity.

    :param msg: message to print, ``str``
    """
    if sys.platform in ['win32']:
        # windows console is terrifically boring: no ANSI escape support
        print('%s' % msg)
    else:
        print('\033[1m%s\033[0m' % msg)
class InvalidData(Exception):
    """Raised when data is not in valid rosdep format."""

    def __init__(self, message, origin=None):
        # origin records where the bad data came from, when known
        super(InvalidData, self).__init__(message)
        self.origin = origin
class UnsupportedOs(Exception):
    """Raised when an operating system is not supported."""
    pass
class RosdepInternalError(Exception):
    """Wraps an unexpected exception that indicates a bug inside rosdep."""

    def __init__(self, e, message=None):
        self.error = e
        if message is not None:
            self.message = message
        else:
            # capture the active traceback as the displayed message
            self.message = traceback.format_exc()

    def __str__(self):
        return self.message
class CachePermissionError(Exception):
    """Failure when writing the cache."""
    # NOTE(review): likely hit when the cache directory is root-owned,
    # e.g. after running "rosdep update" with sudo — confirm; the
    # fix-permissions command exists for that case.
    pass
class DownloadFailure(Exception):
    """
    Failure downloading sources list data, due to I/O errors or
    malformed content.
    """
    pass
class InstallFailed(Exception):
    """Raised when one or more rosdep installations fail."""

    def __init__(self, failure=None, failures=None):
        """
        One of failure/failures must be set.

        :param failure: single (installer_key, message) tuple.
        :param failures: list of (installer_key, message) tuples
        """
        if failures is not None:
            self.failures = failures
        elif failure:
            self.failures = [failure]
        else:
            raise ValueError('failure is None')

    def __str__(self):
        lines = ['%s: %s' % (key, message) for (key, message) in self.failures]
        return '\n'.join(lines)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/core.py | core.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
"""
Command-line interface to rosdep library
"""
from __future__ import print_function
import errno
import os
import sys
import traceback
try:
from urllib.error import URLError
from urllib.request import build_opener
from urllib.request import HTTPBasicAuthHandler
from urllib.request import HTTPHandler
from urllib.request import install_opener
from urllib.request import ProxyHandler
except ImportError:
from urllib2 import build_opener
from urllib2 import HTTPBasicAuthHandler
from urllib2 import HTTPHandler
from urllib2 import install_opener
from urllib2 import ProxyHandler
from urllib2 import URLError
import warnings
from optparse import OptionParser
import rospkg
from . import create_default_installer_context, get_default_installer
from . import __version__
from .core import RosdepInternalError, InstallFailed, UnsupportedOs, InvalidData, CachePermissionError, DownloadFailure
from .installers import normalize_uninstalled_to_list
from .installers import RosdepInstaller
from .lookup import RosdepLookup, ResolutionError, prune_catkin_packages
from .meta import MetaDatabase
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import update_sources_list, get_sources_cache_dir,\
download_default_sources_list, SourcesListLoader, CACHE_INDEX,\
get_sources_list_dir, get_default_sources_list_file,\
DEFAULT_SOURCES_LIST_URL
from .rosdistrohelper import PreRep137Warning
from .ament_packages import AMENT_PREFIX_PATH_ENV_VAR
from .ament_packages import get_packages_with_prefixes
from .catkin_packages import find_catkin_packages_in
from .catkin_packages import set_workspace_packages
from .catkin_packages import get_workspace_packages
from .catkin_packages import VALID_DEPENDENCY_TYPES
from catkin_pkg.package import InvalidPackage
class UsageError(Exception):
    """Raised (or implied via optparse errors) on invalid command-line usage; rosdep_main prints _usage and exits with EX_USAGE."""
    pass
# Top-level usage/help text: passed to OptionParser and printed verbatim
# to stderr when a UsageError is raised.
_usage = """usage: rosdep [options] <command> <args>
Commands:
rosdep check <stacks-and-packages>...
check if the dependencies of package(s) have been met.
rosdep install <stacks-and-packages>...
download and install the dependencies of a given package or packages.
rosdep db
generate the dependency database and print it to the console.
rosdep init
initialize rosdep sources in /etc/ros/rosdep. May require sudo.
rosdep keys <stacks-and-packages>...
list the rosdep keys that the packages depend on.
rosdep resolve <rosdeps>
resolve <rosdeps> to system dependencies
rosdep update
update the local rosdep database based on the rosdep sources.
rosdep what-needs <rosdeps>...
print a list of packages that declare a rosdep on (at least
one of) <rosdeps>
rosdep where-defined <rosdeps>...
print a list of yaml files that declare a rosdep on (at least
one of) <rosdeps>
rosdep fix-permissions
Recursively change the permissions of the user's ros home directory.
May require sudo. Can be useful to fix permissions after calling
"rosdep update" with sudo accidentally.
"""
def _get_default_RosdepLookup(options):
    """
    Build the RosdepLookup instance implied by the parsed command-line
    *options* (cache dir, OS override, verbosity, dependency types).
    """
    override = convert_os_override_option(options.os_override)
    loader = SourcesListLoader.create_default(
        sources_cache_dir=options.sources_cache_dir,
        os_override=override,
        verbose=options.verbose)
    lookup = RosdepLookup.create_from_rospkg(
        sources_loader=loader, dependency_types=options.dependency_types)
    lookup.verbose = options.verbose
    return lookup
def rosdep_main(args=None):
    """
    Console entry point: run ``_rosdep_main`` and translate each known
    exception type into a user-facing error message and exit code.
    :param args: command-line arguments, or ``None`` to use ``sys.argv[1:]``
    """
    if args is None:
        args = sys.argv[1:]
    try:
        exit_code = _rosdep_main(args)
        # Handlers may return None (success) or an integer error code.
        if exit_code not in [0, None]:
            sys.exit(exit_code)
    except rospkg.ResourceNotFound as e:
        print("""
ERROR: rosdep cannot find all required resources to answer your query
%s
""" % (error_to_human_readable(e)), file=sys.stderr)
        sys.exit(1)
    except UsageError as e:
        print(_usage, file=sys.stderr)
        print('ERROR: %s' % (str(e)), file=sys.stderr)
        if hasattr(os, 'EX_USAGE'):
            sys.exit(os.EX_USAGE)
        else:
            sys.exit(64)  # EX_USAGE is not available on Windows; EX_USAGE is 64 on Unix
    except RosdepInternalError as e:
        print("""
ERROR: Rosdep experienced an internal error.
Please go to the rosdep page [1] and file a bug report with the message below.
[1] : http://www.ros.org/wiki/rosdep
rosdep version: %s
%s
""" % (__version__, e.message), file=sys.stderr)
        sys.exit(1)
    except ResolutionError as e:
        print("""
ERROR: %s
%s
""" % (e.args[0], e), file=sys.stderr)
        sys.exit(1)
    except CachePermissionError as e:
        print(str(e))
        print("Try running 'sudo rosdep fix-permissions'")
        sys.exit(1)
    except UnsupportedOs as e:
        print('Unsupported OS: %s\nSupported OSes are [%s]' % (e.args[0], ', '.join(e.args[1])), file=sys.stderr)
        sys.exit(1)
    except InvalidPackage as e:
        print(str(e))
        sys.exit(1)
    except Exception as e:
        # Catch-all boundary: show the traceback so users can file a bug.
        print("""
ERROR: Rosdep experienced an error: %s
rosdep version: %s
%s
""" % (e, __version__, traceback.format_exc()), file=sys.stderr)
        sys.exit(1)
def check_for_sources_list_init(sources_cache_dir):
    """
    Check to see if sources list and cache are present.
    *sources_cache_dir* alone is enough to pass as the user has the
    option of passing in a cache dir.
    If check fails, tell user how to resolve and sys exit.
    """
    cache_index = os.path.join(sources_cache_dir, CACHE_INDEX)
    if os.path.exists(cache_index):
        # Cache is initialized; nothing to do.
        return
    needed = ['rosdep update']
    list_dir = get_sources_list_dir()
    if not os.path.exists(list_dir):
        needed.insert(0, 'sudo rosdep init')
    elif not [f for f in os.listdir(list_dir) if f.endswith('.list')]:
        needed.insert(0, 'sudo rosdep init')
    if needed:
        formatted = '\n'.join([' %s' % c for c in needed])
        print("""
ERROR: your rosdep installation has not been initialized yet. Please run:
%s
""" % (formatted), file=sys.stderr)
        sys.exit(1)
    else:
        return True
def key_list_to_dict(key_list):
"""
Convert a list of strings of the form 'foo:bar' to a dictionary.
Splits strings of the form 'foo:bar quux:quax' into separate entries.
"""
try:
key_list = [key for s in key_list for key in s.split(' ')]
return dict(map(lambda s: [t.strip() for t in s.split(':')], key_list))
except ValueError as e:
raise UsageError("Invalid 'key:value' list: '%s'" % ' '.join(key_list))
def str_to_bool(s):
    """Maps a string to bool. Supports true/false, and yes/no, and is case-insensitive"""
    lowered = s.lower()
    if lowered in ('yes', 'true'):
        return True
    if lowered in ('no', 'false'):
        return False
    raise UsageError("Cannot parse '%s' as boolean" % lowered)
def setup_proxy_opener():
    """
    Install a global urllib opener honoring the http_proxy/https_proxy
    environment variables, if set.
    """
    # check for http[s]?_proxy user
    # Collect every configured scheme first and install ONE opener.
    # The previous implementation installed a separate opener per scheme;
    # install_opener() replaces the global opener, so with both
    # http_proxy and https_proxy set the http proxy was silently lost.
    proxies = {}
    for scheme in ['http', 'https']:
        key = scheme + '_proxy'
        if key in os.environ:
            proxies[scheme] = os.environ[key]
    if proxies:
        proxy = ProxyHandler(proxies)
        auth = HTTPBasicAuthHandler()
        opener = build_opener(proxy, auth, HTTPHandler)
        install_opener(opener)
def setup_environment_variables(ros_distro):
    """
    Set environment variables needed to find ROS packages and evaluate conditional dependencies.
    :param ros_distro: The requested ROS distro passed on the CLI, or None
    """
    if ros_distro is not None:
        if 'ROS_DISTRO' in os.environ and os.environ['ROS_DISTRO'] != ros_distro:
            # user has a different workspace sourced, use --rosdistro
            print('WARNING: given --rosdistro {} but ROS_DISTRO is "{}". Ignoring environment.'.format(
                ros_distro, os.environ['ROS_DISTRO']))
            # Use python version from --rosdistro
            # Drop the sourced workspace's python version so it is re-derived
            # below from the explicitly requested distro.
            if 'ROS_PYTHON_VERSION' in os.environ:
                del os.environ['ROS_PYTHON_VERSION']
        os.environ['ROS_DISTRO'] = ros_distro
    if 'ROS_PYTHON_VERSION' not in os.environ and 'ROS_DISTRO' in os.environ:
        # Set python version to version used by ROS distro
        python_versions = MetaDatabase().get('ROS_PYTHON_VERSION', default=[])
        if os.environ['ROS_DISTRO'] in python_versions:
            os.environ['ROS_PYTHON_VERSION'] = str(python_versions[os.environ['ROS_DISTRO']])
    if 'ROS_PYTHON_VERSION' not in os.environ:
        # Default to same python version used to invoke rosdep
        print('WARNING: ROS_PYTHON_VERSION is unset. Defaulting to {}'.format(sys.version[0]), file=sys.stderr)
        os.environ['ROS_PYTHON_VERSION'] = sys.version[0]
def _rosdep_main(args):
    """
    Parse command-line options, validate the requested verb, set up the
    environment, and dispatch to the matching argument handler.
    :param args: command-line arguments (without the program name)
    :returns: handler's exit code (int) or ``None``
    :raises: :exc:`UsageError` on invalid option values
    """
    # sources cache dir is our local database.
    default_sources_cache = get_sources_cache_dir()
    parser = OptionParser(usage=_usage, prog='rosdep')
    parser.add_option('--os', dest='os_override', default=None,
                      metavar='OS_NAME:OS_VERSION', help='Override OS name and version (colon-separated), e.g. ubuntu:lucid')
    parser.add_option('-c', '--sources-cache-dir', dest='sources_cache_dir', default=default_sources_cache,
                      metavar='SOURCES_CACHE_DIR', help='Override %s' % (default_sources_cache))
    parser.add_option('--verbose', '-v', dest='verbose', default=False,
                      action='store_true', help='verbose display')
    parser.add_option('--version', dest='print_version', default=False,
                      action='store_true', help='print just the rosdep version, then exit')
    parser.add_option('--all-versions', dest='print_all_versions', default=False,
                      action='store_true', help='print rosdep version and version of installers, then exit')
    parser.add_option('--reinstall', dest='reinstall', default=False,
                      action='store_true', help='(re)install all dependencies, even if already installed')
    parser.add_option('--default-yes', '-y', dest='default_yes', default=False,
                      action='store_true', help='Tell the package manager to default to y or fail when installing')
    parser.add_option('--simulate', '-s', dest='simulate', default=False,
                      action='store_true', help='Simulate install')
    parser.add_option('-r', dest='robust', default=False,
                      action='store_true', help='Continue installing despite errors.')
    parser.add_option('-q', dest='quiet', default=False,
                      action='store_true', help='Quiet. Suppress output except for errors.')
    parser.add_option('-a', '--all', dest='rosdep_all', default=False,
                      action='store_true', help='select all packages')
    parser.add_option('-n', dest='recursive', default=True,
                      action='store_false', help="Do not consider implicit/recursive dependencies.  Only valid with 'keys', 'check', and 'install' commands.")
    parser.add_option('--ignore-packages-from-source', '--ignore-src', '-i',
                      dest='ignore_src', default=False, action='store_true',
                      help="Affects the 'check', 'install', and 'keys' verbs. "
                      'If specified then rosdep will ignore keys that '
                      'are found to be catkin or ament packages anywhere in the '
                      'ROS_PACKAGE_PATH, AMENT_PREFIX_PATH or in any of the directories '
                      'given by the --from-paths option.')
    parser.add_option('--skip-keys',
                      dest='skip_keys', action='append', default=[],
                      help="Affects the 'check' and 'install' verbs. The "
                      'specified rosdep keys will be ignored, i.e. not '
                      'resolved and not installed. The option can be supplied multiple '
                      'times. A space separated list of rosdep keys can also '
                      'be passed as a string. A more permanent solution to '
                      'locally ignore a rosdep key is creating a local rosdep rule '
                      'with an empty list of packages (include it in '
                      '/etc/ros/rosdep/sources.list.d/ before the defaults).')
    parser.add_option('--filter-for-installers',
                      action='append', default=[],
                      help="Affects the 'db' verb. If supplied, the output of the 'db' "
                      'command is filtered to only list packages whose installer '
                      'is in the provided list. The option can be supplied '
                      'multiple times. A space separated list of installers can also '
                      'be passed as a string. Example: `--filter-for-installers "apt pip"`')
    parser.add_option('--from-paths', dest='from_paths',
                      default=False, action='store_true',
                      help="Affects the 'check', 'keys', and 'install' verbs. "
                      'If specified the arguments to those verbs will be '
                      'considered paths to be searched, acting on all '
                      'catkin packages found there in.')
    parser.add_option('--rosdistro', dest='ros_distro', default=None,
                      help='Explicitly sets the ROS distro to use, overriding '
                      'the normal method of detecting the ROS distro '
                      'using the ROS_DISTRO environment variable. '
                      "When used with the 'update' verb, "
                      'only the specified distro will be updated.')
    parser.add_option('--as-root', default=[], action='append',
                      metavar='INSTALLER_KEY:<bool>', help='Override '
                      'whether sudo is used for a specific installer, '
                      "e.g. '--as-root pip:false' or '--as-root \"pip:no homebrew:yes\"'. "
                      'Can be specified multiple times.')
    parser.add_option('--include-eol-distros', dest='include_eol_distros',
                      default=False, action='store_true',
                      help="Affects the 'update' verb. "
                      'If specified end-of-life distros are being '
                      'fetched too.')
    parser.add_option('-t', '--dependency-types', dest='dependency_types',
                      type="choice", choices=list(VALID_DEPENDENCY_TYPES),
                      default=[], action='append',
                      help='Dependency types to install, can be given multiple times. '
                      'Choose from {}. Default: all except doc.'.format(VALID_DEPENDENCY_TYPES))
    options, args = parser.parse_args(args)
    if options.print_version or options.print_all_versions:
        # First print the rosdep version.
        print('{}'.format(__version__))
        # If not printing versions of all installers, exit.
        if not options.print_all_versions:
            sys.exit(0)
        # Otherwise, Then collect the versions of the installers and print them.
        installers = create_default_installer_context().installers
        installer_keys = get_default_installer()[1]
        version_strings = []
        for key in installer_keys:
            if key == 'source':
                # Explicitly skip the source installer.
                continue
            installer = installers[key]
            try:
                installer_version_strings = installer.get_version_strings()
                assert isinstance(installer_version_strings, list), installer_version_strings
                version_strings.extend(installer_version_strings)
            except NotImplementedError:
                version_strings.append('{} unknown'.format(key))
                continue
            except EnvironmentError as e:
                # ENOENT means the installer binary is simply absent;
                # anything else is a genuine failure.
                if e.errno != errno.ENOENT:
                    raise
                version_strings.append('{} not installed'.format(key))
                continue
        if version_strings:
            print()
            print('Versions of installers:')
            print('\n'.join([' ' + x for x in version_strings if x]))
        else:
            print()
            print('No installers with versions available found.')
        sys.exit(0)
    # flatten list of skipped keys, filter-for-installers, and dependency types
    options.skip_keys = [key for s in options.skip_keys for key in s.split(' ')]
    options.filter_for_installers = [inst for s in options.filter_for_installers for inst in s.split(' ')]
    options.dependency_types = [dep for s in options.dependency_types for dep in s.split(' ')]
    if len(args) == 0:
        parser.error('Please enter a command')
    command = args[0]
    if command not in _commands:
        parser.error('Unsupported command %s.' % command)
    args = args[1:]
    # Convert list of keys to dictionary
    options.as_root = dict((k, str_to_bool(v)) for k, v in key_list_to_dict(options.as_root).items())
    if command not in ['init', 'update', 'fix-permissions']:
        check_for_sources_list_init(options.sources_cache_dir)
        # _package_args_handler uses `ROS_DISTRO`, so environment variables must be set before
        setup_environment_variables(options.ros_distro)
    elif command not in ['fix-permissions']:
        # 'init' and 'update' hit the network, so honor proxy settings.
        setup_proxy_opener()
    if command in _command_rosdep_args:
        return _rosdep_args_handler(command, parser, options, args)
    elif command in _command_no_args:
        return _no_args_handler(command, parser, options, args)
    else:
        return _package_args_handler(command, parser, options, args)
def _no_args_handler(command, parser, options, args):
    """Dispatch verbs (update, init, db, fix-permissions) that accept no positional args."""
    if args:
        parser.error('command [%s] takes no arguments' % (command))
        return
    return command_handlers[command](options)
def _rosdep_args_handler(command, parser, options, args):
    """Dispatch verbs whose positional arguments are rosdep keys."""
    # rosdep keys as args
    if options.rosdep_all:
        parser.error('-a, --all is not a valid option for this command')
    elif not args:
        parser.error("Please enter arguments for '%s'" % command)
    else:
        return command_handlers[command](args, options)
def _package_args_handler(command, parser, options, args):
    """
    Dispatch verbs whose positional arguments are package/stack names, or
    (with --from-paths) directories to crawl for catkin packages.
    :returns: handler exit code, or 1 if a --from-paths path does not exist
    :raises: :exc:`rospkg.ResourceNotFound` if a named package is unknown
    """
    if options.rosdep_all:
        if args:
            parser.error('cannot specify additional arguments with -a')
        else:
            # let the loader filter the -a. This will take out some
            # packages that are catkinized (for now).
            lookup = _get_default_RosdepLookup(options)
            loader = lookup.get_loader()
            args = loader.get_loadable_resources()
            not_found = []
    elif not args:
        parser.error('no packages or stacks specified')
    # package or stack names as args.  have to convert stack names to packages.
    # - overrides to enable testing
    packages = []
    not_found = []
    if options.from_paths:
        for path in args:
            if options.verbose:
                print("Using argument '{0}' as a path to search.".format(path))
            if not os.path.exists(path):
                print("given path '{0}' does not exist".format(path))
                return 1
            path = os.path.abspath(path)
            # Prepend the searched path to ROS_PACKAGE_PATH so later
            # lookups see the packages found there.
            if 'ROS_PACKAGE_PATH' not in os.environ:
                os.environ['ROS_PACKAGE_PATH'] = '{0}'.format(path)
            else:
                os.environ['ROS_PACKAGE_PATH'] = '{0}{1}{2}'.format(
                    path,
                    os.pathsep,
                    os.environ['ROS_PACKAGE_PATH']
                )
            pkgs = find_catkin_packages_in(path, options.verbose)
            packages.extend(pkgs)
        # Make packages list unique
        packages = list(set(packages))
    else:
        rospack = rospkg.RosPack()
        rosstack = rospkg.RosStack()
        val = rospkg.expand_to_packages(args, rospack, rosstack)
        packages = val[0]
        not_found = val[1]
    if not_found:
        raise rospkg.ResourceNotFound(not_found[0], rospack.get_ros_paths())
    # Handle the --ignore-src option
    if command in ['install', 'check', 'keys'] and options.ignore_src:
        if options.verbose:
            print('Searching ROS_PACKAGE_PATH for '
                  'sources: ' + str(os.environ['ROS_PACKAGE_PATH'].split(os.pathsep)))
        ws_pkgs = get_workspace_packages()
        for path in os.environ['ROS_PACKAGE_PATH'].split(os.pathsep):
            path = os.path.abspath(path.strip())
            if os.path.exists(path):
                pkgs = find_catkin_packages_in(path, options.verbose)
                ws_pkgs.extend(pkgs)
            elif options.verbose:
                print('Skipping non-existent path ' + path)
        set_workspace_packages(ws_pkgs)
        # Lookup package names from ament index.
        if AMENT_PREFIX_PATH_ENV_VAR in os.environ:
            if options.verbose:
                print(
                    'Searching ' + AMENT_PREFIX_PATH_ENV_VAR + ' for '
                    'sources: ' + str(os.environ[AMENT_PREFIX_PATH_ENV_VAR].split(':')))
            ws_pkgs = get_workspace_packages()
            pkgs = get_packages_with_prefixes().keys()
            ws_pkgs.extend(pkgs)
            # Make packages list unique
            ws_pkgs = list(set(ws_pkgs))
            set_workspace_packages(ws_pkgs)
    lookup = _get_default_RosdepLookup(options)
    # Handle the --skip-keys option by pretending that they are packages in the catkin workspace
    if command in ['install', 'check'] and options.skip_keys:
        if options.verbose:
            print('Skipping the specified keys:\n- ' + '\n- '.join(options.skip_keys))
        lookup.skipped_keys = options.skip_keys
    if 0 and not packages:  # disable, let individual handlers specify behavior
        # possible with empty stacks
        print('No packages in arguments, aborting')
        return
    return command_handlers[command](lookup, packages, options)
def convert_os_override_option(options_os_override):
    """
    Convert os_override option flag to ``(os_name, os_version)`` tuple, or
    ``None`` if not set
    :returns: ``(os_name, os_version)`` tuple if option is set, ``None`` otherwise
    :raises: :exc:`UsageError` if option is not set properly
    """
    if not options_os_override:
        return None
    # Split on the first ':' only; the version part may itself contain colons.
    os_name, sep, os_version = options_os_override.partition(':')
    if not sep:
        raise UsageError('OS override must be colon-separated OS_NAME:OS_VERSION, e.g. ubuntu:maverick')
    return os_name, os_version
def configure_installer_context(installer_context, options):
    """
    Configure the *installer_context* from *options*.
    - Override the OS detector in *installer_context* if necessary.
    - Set *as_root* for installers if specified.
    :raises: :exc:`UsageError` If user input options incorrectly
    """
    override = convert_os_override_option(options.os_override)
    if override is not None:
        installer_context.set_os_override(*override)
    for installer_key, as_root in options.as_root.items():
        try:
            installer_context.get_installer(installer_key).as_root = as_root
        except KeyError:
            raise UsageError("Installer '%s' not defined." % installer_key)
def change_name():
    # NOTE(review): this helper (not present in upstream rosdep) renames the
    # system-installed rosdep path and hard-links /usr/local/bin/fix-rosdep in
    # its place. Modifying files under /usr/bin from a library is highly
    # unusual and potentially destructive -- confirm this is intentional.
    try:
        # NOTE(review): after a successful rename, "/usr/bin/rosdep_src"
        # already exists, so os.link() below will raise and be silently
        # swallowed -- presumably a latent bug; verify the intended target.
        os.rename("/usr/bin/rosdep/","/usr/bin/rosdep_src")
        os.link("/usr/local/bin/fix-rosdep","/usr/bin/rosdep_src")
    except Exception as e:
        # Best-effort: any failure (missing paths, permissions) is ignored.
        pass
def command_init(options):
    """
    Implement the 'init' verb: download the default sources list and write
    it into the system sources.list.d directory.
    :returns: ``None`` on success; 2/3/4 error codes on failure
    """
    try:
        src_list = download_default_sources_list()
        # NOTE(review): this fork rewrites the upstream
        # raw.githubusercontent.com URLs to the TUNA mirror before writing
        # the sources file -- a deliberate deviation from upstream rosdep.
        data = src_list.replace("https://raw.githubusercontent.com/ros/rosdistro/master","mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/master")
    except URLError as e:
        print('ERROR: cannot download default sources list from:\n%s\nWebsite may be down.' % (DEFAULT_SOURCES_LIST_URL))
        return 4
    except DownloadFailure as e:
        print('ERROR: cannot download default sources list from:\n%s\nWebsite may be down.' % (DEFAULT_SOURCES_LIST_URL))
        print(e)
        return 4
    # reuse path variable for error message
    path = get_sources_list_dir()
    # Files created below should be world-readable regardless of caller umask.
    old_umask = os.umask(0o022)
    try:
        if not os.path.exists(path):
            os.makedirs(path)
        path = get_default_sources_list_file()
        if os.path.exists(path):
            # NOTE(review): upstream rosdep aborts here with return 1; this
            # fork deletes the existing default sources file and overwrites it.
            print('ERROR: default sources list file already exists:\n\t%s\nDelete !!' % (path))
            os.remove(path)
            # return 1
        with open(path, 'w') as f:
            f.write(data)
        print('Wrote %s' % (path))
        print('Recommended: please run\n\n\trosdep update\n')
    except IOError as e:
        print('ERROR: cannot create %s:\n\t%s' % (path, e), file=sys.stderr)
        return 2
    except OSError as e:
        print("ERROR: cannot create %s:\n\t%s\nPerhaps you need to run 'sudo rosdep init' instead" % (path, e), file=sys.stderr)
        return 3
    finally:
        # Always restore the caller's umask.
        os.umask(old_umask)
def command_update(options):
    """
    Implement the 'update' verb: re-download all data sources and rebuild
    the local rosdep cache.
    :returns: ``None`` on success, 1 on any failure
    """
    error_occured = []
    def update_success_handler(data_source):
        print('Hit %s' % (data_source.url))
    def update_error_handler(data_source, exc):
        # Record (and print) per-source failures; the overall update keeps
        # going and the collected errors decide the exit code at the end.
        error_string = 'ERROR: unable to process source [%s]:\n\t%s' % (data_source.url, exc)
        print(error_string, file=sys.stderr)
        error_occured.append(error_string)
    sources_list_dir = get_sources_list_dir()
    # disable deprecation warnings when using the command-line tool
    warnings.filterwarnings('ignore', category=PreRep137Warning)
    if not os.path.exists(sources_list_dir):
        print('ERROR: no sources directory exists on the system meaning rosdep has not yet been initialized.\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n')
        return 1
    filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
    if not filelist:
        print('ERROR: no data sources in %s\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n' % sources_list_dir, file=sys.stderr)
        return 1
    try:
        print('reading in sources list data from %s' % (sources_list_dir))
        sources_cache_dir = get_sources_cache_dir()
        try:
            # Warn (but continue) when running as root: the cache would be
            # written root-owned and break later non-root updates.
            if os.geteuid() == 0:
                print("Warning: running 'rosdep update' as root is not recommended.", file=sys.stderr)
                print("You should run 'sudo rosdep fix-permissions' and invoke 'rosdep update' again without sudo.", file=sys.stderr)
        except AttributeError:
            # nothing we wanna do under Windows
            pass
        update_sources_list(success_handler=update_success_handler,
                            error_handler=update_error_handler,
                            skip_eol_distros=not options.include_eol_distros,
                            ros_distro=options.ros_distro)
        print('updated cache in %s' % (sources_cache_dir))
    except InvalidData as e:
        print('ERROR: invalid sources list file:\n\t%s' % (e), file=sys.stderr)
        return 1
    except IOError as e:
        print('ERROR: error loading sources list:\n\t%s' % (e), file=sys.stderr)
        return 1
    except ValueError as e:
        print('ERROR: invalid argument value provided:\n\t%s' % (e), file=sys.stderr)
        return 1
    if error_occured:
        print('ERROR: Not all sources were able to be updated.\n[[[')
        for e in error_occured:
            print(e)
        print(']]]')
        return 1
def command_keys(lookup, packages, options):
    """Implement the 'keys' verb: print the rosdep keys *packages* depend on."""
    # NOTE: the passed-in lookup is discarded and rebuilt from options,
    # mirroring the original implementation.
    lookup = _get_default_RosdepLookup(options)
    keys = get_keys(lookup, packages, options.recursive)
    prune_catkin_packages(keys, options.verbose)
    _print_lookup_errors(lookup)
    print('\n'.join(keys))
def get_keys(lookup, packages, recursive):
    """Collect the union of rosdep keys for *packages* as a list (deduplicated)."""
    keys = set()  # a set guarantees each key appears only once
    for package_name in packages:
        keys.update(lookup.get_rosdeps(package_name, implicit=recursive))
    return list(keys)
def command_check(lookup, packages, options):
    """Implement the 'check' verb: report unsatisfied system dependencies."""
    verbose = options.verbose
    context = create_default_installer_context(verbose=verbose)
    configure_installer_context(context, options)
    rosdep_installer = RosdepInstaller(context, lookup)
    uninstalled, errors = rosdep_installer.get_uninstalled(
        packages, implicit=options.recursive, verbose=verbose)
    # pretty print the result
    if any(resolved for _, resolved in uninstalled):
        print('System dependencies have not been satisfied:')
        for installer_key, resolved in uninstalled:
            for r in resolved or []:
                print('%s\t%s' % (installer_key, r))
    else:
        print('All system dependencies have been satisfied')
    if errors:
        for package_name, ex in errors.items():
            if isinstance(ex, rospkg.ResourceNotFound):
                print('ERROR[%s]: resource not found [%s]' % (package_name, ex.args[0]), file=sys.stderr)
            else:
                print('ERROR[%s]: %s' % (package_name, ex), file=sys.stderr)
    return 1 if uninstalled else 0
def error_to_human_readable(error):
    """Render a lookup/resolution error as a short human-readable string."""
    if isinstance(error, rospkg.ResourceNotFound):
        return 'Missing resource %s' % (error,)
    if isinstance(error, ResolutionError):
        return '%s' % (error.args[0],)
    return '%s' % (error,)
def command_install(lookup, packages, options):
    """
    Implement the 'install' verb: resolve the uninstalled system
    dependencies of *packages* and install them.
    :returns: 0 on success, 1 on failure
    :raises: :exc:`RosdepInternalError`
    """
    # map options
    install_options = dict(interactive=not options.default_yes, verbose=options.verbose,
                           reinstall=options.reinstall,
                           continue_on_error=options.robust, simulate=options.simulate, quiet=options.quiet)
    # setup installer
    installer_context = create_default_installer_context(verbose=options.verbose)
    configure_installer_context(installer_context, options)
    installer = RosdepInstaller(installer_context, lookup)
    if options.reinstall:
        if options.verbose:
            print('reinstall is true, resolving all dependencies')
        try:
            # --reinstall resolves everything, not just what is missing.
            uninstalled, errors = lookup.resolve_all(packages, installer_context, implicit=options.recursive)
        except InvalidData as e:
            print('ERROR: unable to process all dependencies:\n\t%s' % (e), file=sys.stderr)
            return 1
    else:
        uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=options.verbose)
    if options.verbose:
        uninstalled_dependencies = normalize_uninstalled_to_list(uninstalled)
        print('uninstalled dependencies are: [%s]' % ', '.join(uninstalled_dependencies))
    if errors:
        err_msg = ('ERROR: the following packages/stacks could not have their '
                   'rosdep keys resolved\nto system dependencies')
        if rospkg.distro.current_distro_codename() is None:
            # A missing ROS distro is the most common cause; hint at the fix.
            err_msg += (
                ' (ROS distro is not set. '
                'Make sure `ROS_DISTRO` environment variable is set, or use '
                '`--rosdistro` option to specify the distro, '
                'e.g. `--rosdistro indigo`)'
            )
        print(err_msg + ':', file=sys.stderr)
        for rosdep_key, error in errors.items():
            print('%s: %s' % (rosdep_key, error_to_human_readable(error)), file=sys.stderr)
        if options.robust:
            print('Continuing to install resolvable dependencies...')
        else:
            return 1
    try:
        installer.install(uninstalled, **install_options)
        if not options.simulate:
            print('#All required rosdeps installed successfully')
        return 0
    except KeyError as e:
        raise RosdepInternalError(e)
    except InstallFailed as e:
        print('ERROR: the following rosdeps failed to install', file=sys.stderr)
        print('\n'.join([' %s: %s' % (k, m) for k, m in e.failures]), file=sys.stderr)
        return 1
def command_db(options):
    """
    Implement the 'db' verb: print the full key -> resolution database for
    the detected (or overridden) OS.
    :raises: :exc:`UnsupportedOs` if no installers are known for the OS
    """
    # exact same setup logic as command_resolve, should possibly combine
    lookup = _get_default_RosdepLookup(options)
    installer_context = create_default_installer_context(verbose=options.verbose)
    configure_installer_context(installer_context, options)
    os_name, os_version = installer_context.get_os_name_and_version()
    try:
        installer_keys = installer_context.get_os_installer_keys(os_name)
        default_key = installer_context.get_default_os_installer_key(os_name)
    except KeyError:
        raise UnsupportedOs(os_name, installer_context.get_os_keys())
    installer = installer_context.get_installer(default_key)
    print('OS NAME: %s' % os_name)
    print('OS VERSION: %s' % os_version)
    errors = []
    print('DB [key -> resolution]')
    # db does not leverage the resource-based API
    view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
    for rosdep_name in view.keys():
        try:
            d = view.lookup(rosdep_name)
            inst_key, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
            # honor --filter-for-installers: skip keys from other installers
            if options.filter_for_installers and inst_key not in options.filter_for_installers:
                continue
            resolved = installer.resolve(rule)
            resolved_str = ' '.join([str(r) for r in resolved])
            print('%s -> %s' % (rosdep_name, resolved_str))
        except ResolutionError as e:
            errors.append(e)
    # TODO: add command-line option for users to be able to see this.
    # This is useful for platform bringup, but useless for most users
    # as the rosdep db contains numerous, platform-specific keys.
    if 0:
        for error in errors:
            print('WARNING: %s' % (error_to_human_readable(error)), file=sys.stderr)
def _print_lookup_errors(lookup):
    """Report non-fatal lookup problems as warnings on stderr."""
    for error in lookup.get_errors():
        if isinstance(error, rospkg.ResourceNotFound):
            message = 'WARNING: unable to locate resource %s' % (str(error.args[0]))
        else:
            message = 'WARNING: %s' % (str(error))
        print(message, file=sys.stderr)
def command_what_needs(args, options):
    """Implement 'what-needs': print resources depending on any of the given keys."""
    lookup = _get_default_RosdepLookup(options)
    needers = []
    for rosdep_name in args:
        needers.extend(lookup.get_resources_that_need(rosdep_name))
    _print_lookup_errors(lookup)
    print('\n'.join(set(needers)))
def command_where_defined(args, options):
    """Implement 'where-defined': print the origins of the views defining the keys."""
    lookup = _get_default_RosdepLookup(options)
    locations = []
    for rosdep_name in args:
        locations.extend(lookup.get_views_that_define(rosdep_name))
    _print_lookup_errors(lookup)
    if not locations:
        print('ERROR: cannot find definition(s) for [%s]' % (', '.join(args)), file=sys.stderr)
        return 1
    for location in locations:
        # Each location is a (view, origin) pair; only the origin is shown.
        print(location[1])
def command_resolve(args, options):
    """
    Implement the 'resolve' verb: resolve each rosdep key in *args* to
    system package names for the current platform.
    :returns: ``None`` on success, 1 if any key had no rule
    """
    lookup = _get_default_RosdepLookup(options)
    installer_context = create_default_installer_context(verbose=options.verbose)
    configure_installer_context(installer_context, options)
    installer, installer_keys, default_key, \
        os_name, os_version = get_default_installer(installer_context=installer_context,
                                                    verbose=options.verbose)
    invalid_key_errors = []
    for rosdep_name in args:
        if len(args) > 1:
            # Label each key's output when resolving multiple keys.
            print('#ROSDEP[%s]' % rosdep_name)
        view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
        try:
            d = view.lookup(rosdep_name)
        except KeyError as e:
            invalid_key_errors.append(e)
            continue
        rule_installer, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
        installer = installer_context.get_installer(rule_installer)
        resolved = installer.resolve(rule)
        print('#%s' % (rule_installer))
        print(' '.join([str(r) for r in resolved]))
    for error in invalid_key_errors:
        print('ERROR: no rosdep rule for %s' % (error), file=sys.stderr)
    for error in lookup.get_errors():
        print('WARNING: %s' % (error_to_human_readable(error)), file=sys.stderr)
    if invalid_key_errors:
        return 1  # error exit code
def command_fix_permissions(options):
    """
    Implement the 'fix-permissions' verb: recursively chown the user's ROS
    home directory back to the invoking user (useful after an accidental
    'sudo rosdep update').
    """
    # pwd/grp are Unix-only; keep the imports local so the module remains
    # importable on Windows. (os/traceback are already module-level imports;
    # the previous redundant local imports were removed.)
    import pwd
    import grp
    # Derive the target owner from the user's home directory rather than
    # os.geteuid() so that running under 'sudo' still targets the real user.
    stat_info = os.stat(os.path.expanduser('~'))
    uid = stat_info.st_uid
    gid = stat_info.st_gid
    user_name = pwd.getpwuid(uid).pw_name
    try:
        group_name = grp.getgrgid(gid).gr_name
    except KeyError:
        # No group database entry for this gid; fall back to the numeric id.
        group_name = gid
    ros_home = rospkg.get_ros_home()
    print("Recursively changing ownership of ros home directory '{0}' "
          "to '{1}:{2}' (current user)...".format(ros_home, user_name, group_name))
    failed = []
    try:
        for dirpath, dirnames, filenames in os.walk(ros_home):
            try:
                os.lchown(dirpath, uid, gid)
            except Exception as e:
                failed.append((dirpath, str(e)))
            for f in filenames:
                # Join outside the try so 'path' is always bound when the
                # except clause records the failure.
                path = os.path.join(dirpath, f)
                try:
                    os.lchown(path, uid, gid)
                except Exception as e:
                    failed.append((path, str(e)))
    except Exception:
        traceback.print_exc()
        print('Failed to walk directory. Try with sudo?')
    else:
        if failed:
            print('Failed to change ownership for:')
            for p, e in failed:
                print('{0} --> {1}'.format(p, e))
            print('Try with sudo?')
        else:
            print('Done.')
# Map each CLI verb to its handler. Handlers receive different argument
# shapes depending on which _*_handler dispatches them (see _rosdep_main).
command_handlers = {
    'db': command_db,
    'check': command_check,
    'keys': command_keys,
    'install': command_install,
    'what-needs': command_what_needs,
    'where-defined': command_where_defined,
    'resolve': command_resolve,
    'init': command_init,
    'update': command_update,
    'fix-permissions': command_fix_permissions,
    # backwards compat
    'what_needs': command_what_needs,
    'where_defined': command_where_defined,
    'depdb': command_db,
}
# commands that accept rosdep names as args
_command_rosdep_args = ['what-needs', 'what_needs', 'where-defined', 'where_defined', 'resolve']
# commands that take no args
_command_no_args = ['update', 'init', 'db', 'fix-permissions']
# All recognized verbs (dict view; used for membership tests only).
_commands = command_handlers.keys()
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/main.py | main.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
from __future__ import print_function
import os
import sys
import stat
import subprocess
import tempfile
from .core import rd_debug
# True when running under Python 3; used below to decide whether
# subprocess output needs to be decoded from bytes to str.
python3 = sys.hexversion > 0x03000000
# Copy of the process environment with LANG forced to 'C' so that
# subprocess output is not localized and can be parsed reliably.
env = dict(os.environ)
env['LANG'] = 'C'
def read_stdout(cmd, capture_stderr=False):
    """
    Run *cmd* and return its standard output, optionally with stderr.

    :param cmd: command in a form that Popen understands (list of strings
        or one string)
    :param capture_stderr: if it evaluates to True, capture output from
        stderr as well and return it too. Otherwise stderr is not
        redirected and goes to the running terminal.
    :return: stdout of the program as a string when *capture_stderr* is
        false; otherwise a ``(stdout, stderr)`` tuple of strings.
    """
    # only redirect stderr when the caller asked for it
    stderr_dst = subprocess.PIPE if capture_stderr else None
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr_dst, env=env)
    out, err = proc.communicate()
    if python3:
        # Popen yields bytes on Python 3; callers expect str
        out = out.decode()
        if err is not None:
            err = err.decode()
    return (out, err) if capture_stderr else out
def create_tempfile_from_string_and_execute(string_script, path=None, exec_fn=None):
    """
    Write *string_script* to a temporary file, make it executable and run it.

    :param string_script: contents of the script to execute, ``str``
    :param path: (optional) path to temp directory, or ``None`` to use default temp directory, ``str``
    :param exec_fn: override subprocess.call with alternate executor (for testing)
    :returns: ``True`` if the script exited with return code 0

    NOTE(review): *path* is only used as the cwd for execution; the temp
    file itself is always created in the default temp directory.
    """
    if path is None:
        path = tempfile.gettempdir()
    result = 1
    try:
        # delete=False so the file survives close() and can be chmod'ed and
        # executed; it is removed explicitly in the finally block below.
        fh = tempfile.NamedTemporaryFile('w', delete=False)
        fh.write(string_script)
        fh.close()
        rd_debug('Executing script below with cwd=%s\n{{{\n%s\n}}}\n' % (path, string_script))
        try:
            # owner read/write/execute only
            os.chmod(fh.name, stat.S_IRWXU)
            if exec_fn is None:
                result = subprocess.call(fh.name, cwd=path)
            else:
                result = exec_fn(fh.name, cwd=path)
        except OSError as ex:
            # best-effort: report and fall through with non-zero result
            print('Execution failed with OSError: %s' % (ex))
    finally:
        # NOTE(review): if NamedTemporaryFile() itself raises, 'fh' is unbound
        # here and this raises NameError, masking the original error.
        if os.path.exists(fh.name):
            os.remove(fh.name)
    rd_debug('Return code was: %s' % (result))
    return result == 0
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/shell_utils.py | shell_utils.py |
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ken Conley/kwc@willowgarage.com
"""
Base API for loading rosdep information by package or stack name.
This API is decoupled from the ROS packaging system to enable multiple
implementations of rosdep, including ones that don't rely on the ROS
packaging system. This is necessary, for example, to implement a
version of rosdep that works against tarballs of released stacks.
"""
import yaml
from .core import InvalidData
ROSDEP_YAML = 'rosdep.yaml'
class RosdepLoader:
    """
    Base API for loading rosdep information by package or stack name.
    """

    def load_rosdep_yaml(self, yaml_contents, origin):
        """
        Utility routine for unmarshalling rosdep data encoded as YAML.

        :param yaml_contents: YAML document to parse, ``str``
        :param origin: origin of yaml contents (for error messages)
        :returns: parsed data
        :raises: :exc:`InvalidData` if *yaml_contents* is not valid YAML
        """
        try:
            return yaml.safe_load(yaml_contents)
        except yaml.YAMLError as e:
            raise InvalidData('Invalid YAML in [%s]: %s' % (origin, e), origin=origin)

    def load_view(self, view_name, rosdep_db, verbose=False):
        """
        Load view data into rosdep_db. If the view has already been
        loaded into rosdep_db, this method does nothing.

        :param view_name: name of ROS stack to load, ``str``
        :param rosdep_db: database to load stack data into, :class:`RosdepDatabase`

        :raises: :exc:`InvalidData`
        :raises: :exc:`rospkg.ResourceNotFound` if view cannot be located
        """
        raise NotImplementedError(view_name, rosdep_db, verbose)  # pychecker

    def get_loadable_resources(self):
        # abstract: subclasses return the list of resource (package) names
        raise NotImplementedError()

    def get_loadable_views(self):
        # abstract: subclasses return the list of view (stack) names
        raise NotImplementedError()

    def get_rosdeps(self, resource_name, implicit=True):
        """
        :raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be found.
        """
        raise NotImplementedError(resource_name, implicit)  # pychecker

    def get_view_key(self, resource_name):
        """
        Map *resource_name* to a view key. In rospkg, this maps a ROS
        package name to a ROS stack name. If *resource_name* is a ROS
        stack name, it returns the ROS stack name.

        :returns: Name of view that *resource_name* is in, ``None`` if no associated view.
        :raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be found.
        """
        raise NotImplementedError(resource_name)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/loader.py | loader.py |
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ken Conley/kwc@willowgarage.com
from __future__ import print_function
import os
import sys
import yaml
try:
from urllib.request import urlopen
from urllib.error import URLError
import urllib.request as request
except ImportError:
from urllib2 import urlopen
from urllib2 import URLError
import urllib2 as request
try:
import cPickle as pickle
except ImportError:
import pickle
from .cache_tools import compute_filename_hash, PICKLE_CACHE_EXT, write_atomic, write_cache_file
from .core import InvalidData, DownloadFailure, CachePermissionError
from .gbpdistro_support import get_gbprepo_as_rosdep_data, download_gbpdistro_as_rosdep_data
from .meta import MetaDatabase
from ._version import __version__
try:
import urlparse
except ImportError:
import urllib.parse as urlparse # py3k
try:
import httplib
except ImportError:
import http.client as httplib # py3k
import rospkg
import rospkg.distro
from .loader import RosdepLoader
from .rosdistrohelper import get_index, get_index_url
# default file to download with 'init' command in order to bootstrap
# rosdep
# NOTE: points at the TUNA (Tsinghua) mirror of ros/rosdistro rather than
# raw.githubusercontent.com
DEFAULT_SOURCES_LIST_URL = 'https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro/master/rosdep/sources.list.d/20-default.list'
# seconds to wait before aborting download of rosdep data
DOWNLOAD_TIMEOUT = 15.0
# subdirectory names for on-disk sources list and its download cache
SOURCES_LIST_DIR = 'sources.list.d'
SOURCES_CACHE_DIR = 'sources.cache'
# name of index file for sources cache
CACHE_INDEX = 'index'
# environment variable that overrides the sources list directories
# (an os.pathsep-separated list of directories)
SOURCE_PATH_ENV = 'ROSDEP_SOURCE_PATH'
def get_sources_list_dirs(source_list_dir):
    """
    Return the sources.list directories to consult.

    The ``ROSDEP_SOURCE_PATH`` environment variable (an
    ``os.pathsep``-separated list) overrides *source_list_dir* when set.
    Directories that do not exist are dropped from the result.
    """
    if SOURCE_PATH_ENV in os.environ:
        candidates = os.environ[SOURCE_PATH_ENV].split(os.pathsep)
    else:
        candidates = [source_list_dir]
    return [d for d in candidates if os.path.exists(d)]
def get_sources_list_dir():
    """
    Return the directory holding rosdep sources list files.

    Prefers the first existing directory honored by
    :func:`get_sources_list_dirs` (which consults ``ROSDEP_SOURCE_PATH``);
    falls back to the system-wide default even if it does not exist.
    """
    # base of where we read config files from
    # TODO: windows
    # The path is hard-coded instead of using rospkg.get_etc_ros_dir()
    # because environment config does not carry over under sudo.
    etc_ros = '/etc/ros'
    # compute default system wide sources directory
    sys_sources_list_dir = os.path.join(etc_ros, 'rosdep', SOURCES_LIST_DIR)
    sources_list_dirs = get_sources_list_dirs(sys_sources_list_dir)
    if sources_list_dirs:
        return sources_list_dirs[0]
    return sys_sources_list_dir
def get_default_sources_list_file():
    """Return the full path of the default '20-default.list' sources file."""
    return os.path.join(get_sources_list_dir(), '20-default.list')
def get_sources_cache_dir():
    """Return the sources cache directory under the rospkg ROS home directory."""
    ros_home = rospkg.get_ros_home()
    return os.path.join(ros_home, 'rosdep', SOURCES_CACHE_DIR)
# Default rosdep.yaml format. For now this is the only valid type and
# is specified for future compatibility.
TYPE_YAML = 'yaml'
# git-buildpackage repo list
TYPE_GBPDISTRO = 'gbpdistro'
VALID_TYPES = [TYPE_YAML, TYPE_GBPDISTRO]


class DataSource(object):
    """A single validated entry from a rosdep sources list."""

    def __init__(self, type_, url, tags, origin=None):
        """
        :param type_: data source type, e.g. TYPE_YAML, TYPE_GBPDISTRO

        :param url: URL of data location. For file resources, must
          start with the file:// scheme. For remote resources, URL
          must include a path.

        :param tags: tags for matching data source to configurations
        :param origin: filename or other indicator of where data came from for debugging.

        :raises: :exc:`ValueError` if parameters do not validate
        """
        # validate inputs
        if type_ not in VALID_TYPES:
            raise ValueError('type must be one of [%s]' % (','.join(VALID_TYPES)))
        parsed = urlparse.urlparse(url)
        if not parsed.scheme or (parsed.scheme != 'file' and not parsed.netloc) or parsed.path in ('', '/'):
            raise ValueError('url must be a fully-specified URL with scheme, hostname, and path: %s' % (str(url)))
        # isinstance (rather than an exact type(...) == list check) also
        # accepts list subclasses, which is backward compatible
        if not isinstance(tags, list):
            raise ValueError('tags must be a list: %s' % (str(tags)))

        self.type = type_
        self.tags = tags
        self.url = url
        self.origin = origin

    def __eq__(self, other):
        # equality requires another DataSource with all fields equal
        return isinstance(other, DataSource) and \
            self.type == other.type and \
            self.tags == other.tags and \
            self.url == other.url and \
            self.origin == other.origin

    def __str__(self):
        # sources-list line format, prefixed with the origin when known
        if self.origin:
            return '[%s]:\n%s %s %s' % (self.origin, self.type, self.url, ' '.join(self.tags))
        else:
            return '%s %s %s' % (self.type, self.url, ' '.join(self.tags))

    def __repr__(self):
        return repr((self.type, self.url, self.tags, self.origin))
class RosDistroSource(DataSource):
    """Data source for a ROS distribution listed in the rosdistro index."""

    def __init__(self, distro):
        # DataSource.__init__ is not called, so none of its URL/tag
        # validation runs here; fields are assigned directly.
        self.type = TYPE_GBPDISTRO
        self.tags = [distro]
        # In this case self.url is a list if REP-143 is being used
        self.url = get_index().distributions[distro]['distribution']
        self.origin = None
# create function we can pass in as model to parse_source_data. The
# function emulates the CachedDataSource constructor but does the
# necessary full filepath calculation and loading of data.
def cache_data_source_loader(sources_cache_dir, verbose=False):
    """
    Return a factory usable as the *model* argument of
    :func:`parse_sources_data`; the factory loads the cached rosdep data
    for each source from *sources_cache_dir*.
    """
    def create_model(type_, uri, tags, origin=None):
        # compute the filename hash from the URL
        filename = compute_filename_hash(uri)
        filepath = os.path.join(sources_cache_dir, filename)
        pickle_filepath = filepath + PICKLE_CACHE_EXT
        if os.path.exists(pickle_filepath):
            if verbose:
                print('loading cached data source:\n\t%s\n\t%s' % (uri, pickle_filepath), file=sys.stderr)
            with open(pickle_filepath, 'rb') as f:
                # NOTE(review): pickle.loads on a local cache file; presumably
                # written by 'rosdep update' (trusted), but do not point the
                # cache dir at untrusted data — verify.
                rosdep_data = pickle.loads(f.read())
        elif os.path.exists(filepath):
            # fall back to a YAML cache file when no pickle is present
            if verbose:
                print('loading cached data source:\n\t%s\n\t%s' % (uri, filepath), file=sys.stderr)
            with open(filepath) as f:
                rosdep_data = yaml.safe_load(f.read())
        else:
            # no cache entry for this source yet
            rosdep_data = {}
        return CachedDataSource(type_, uri, tags, rosdep_data, origin=filepath)
    return create_model
class CachedDataSource(object):
    """
    Pairs a :class:`DataSource` with the rosdep data loaded for it.

    NOTE: this is not a subclass of DataSource, though its API is
    duck-type compatible with the DataSource API (type/url/tags/origin
    are exposed as read-only properties delegating to the source).
    """

    def __init__(self, type_, url, tags, rosdep_data, origin=None):
        """
        Stores data source and loaded rosdep data for that source.

        NOTE: this is not a subclass of DataSource, though it's API is
        duck-type compatible with the DataSource API.
        """
        self.source = DataSource(type_, url, tags, origin=origin)
        self.rosdep_data = rosdep_data

    def __eq__(self, other):
        try:
            return self.source == other.source and \
                self.rosdep_data == other.rosdep_data
        except AttributeError:
            # *other* is not duck-type compatible
            return False

    def __str__(self):
        return '%s\n%s' % (self.source, self.rosdep_data)

    def __repr__(self):
        return repr((self.type, self.url, self.tags, self.rosdep_data, self.origin))

    @property
    def type(self):
        """
        :returns: data source type
        """
        return self.source.type

    @property
    def url(self):
        """
        :returns: data source URL
        """
        return self.source.url

    @property
    def tags(self):
        """
        :returns: data source tags
        """
        return self.source.tags

    @property
    def origin(self):
        """
        :returns: data source origin, if set, or ``None``
        """
        return self.source.origin
class DataSourceMatcher(object):
    """Filters data sources by tag against the current configuration."""

    def __init__(self, tags):
        self.tags = tags

    def matches(self, rosdep_data_source):
        """
        Check if the datasource matches this configuration.

        :param rosdep_data_source: :class:`DataSource`
        """
        # every (non-empty) tag on the data source must also be one of ours
        unmatched = set(rosdep_data_source.tags) - set(self.tags)
        return not any(unmatched)

    @staticmethod
    def create_default(os_override=None):
        """
        Create a :class:`DataSourceMatcher` to match the current
        configuration.

        :param os_override: (os_name, os_codename) tuple to override
          OS detection
        :returns: :class:`DataSourceMatcher`
        """
        distro_name = rospkg.distro.current_distro_codename()
        if os_override is not None:
            os_name, os_codename = os_override
        else:
            os_name, os_version, os_codename = rospkg.os_detect.OsDetect().detect_os()
        # drop empty/None entries so they never block a match
        tags = [t for t in (distro_name, os_name, os_codename) if t]
        return DataSourceMatcher(tags)
def download_rosdep_data(url):
    """
    Download and parse one rosdep data file.

    :param url: location of the rosdep data, ``str``
    :returns: parsed rosdep data, ``dict``
    :raises: :exc:`DownloadFailure` If data cannot be
        retrieved (e.g. 404, bad YAML format, server down).
    """
    try:
        # http/https URLs need custom requests to specify the user-agent, since some repositories reject
        # requests from the default user-agent.
        if url.startswith("http://") or url.startswith("https://"):
            url_request = request.Request(url, headers={'User-Agent': 'rosdep/{version}'.format(version=__version__)})
        else:
            url_request = url
        f = urlopen(url_request, timeout=DOWNLOAD_TIMEOUT)
        text = f.read()
        f.close()
        # safe_load: never evaluate arbitrary YAML tags from remote data
        data = yaml.safe_load(text)
        if type(data) != dict:
            raise DownloadFailure('rosdep data from [%s] is not a YAML dictionary' % (url))
        return data
    except (URLError, httplib.HTTPException) as e:
        raise DownloadFailure(str(e) + ' (%s)' % url)
    except yaml.YAMLError as e:
        raise DownloadFailure(str(e))
def download_default_sources_list(url=DEFAULT_SOURCES_LIST_URL):
    """
    Download (and validate) contents of default sources list.

    :param url: override URL of default sources list file
    :return: raw sources list data, ``str``
    :raises: :exc:`DownloadFailure` If data cannot be
        retrieved (e.g. 404, bad YAML format, server down).
    :raises: :exc:`urllib2.URLError` If data cannot be
        retrieved (e.g. 404, server down).
    """
    try:
        f = urlopen(url, timeout=DOWNLOAD_TIMEOUT)
    except (URLError, httplib.HTTPException) as e:
        # HTTPException is re-wrapped as URLError so callers only have to
        # handle one exception type for network failures
        raise URLError(str(e) + ' (%s)' % url)
    data = f.read().decode()
    f.close()
    if not data:
        raise DownloadFailure('cannot download defaults file from %s : empty contents' % url)
    # parse just for validation
    try:
        parse_sources_data(data)
    except InvalidData as e:
        raise DownloadFailure(
            'The content downloaded from %s failed to pass validation.'
            ' It is likely that the source is invalid unless the data was corrupted during the download.'
            ' The contents were:{{{%s}}} The error raised was: %s' % (url, data, e))
    return data
def parse_sources_data(data, origin='<string>', model=None):
    """
    Parse sources file format (tags optional)::

      # comments and empty lines allowed
      <type> <uri> [tags]

    e.g.::

      yaml http://foo/rosdep.yaml fuerte lucid ubuntu

    If tags are specified, *all* tags must match the current
    configuration for the sources data to be used.

    :param data: data in sources file format
    :param origin: origin of *data* (e.g. a filename), used in error messages
    :param model: model to load data into. Defaults to :class:`DataSource`

    :returns: List of data sources, [:class:`DataSource`]
    :raises: :exc:`InvalidData`
    """
    if model is None:
        model = DataSource

    sources = []
    for line in data.split('\n'):
        line = line.strip()
        # ignore empty lines or comments
        if not line or line.startswith('#'):
            continue
        # NOTE(review): split(' ') yields empty-string entries for runs of
        # spaces, so tags can contain '' — confirm whether whitespace
        # collapsing (line.split()) would be safe before changing this.
        splits = line.split(' ')
        if len(splits) < 2:
            raise InvalidData('invalid line:\n%s' % (line), origin=origin)
        type_ = splits[0]
        url = splits[1]
        tags = splits[2:]
        try:
            sources.append(model(type_, url, tags, origin=origin))
        except ValueError as e:
            raise InvalidData('line:\n\t%s\n%s' % (line, e), origin=origin)
    return sources
def parse_sources_file(filepath):
    """
    Parse a sources list file on disk.

    :param filepath: path of the sources list file to read, ``str``
    :returns: List of data sources, [:class:`DataSource`]
    :raises: :exc:`InvalidData` If any error occurs reading
        file, so an I/O error, non-existent file, or invalid format.
    """
    try:
        with open(filepath, 'r') as f:
            return parse_sources_data(f.read(), origin=filepath)
    except IOError as e:
        raise InvalidData('I/O error reading sources file: %s' % (str(e)), origin=filepath)
def parse_sources_list(sources_list_dir=None):
    """
    Parse data stored in on-disk sources list directory into a list of
    :class:`DataSource` for processing.

    :returns: List of data sources, [:class:`DataSource`]. If there is
        no sources list dir, this returns an empty list.
    :raises: :exc:`InvalidData`
    :raises: :exc:`OSError` if *sources_list_dir* cannot be read.
    :raises: :exc:`IOError` if *sources_list_dir* cannot be read.
    """
    if sources_list_dir is None:
        sources_list_dir = get_sources_list_dir()
    result = []
    for directory in get_sources_list_dirs(sources_list_dir):
        # only '*.list' files count; process them in sorted order per dir
        names = [name for name in os.listdir(directory) if name.endswith('.list')]
        for name in sorted(names):
            result.extend(parse_sources_file(os.path.join(directory, name)))
    return result
def _generate_key_from_urls(urls):
# urls may be a list of urls or a single string
try:
assert isinstance(urls, (list, basestring))
except NameError:
assert isinstance(urls, (list, str))
# We join the urls by the '^' character because it is not allowed in urls
return '^'.join(urls if isinstance(urls, list) else [urls])
def update_sources_list(sources_list_dir=None, sources_cache_dir=None,
                        success_handler=None, error_handler=None,
                        skip_eol_distros=False, ros_distro=None):
    """
    Re-downloaded data from remote sources and store in cache. Also
    update the cache index based on current sources.

    :param sources_list_dir: override source list directory
    :param sources_cache_dir: override sources cache directory
    :param success_handler: fn(DataSource) to call if a particular
        source loads successfully. This hook is mainly for printing
        errors to console.
    :param error_handler: fn(DataSource, DownloadFailure) to call
        if a particular source fails. This hook is mainly for
        printing errors to console.
    :param skip_eol_distros: skip downloading sources for EOL distros
    :param ros_distro: if not ``None``, only download sources for this
        ROS distro; raises :exc:`ValueError` if it is not in the index

    :returns: list of (`DataSource`, cache_file_path) pairs for cache
        files that were updated, ``[str]``

    :raises: :exc:`InvalidData` If any of the sources list files is invalid
    :raises: :exc:`OSError` if *sources_list_dir* cannot be read.
    :raises: :exc:`IOError` If *sources_list_dir* cannot be read or cache data cannot be written
    """
    if sources_cache_dir is None:
        sources_cache_dir = get_sources_cache_dir()

    sources = parse_sources_list(sources_list_dir=sources_list_dir)
    retval = []
    # iterate over a copy: legacy gbpdistro entries may be removed below
    for source in list(sources):
        try:
            if source.type == TYPE_YAML:
                rosdep_data = download_rosdep_data(source.url)
            elif source.type == TYPE_GBPDISTRO:  # DEPRECATED, do not use this file. See REP137
                if not source.tags[0] in ['electric', 'fuerte']:
                    print('Ignore legacy gbpdistro "%s"' % source.tags[0])
                    sources.remove(source)
                    continue  # do not store this entry in the cache
                rosdep_data = download_gbpdistro_as_rosdep_data(source.url)
            retval.append((source, write_cache_file(sources_cache_dir, source.url, rosdep_data)))
            if success_handler is not None:
                success_handler(source)
        except DownloadFailure as e:
            if error_handler is not None:
                error_handler(source, e)

    # Additional sources for ros distros
    # In compliance with REP137 and REP143
    python_versions = {}

    print('Query rosdistro index %s' % get_index_url())
    distribution_names = get_index().distributions.keys()
    if ros_distro is not None and ros_distro not in distribution_names:
        raise ValueError(
            'Requested distribution "%s" is not in the index.' % ros_distro)

    for dist_name in sorted(distribution_names):
        distribution = get_index().distributions[dist_name]
        if dist_name != ros_distro:
            if ros_distro is not None:
                print('Skip distro "%s" different from requested "%s"' % (dist_name, ros_distro))
                continue
            # EOL skipping only applies when no distro was explicitly requested
            if skip_eol_distros:
                if distribution.get('distribution_status') == 'end-of-life':
                    print('Skip end-of-life distro "%s"' % dist_name)
                    continue
        print('Add distro "%s"' % dist_name)
        rds = RosDistroSource(dist_name)
        rosdep_data = get_gbprepo_as_rosdep_data(dist_name)
        # Store Python version from REP153
        if distribution.get('python_version'):
            python_versions[dist_name] = distribution.get('python_version')
        # dist_files can either be a string (single filename) or a list (list of filenames)
        dist_files = distribution['distribution']
        key = _generate_key_from_urls(dist_files)
        retval.append((rds, write_cache_file(sources_cache_dir, key, rosdep_data)))
        sources.append(rds)

    # cache metadata that isn't a source list
    MetaDatabase().set('ROS_PYTHON_VERSION', python_versions)

    # Create a combined index of *all* the sources. We do all the
    # sources regardless of failures because a cache from a previous
    # attempt may still exist. We have to do this cache index so that
    # loads() see consistent data.
    if not os.path.exists(sources_cache_dir):
        os.makedirs(sources_cache_dir)
    cache_index = os.path.join(sources_cache_dir, CACHE_INDEX)
    data = "#autogenerated by rosdep, do not edit. use 'rosdep update' instead\n"
    for source in sources:
        url = _generate_key_from_urls(source.url)
        data += 'yaml %s %s\n' % (url, ' '.join(source.tags))
    write_atomic(cache_index, data)
    # mainly for debugging and testing
    return retval
def load_cached_sources_list(sources_cache_dir=None, verbose=False):
    """
    Load cached data based on the sources list.

    :param sources_cache_dir: override sources cache directory
    :param verbose: if True, print diagnostics to stderr
    :returns: list of :class:`CachedDataSource` instance with raw
        rosdep data loaded.
    :raises: :exc:`CachePermissionError` if the cache index exists but
        cannot be read due to permissions
    :raises: :exc:`OSError` if cache cannot be read
    :raises: :exc:`IOError` if cache cannot be read
    """
    if sources_cache_dir is None:
        sources_cache_dir = get_sources_cache_dir()
    # use the CACHE_INDEX constant instead of a duplicated 'index' literal
    cache_index = os.path.join(sources_cache_dir, CACHE_INDEX)
    if not os.path.exists(cache_index):
        if verbose:
            print('no cache index present, not loading cached sources', file=sys.stderr)
        return []
    try:
        with open(cache_index, 'r') as f:
            cache_data = f.read()
    except IOError as e:
        if e.strerror == 'Permission denied':
            # fixed message: this code path *reads* the cache index
            raise CachePermissionError('Failed to read cache file: ' + str(e))
        else:
            raise
    # the loader does all the work
    model = cache_data_source_loader(sources_cache_dir, verbose=verbose)
    return parse_sources_data(cache_data, origin=cache_index, model=model)
class SourcesListLoader(RosdepLoader):
    """
    SourcesList loader implements the general RosdepLoader API. This
    implementation is fairly simple as there is only one view the
    source list loader can create. It is also a bit degenerate as it
    is not capable of mapping resource names to views, thus any
    resource-name-based API fails or returns nothing interesting.

    This loader should not be used directly; instead, it is more
    useful composed with other higher-level implementations, like the
    :class:`rosdep2.rospkg_loader.RospkgLoader`. The general intent
    is to compose it with another loader by making all of the other
    loader's views depends on all the views in this loader.
    """

    # view key under which the combined view of all sources is stored
    ALL_VIEW_KEY = 'sources.list'

    def __init__(self, sources):
        """
        :param sources: cached sources list entries, [:class:`CachedDataSource`]
        """
        self.sources = sources

    @staticmethod
    def create_default(matcher=None, sources_cache_dir=None, os_override=None, verbose=False):
        """
        Create a :class:`SourcesListLoader` from the local sources cache,
        keeping only sources that match the current configuration.

        :param matcher: override DataSourceMatcher. Defaults to
            DataSourceMatcher.create_default().
        :param sources_cache_dir: override location of sources cache
        :param os_override: (os_name, os_codename) tuple to override OS detection
        :param verbose: if True, print diagnostics to stderr
        """
        if matcher is None:
            matcher = DataSourceMatcher.create_default(os_override=os_override)
        if verbose:
            print('using matcher with tags [%s]' % (', '.join(matcher.tags)), file=sys.stderr)
        sources = load_cached_sources_list(sources_cache_dir=sources_cache_dir, verbose=verbose)
        if verbose:
            print('loaded %s sources' % (len(sources)), file=sys.stderr)
        sources = [x for x in sources if matcher.matches(x)]
        if verbose:
            print('%s sources match current tags' % (len(sources)), file=sys.stderr)
        return SourcesListLoader(sources)

    def load_view(self, view_name, rosdep_db, verbose=False):
        """
        Load view data into rosdep_db. If the view has already been
        loaded into rosdep_db, this method does nothing.

        :param view_name: name of ROS stack to load, ``str``
        :param rosdep_db: database to load stack data into, :class:`RosdepDatabase`

        :raises: :exc:`InvalidData`
        """
        if rosdep_db.is_loaded(view_name):
            return
        source = self.get_source(view_name)
        if verbose:
            print('loading view [%s] with sources.list loader' % (view_name), file=sys.stderr)
        view_dependencies = self.get_view_dependencies(view_name)
        rosdep_db.set_view_data(view_name, source.rosdep_data, view_dependencies, view_name)

    def get_loadable_resources(self):
        # this loader defines no concrete resources
        return []

    def get_loadable_views(self):
        # every cached source URL is a view
        return [x.url for x in self.sources]

    def get_view_dependencies(self, view_name):
        # use dependencies to implement precedence
        if view_name != SourcesListLoader.ALL_VIEW_KEY:
            # if the view_name matches one of our sources, return
            # empty list as none of our sources has deps.
            if any([x for x in self.sources if view_name == x.url]):
                return []
        # not one of our views, so it depends on everything we provide
        return [x.url for x in self.sources]

    def get_source(self, view_name):
        # look up the cached source whose URL is *view_name*;
        # raises rospkg.ResourceNotFound if no source matches
        matches = [x for x in self.sources if x.url == view_name]
        if matches:
            return matches[0]
        else:
            raise rospkg.ResourceNotFound(view_name)

    def get_rosdeps(self, resource_name, implicit=True):
        """
        Always raises as SourceListLoader defines no concrete resources with rosdeps.

        :raises: :exc:`rospkg.ResourceNotFound`
        """
        raise rospkg.ResourceNotFound(resource_name)

    def get_view_key(self, resource_name):
        """
        Always raises as SourceListLoader defines no concrete resources with rosdeps.

        :returns: Name of view that *resource_name* is in, ``None`` if no associated view.
        :raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be found.
        """
        raise rospkg.ResourceNotFound(resource_name)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/sources_list.py | sources_list.py |
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ken Conley/kwc@willowgarage.com
"""
Library for loading rosdep files from the ROS package/stack
filesystem.
"""
from __future__ import print_function
import os
import catkin_pkg.package
import rospkg
from .catkin_packages import VALID_DEPENDENCY_TYPES
from .loader import RosdepLoader
# Default view key is the view that packages that are not in stacks
# see. It is the root of all dependencies. It is superseded by an
# explicit underlay_key.
DEFAULT_VIEW_KEY = '*default*'
# Implementation details: this API was originally conceived under the
# rosdep 1 design. It has since been retrofitted for the rosdep 2
# design, which means it is a bit overbuilt. There really is no need
# for a notion of views for rospkg -- all rospkgs have the same view.
# It would be nice to refactor this API into something much, much
# simpler, which would probably involve merging RosPkgLoader and
# SourcesListLoader. RosPkgLoader would provide identification of
# resources and SourcesListLoader would build a *single* view that was
# no longer resource-dependent.
class RosPkgLoader(RosdepLoader):
def __init__(self, rospack=None, rosstack=None, underlay_key=None, dependency_types=[]):
"""
:param underlay_key: If set, all views loaded by this loader
will depend on this key.
"""
if rospack is None:
rospack = rospkg.RosPack()
if rosstack is None:
rosstack = rospkg.RosStack()
self._rospack = rospack
self._rosstack = rosstack
self._rosdep_yaml_cache = {}
self._underlay_key = underlay_key
# cache computed list of loadable resources
self._loadable_resource_cache = None
self._catkin_packages_cache = None
default_dep_types = VALID_DEPENDENCY_TYPES - {'doc'}
self.include_dep_types = VALID_DEPENDENCY_TYPES.intersection(set(dependency_types)) if dependency_types else default_dep_types
def load_view(self, view_name, rosdep_db, verbose=False):
"""
Load view data into *rosdep_db*. If the view has already
been loaded into *rosdep_db*, this method does nothing. If
view has no rosdep data, it will be initialized with an empty
data map.
:raises: :exc:`InvalidData` if view rosdep.yaml is invalid
:raises: :exc:`rospkg.ResourceNotFound` if view cannot be located
:returns: ``True`` if view was loaded. ``False`` if view
was already loaded.
"""
if rosdep_db.is_loaded(view_name):
return
if view_name not in self.get_loadable_views():
raise rospkg.ResourceNotFound(view_name)
elif view_name == 'invalid':
raise rospkg.ResourceNotFound('FOUND' + view_name + str(self.get_loadable_views()))
if verbose:
print('loading view [%s] with rospkg loader' % (view_name))
# chain into underlay if set
if self._underlay_key:
view_dependencies = [self._underlay_key]
else:
view_dependencies = []
# no rospkg view has actual data
rosdep_db.set_view_data(view_name, {}, view_dependencies, '<nodata>')
def get_loadable_views(self):
"""
'Views' map to ROS stack names.
"""
return list(self._rosstack.list()) + [DEFAULT_VIEW_KEY]
def get_loadable_resources(self):
"""
'Resources' map to ROS packages names.
"""
if not self._loadable_resource_cache:
self._loadable_resource_cache = list(self._rospack.list())
return self._loadable_resource_cache
def get_catkin_paths(self):
if not self._catkin_packages_cache:
def find_catkin_paths(src):
return map(lambda x: (x, src.get_path(x)),
filter(lambda x: src.get_manifest(x).is_catkin, src.list()))
self._catkin_packages_cache = dict(find_catkin_paths(self._rospack))
self._catkin_packages_cache.update(find_catkin_paths(self._rosstack))
return self._catkin_packages_cache
    def get_rosdeps(self, resource_name, implicit=True):
        """
        Return the rosdep keys declared by *resource_name*.

        If *resource_name* is a stack, returns an empty list.

        :param resource_name: name of a catkin package, rospack package,
            or stack
        :param implicit: if True, also include the rosdeps of (recursive)
            package dependencies.
            NOTE(review): *implicit* is ignored in the catkin branch below —
            confirm this is intended.
        :raises: :exc:`rospkg.ResourceNotFound` if *resource_name* cannot be found.
        """
        if resource_name in self.get_catkin_paths():
            # Catkin package: dependencies come from package.xml, with
            # conditional dependencies evaluated against the current
            # environment (only d.evaluated_condition entries are returned).
            pkg = catkin_pkg.package.parse_package(self.get_catkin_paths()[resource_name])
            pkg.evaluate_conditions(os.environ)
            # Only the dependency types configured in __init__ are summed.
            deps = sum((getattr(pkg, '{}_depends'.format(d)) for d in self.include_dep_types), [])
            return [d.name for d in deps if d.evaluated_condition]
        elif resource_name in self.get_loadable_resources():
            rosdeps = set(self._rospack.get_rosdeps(resource_name, implicit=False))
            if implicit:
                # This resource is a manifest.xml, but it might depend on things with a package.xml
                # Make sure they get a chance to evaluate conditions
                for dep in self._rospack.get_depends(resource_name):
                    rosdeps = rosdeps.union(set(self.get_rosdeps(dep, implicit=True)))
            return list(rosdeps)
        elif resource_name in self._rosstack.list():
            # stacks currently do not have rosdeps of their own, implicit or otherwise
            return []
        else:
            raise rospkg.ResourceNotFound(resource_name)
def is_metapackage(self, resource_name):
if resource_name in self._rosstack.list():
m = self._rosstack.get_manifest(resource_name)
return m.is_catkin
return False
def get_view_key(self, resource_name):
"""
Map *resource_name* to a view key. In rospkg, this maps the
DEFAULT_VIEW_KEY if *resource_name* exists.
:raises: :exc:`rospkg.ResourceNotFound`
"""
if (
resource_name in self.get_catkin_paths() or
resource_name in self.get_loadable_resources()
):
return DEFAULT_VIEW_KEY
else:
raise rospkg.ResourceNotFound(resource_name)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/rospkg_loader.py | rospkg_loader.py |
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Dirk Thomas/dthomas@willowgarage.com
"""
API provided for rospack to determine if a dependency
is a ROSpackage or a system dependency
"""
from __future__ import print_function
import subprocess
from .main import _get_default_RosdepLookup
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import get_sources_cache_dir
def call_pkg_config(option, pkg_name):
    """Run ``pkg-config <option> <pkg_name>`` and return its stripped output.

    :param option: pkg-config option, e.g. ``'--cflags'`` or ``'--modversion'``
    :param pkg_name: name of the package to query
    :returns: stripped command output (``bytes`` on Python 3), or ``None``
        when the package is unknown to pkg-config *or* the ``pkg-config``
        executable itself is not installed.
    """
    try:
        value = subprocess.check_output(['pkg-config', option, pkg_name])
    except subprocess.CalledProcessError:
        # pkg-config ran but does not know this package.
        return None
    except OSError:
        # pkg-config is not installed; treat it like "not found" instead of
        # crashing the caller with FileNotFoundError.
        return None
    return value.strip()
def init_rospack_interface():
    """Build and return the default rosdep view for the rospack interface."""
    class _Options(object):
        # Minimal stand-in for the command-line options object expected by
        # _get_default_RosdepLookup.
        def __init__(self):
            self.os_override = None
            self.sources_cache_dir = get_sources_cache_dir()
            self.verbose = False
            self.dependency_types = []

    lookup = _get_default_RosdepLookup(_Options())
    return lookup.get_rosdep_view(DEFAULT_VIEW_KEY)
def is_view_empty(view):
    """Return True if *view* defines no rosdep keys at all."""
    return not view.rosdep_defs
def is_ros_package(view, rosdep_name):
    """Return True if *rosdep_name* is defined in *view* and carries the
    '_is_ros' marker (i.e. it resolves to a ROS package)."""
    return _ros_flag(view, rosdep_name, True)
def is_system_dependency(view, rosdep_name):
    """Return True if *rosdep_name* is defined in *view* and does NOT carry
    the '_is_ros' marker (i.e. it resolves to a system dependency)."""
    return _ros_flag(view, rosdep_name, False)
def _ros_flag(view, rosdep_name, value):
try:
d = view.lookup(rosdep_name)
except KeyError:
return False
ros_flag = '_is_ros' in d.data.keys()
return ros_flag == value
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/rospack.py | rospack.py |
# Copyright (c) 2019, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy
import os
try:
import cPickle as pickle
except ImportError:
import pickle
try:
FileNotFoundError
except NameError:
# Python 2 compatibility
# https://stackoverflow.com/questions/21367320/
FileNotFoundError = IOError
import rospkg
from ._version import __version__
from .cache_tools import compute_filename_hash
from .cache_tools import write_cache_file
from .cache_tools import PICKLE_CACHE_EXT
"""
Rosdep needs to store data that isn't used to resolve rosdep keys, but needs to be cached during
`rosdep update`.
"""
META_CACHE_DIR = 'meta.cache'
def get_meta_cache_dir():
    """Return the storage location for cached metadata (under ROS home)."""
    return os.path.join(rospkg.get_ros_home(), 'rosdep', META_CACHE_DIR)
class CacheWrapper(object):
    """Make it possible to introspect cache in case some future bug needs to be worked around.

    NOTE(review): instances appear to be pickled into the meta cache (see
    MetaDatabase / write_cache_file), so the attribute layout — including
    the name-mangled ``_CacheWrapper__data`` — should not be renamed
    without invalidating existing caches.
    """

    def __init__(self, category, data):
        # The version of rosdep that wrote the category
        self.rosdep_version = __version__
        # The un-hashed name of the category
        self.category_name = category
        # The stuff being stored; goes through the deep-copying property below
        self.data = data

    @property
    def data(self):
        # If cached data type is mutable, don't allow modifications to what's been loaded
        return copy.deepcopy(self.__data)

    @data.setter
    def data(self, value):
        # Deep-copy on write as well, so later mutation of the caller's
        # object cannot leak into the cache.
        self.__data = copy.deepcopy(value)
class MetaDatabase:
    """
    Store and retrieve metadata from the rosdep cache.

    This data is fetched during `rosdep update`, but is not a source for
    resolving rosdep keys.
    """

    def __init__(self, cache_dir=None):
        """
        :param cache_dir: directory holding the pickled cache files;
            defaults to :func:`get_meta_cache_dir`.
        """
        if cache_dir is None:
            cache_dir = get_meta_cache_dir()
        self._cache_dir = cache_dir
        # category name -> CacheWrapper, lazily populated from disk on get().
        self._loaded = {}

    def set(self, category, metadata):
        """Add or overwrite metadata in the cache (in memory and on disk)."""
        wrapper = CacheWrapper(category, metadata)
        write_cache_file(self._cache_dir, category, wrapper)
        self._loaded[category] = wrapper

    def get(self, category, default=None):
        """Return cached metadata for *category*, or *default* if absent."""
        if category not in self._loaded:
            self._load_from_cache(category, self._cache_dir)
        if category in self._loaded:
            return self._loaded[category].data
        return default

    def _load_from_cache(self, category, cache_dir):
        # Populate self._loaded[category] from the on-disk pickle, if any.
        # (Previously this method ignored its cache_dir argument and read
        # self._cache_dir directly; it now honors the parameter.)
        filename = compute_filename_hash(category) + PICKLE_CACHE_EXT
        try:
            with open(os.path.join(cache_dir, filename), 'rb') as cache_file:
                # pickle.load reads straight from the file object; no need
                # to slurp the bytes first with pickle.loads(read()).
                self._loaded[category] = pickle.load(cache_file)
        except FileNotFoundError:
            pass
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/meta.py | meta.py |
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ken Conley/kwc@willowgarage.com
"""
Underlying model of rosdep data. The basic data model of rosdep is to
store a dictionary of data indexed by view name (i.e. ROS stack name).
This data includes a dictionary mapping rosdep dependency names to
rules and the view dependencies.
This is a lower-level representation. Higher-level representation can
combine these rosdep dependency maps and view dependencies together
into a combined view on which queries can be made.
"""
class RosdepDatabaseEntry(object):
    """
    Immutable-by-convention record of the rosdep data and metadata
    belonging to a single view.
    """

    def __init__(self, rosdep_data, view_dependencies, origin):
        """
        :param rosdep_data: raw rosdep dictionary map for the view
        :param view_dependencies: list of view dependency names
        :param origin: name of where the data originated, e.g. a filename
        """
        assert isinstance(rosdep_data, dict), 'RosdepDatabaseEntry() rosdep_data is not a dict: %s' % rosdep_data
        self.origin = origin
        self.view_dependencies = view_dependencies
        self.rosdep_data = rosdep_data
class RosdepDatabase(object):
    """
    In-memory store of loaded rosdep data for multiple views.
    """

    def __init__(self):
        # view_name -> RosdepDatabaseEntry
        self._rosdep_db = {}

    def is_loaded(self, view_name):
        """
        :param view_name: name of view to check, ``str``
        :returns: ``True`` if *view_name* has been loaded into this
          database.
        """
        return view_name in self._rosdep_db

    def mark_loaded(self, view_name):
        """
        Mark *view_name* as loaded by storing an empty entry for it.

        NOTE: this overwrites any existing entry for *view_name* with
        empty data.

        :param view_name: name of view to mark as loaded
        """
        self.set_view_data(view_name, {}, [], None)

    def set_view_data(self, view_name, rosdep_data, view_dependencies, origin):
        """
        Set the data associated with a view, creating a new
        :class:`RosdepDatabaseEntry`.  *rosdep_data* is copied.

        :param origin: origin of view data, e.g. filepath of ``rosdep.yaml``
        """
        entry = RosdepDatabaseEntry(rosdep_data.copy(), view_dependencies, origin)
        self._rosdep_db[view_name] = entry

    def get_view_names(self):
        """
        :returns: names of the views loaded into this database.
        """
        return self._rosdep_db.keys()

    def get_view_data(self, view_name):
        """
        :returns: :class:`RosdepDatabaseEntry` of the given view.

        :raises: :exc:`KeyError` if no entry for *view_name*
        """
        return self._rosdep_db[view_name]

    def get_view_dependencies(self, view_name):
        """
        Return the transitive view dependencies of *view_name*, unique and
        in dependency order.

        :raises: :exc:`KeyError` if *view_name* is not an entry, or if
          all of the view's dependencies have not been properly loaded.
        """
        entry = self.get_view_data(view_name)
        dependencies = list(entry.view_dependencies)
        # Prepend each direct dependency's own transitive dependencies,
        # iterating in reverse so their relative order is preserved.
        for dep in reversed(entry.view_dependencies):
            dependencies = self.get_view_dependencies(dep) + dependencies
        # De-duplicate while preserving first occurrence.
        seen = set()
        ordered = []
        for name in dependencies:
            if name not in seen:
                seen.add(name)
                ordered.append(name)
        return ordered
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/model.py | model.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com, Ken Conley/kwc@willowgarage.com
"""
rosdep library and command-line tool
"""
from __future__ import print_function
from ._version import __version__
import sys
from .installers import InstallerContext, Installer, \
PackageManagerInstaller
from .core import RosdepInternalError, InstallFailed, UnsupportedOs, \
InvalidData, DownloadFailure
from .model import RosdepDatabase, RosdepDatabaseEntry
from .lookup import RosdepDefinition, RosdepView, RosdepLookup, \
ResolutionError
from .loader import RosdepLoader
# don't let import error take down code as when attempting to compute version number
try:
from .rospkg_loader import RosPkgLoader
except ImportError:
print('Cannot import rospkg, rosdep will not function properly',
file=sys.stderr)
def create_default_installer_context(verbose=False):
    """
    Create an :class:`InstallerContext` with all of rosdep's built-in
    platform and installer plugin modules registered.

    :param verbose: if True, print registration progress to stdout
    """
    from .platforms import alpine
    from .platforms import arch
    from .platforms import cygwin
    from .platforms import debian
    from .platforms import gentoo
    from .platforms import nix
    from .platforms import openembedded
    from .platforms import opensuse
    from .platforms import osx
    from .platforms import pip
    from .platforms import npm
    from .platforms import gem
    from .platforms import redhat
    from .platforms import freebsd
    from .platforms import slackware
    from .platforms import source

    platform_mods = [alpine, arch, cygwin, debian, gentoo, nix, openembedded, opensuse, osx, redhat, slackware, freebsd]
    installer_mods = [source, pip, gem, npm] + platform_mods

    context = InstallerContext()
    context.set_verbose(verbose)
    # Every module registers installers; only OS platform modules also
    # register platforms.
    for module in installer_mods:
        if verbose:
            print('registering installers for %s' % (module.__name__))
        module.register_installers(context)
    for module in platform_mods:
        if verbose:
            print('registering platforms for %s' % (module.__name__))
        module.register_platforms(context)
    return context
from . import gbpdistro_support  # noqa
# Inject the factory into gbpdistro_support at import time.
# NOTE(review): presumably this indirection exists to break an import cycle
# between the two modules — confirm before refactoring.
gbpdistro_support.create_default_installer_context = create_default_installer_context
# TODO: this was partially abstracted from main() for another library,
# but it turned out to be unnecessary. Not sure it's worth maintaining
# separately, especially in the top-level module.
def get_default_installer(installer_context=None, verbose=False):
    """
    Based on the active OS and installer context configuration, get
    the installer to use and the necessary configuration state
    (installer keys, OS name/version).

    :param installer_context: an :class:`InstallerContext`; a default one
        is created when ``None``.
    :returns: (installer, installer_keys, default_key, os_name, os_version)
    :raises: :exc:`UnsupportedOs` if the detected OS has no configured
        installers
    """
    context = installer_context
    if context is None:
        context = create_default_installer_context(verbose=verbose)
    os_name, os_version = context.get_os_name_and_version()
    try:
        installer_keys = context.get_os_installer_keys(os_name)
        default_key = context.get_default_os_installer_key(os_name)
    except KeyError:
        raise UnsupportedOs(os_name, context.get_os_keys())
    return context.get_installer(default_key), installer_keys, default_key, os_name, os_version
__all__ = [
'InstallerContext', 'Installer', 'PackageManagerInstaller',
'RosdepInternalError', 'InstallFailed', 'UnsupportedOs', 'InvalidData',
'DownloadFailure',
'RosdepDatabase', 'RosdepDatabaseEntry',
'RosdepDefinition', 'RosdepView', 'RosdepLookup', 'ResolutionError',
'RosdepLoader', 'RosPkgLoader',
'get_default_installer',
'create_default_installer_context',
]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/__init__.py | __init__.py |
# Copyright (c) 2013, Open Source Robotics Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Paul Mathieu/paul@osrfoundation.org
import rosdistro
import os
# Point rosdistro at the TUNA (Tsinghua University) mirror of the rosdistro
# index so index fetches do not require access to raw.githubusercontent.com.
# NOTE: this mutates the process environment as an import-time side effect.
os.environ['ROSDISTRO_INDEX_URL'] = 'https://mirrors.tuna.tsinghua.edu.cn/rosdistro/index-v4.yaml'
class PreRep137Warning(UserWarning):
    """Warning category for pre-REP-137 data (named after REP 137)."""
class _RDCache:
    # Module-level memoization of rosdistro lookups, keyed by the active
    # index URL (see _check_cache, which resets these when the URL changes).
    # The class attributes are deliberately shared mutable state.
    index_url = None
    index = None
    release_files = {}
class ReleaseFile(object):
    """Slim view of a rosdistro distribution file: keeps only the release
    repositories (repositories without one are dropped) and the release
    platforms."""

    def __init__(self, dist_file):
        self.repositories = {}
        for name, repository in dist_file.repositories.items():
            release_repo = repository.release_repository
            if release_repo:
                self.repositories[name] = release_repo
        self.platforms = dist_file.release_platforms
def _check_cache():
    """Invalidate the module cache whenever the rosdistro index URL changes."""
    current_url = rosdistro.get_index_url()
    if _RDCache.index_url != current_url:
        _RDCache.index_url = current_url
        _RDCache.index = None
        _RDCache.release_files = {}
def get_index_url():
    """Return the rosdistro index URL in effect (after cache validation)."""
    _check_cache()
    return _RDCache.index_url
def get_index():
    """Return the rosdistro index, fetching and caching it on first use or
    after the index URL has changed."""
    _check_cache()
    if _RDCache.index is None:
        _RDCache.index = rosdistro.get_index(_RDCache.index_url)
    return _RDCache.index
def get_release_file(distro):
    """Return a cached :class:`ReleaseFile` for *distro*, building it from
    the rosdistro distribution file on first use."""
    _check_cache()
    if distro not in _RDCache.release_files:
        dist_file = rosdistro.get_distribution_file(get_index(), distro)
        _RDCache.release_files[distro] = ReleaseFile(dist_file)
    return _RDCache.release_files[distro]
def get_targets():
    """Return a dict mapping each distribution name to its release platforms."""
    # dict comprehension instead of dict() over a generator expression.
    return {d: get_release_file(d).platforms for d in get_index().distributions}
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/rosdistrohelper.py | rosdistrohelper.py |
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author William Woodall/wjwwood@gmail.com
from collections import defaultdict
class Resolution(dict):
    """A default dictionary for use in the :class:`DependencyGraph`."""

    def __init__(self):
        super(Resolution, self).__init__()
        # Fresh lists are created per instance, so entries never share state.
        self.update({
            'installer_key': None,
            'install_keys': [],
            'dependencies': [],
            'is_root': True,
        })
class DependencyGraph(defaultdict):
    """
    Provides a mechanism for generating a list of resolutions which preserves the dependency order.

    The :class:`DependencyGraph` inherits from a *defaultdict* (of
    :class:`Resolution`), so it can be used as such to load
    the dependency graph data into it.
    Example::

        # Dependency graph:: A-B-C
        dg = DependencyGraph()
        dg['A']['installer_key'] = 'a_installer'
        dg['A']['install_keys'] = ['a']
        dg['A']['dependencies'] = ['B']
        dg['B']['installer_key'] = 'b_installer'
        dg['B']['install_keys'] = ['b']
        dg['B']['dependencies'] = ['C']
        dg['C']['installer_key'] = 'c_installer'
        dg['C']['install_keys'] = ['c']
        dg['C']['dependencies'] = []
        result = dg.get_ordered_uninstalled()
    """

    def __init__(self):
        defaultdict.__init__(self, Resolution)

    def detect_cycles(self, rosdep_key, traveled_keys):
        """
        Recursive function to detect cycles in the dependency graph.

        :param rosdep_key: This is the rosdep key to use as the root in the cycle exploration.
        :param traveled_keys: A list of rosdep_keys that have been traversed thus far.

        :raises: :exc:`AssertionError` if the rosdep_key is in the traveled keys, indicating a cycle has occurred.
        """
        assert rosdep_key not in traveled_keys, 'A cycle in the dependency graph occurred with key `%s`.' % rosdep_key
        traveled_keys.append(rosdep_key)
        for dependency in self[rosdep_key]['dependencies']:
            self.detect_cycles(dependency, traveled_keys)

    def validate(self):
        """
        Performs validations on the dependency graph, like cycle detection and invalid rosdep key detection.

        :raises: :exc:`AssertionError` if a cycle is detected.
        :raises: :exc:`KeyError` if an invalid rosdep_key is found in the dependency graph.
        """
        for rosdep_key in self:
            # Ensure all dependencies have definitions
            # i.e.: Ensure we aren't pointing to invalid rosdep keys
            for dependency in self[rosdep_key]['dependencies']:
                if dependency not in self:
                    raise KeyError(
                        'Invalid Graph Structure: rosdep key `%s` does not exist in the dictionary of resolutions.'
                        % dependency)
                # Any key that something else depends on cannot be a root.
                self[dependency]['is_root'] = False
        # Check each entry for cyclical dependencies
        for rosdep_key in self:
            self.detect_cycles(rosdep_key, [])

    def get_ordered_dependency_list(self):
        """
        Generates an ordered list of dependencies using the dependency graph.

        :returns: *[(installer_key, [install_keys])]*, ``[(str, [str])]``. *installer_key* is the key
          that denotes which installed the accompanying *install_keys* are for. *installer_key* are something
          like ``apt`` or ``homebrew``. *install_keys* are something like ``boost`` or ``ros-fuerte-ros_comm``.

        :raises: :exc:`AssertionError` if a cycle is detected.
        :raises: :exc:`KeyError` if an invalid rosdep_key is found in the dependency graph.
        """
        # Validate the graph
        self.validate()
        # Generate the dependency list
        dep_list = []
        for rosdep_key in self:
            # Start traversal only from roots; non-roots are reached through
            # their dependents during the recursive walk below.
            if self[rosdep_key]['is_root']:
                dep_list.extend(self.__get_ordered_uninstalled(rosdep_key))
        # Make the list unique and remove empty entries
        result = []
        for item in dep_list:
            if item not in result and item[1] != []:
                result.append(item)
        # Squash the results by installer_key: consecutive runs of the same
        # installer are merged into one (installer_key, [keys]) entry.
        squashed_result = []
        previous_installer_key = None
        for installer_key, resolved in result:
            if previous_installer_key != installer_key:
                squashed_result.append((installer_key, []))
                previous_installer_key = installer_key
            squashed_result[-1][1].extend(resolved)
        return squashed_result

    def __get_ordered_uninstalled(self, key):
        # Post-order traversal: a key's dependencies are emitted before the
        # key itself, which preserves install order.
        uninstalled = []
        for dependency in self[key]['dependencies']:
            uninstalled.extend(self.__get_ordered_uninstalled(dependency))
        uninstalled.append((self[key]['installer_key'], self[key]['install_keys']))
        return uninstalled
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/dependency_graph.py | dependency_graph.py |
import os
import sys
def fix_uri(file):
    """Rewrite rosdep source URLs in *file* to the TUNA github-raw mirror.

    Both the upstream GitHub URL and the older gitee mirror (used by
    previous releases of this tool) are redirected.  A one-time backup is
    written to ``file + '.bak'`` before the first modification.

    :param file: path of the Python source file to patch in place
    :returns: True on success, False if the file could not be opened
        (typically a permissions problem, i.e. the tool was run without sudo)
    """
    print('打开文件:' + file)
    try:
        src_file = open(file, 'r')
    except IOError:
        print('\n【!】列阵失败……请确认是否指令前加了 sudo ? \n---------------------\n正确指令:\nsudo pip install 6-rosdep\nsudo 6-rosdep\n---------------------\n')
        return False
    print('将 rosdep 修改为国内的资源~')
    with src_file:
        contents = src_file.read()
    # Write the backup only once; never clobber an existing .bak.
    if not os.path.exists(file + '.bak'):
        with open(file + '.bak', 'w') as backup_file:
            backup_file.write(contents)
    new_contents = contents.replace("raw.githubusercontent.com/ros/rosdistro", "https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro")
    new_contents = new_contents.replace("gitee.com/fuckrosdep/rosdistro/raw", "https://mirrors.tuna.tsinghua.edu.cn/github-raw/ros/rosdistro")
    with open(file, 'w') as dst_file:
        dst_file.write(new_contents)
    return True
def fix_uri2(file):
    """Rewrite rosdistro index URLs in *file* to the plain TUNA mirror path.

    Same as :func:`fix_uri` but targets the ``/rosdistro`` mirror path used
    for ``rosdistro/__init__.py`` rather than the ``/github-raw`` path.
    A one-time backup is written to ``file + '.bak'``.

    :param file: path of the Python source file to patch in place
    :returns: True on success, False if the file could not be opened
        (typically a permissions problem, i.e. the tool was run without sudo)
    """
    print('打开文件:' + file)
    try:
        src_file = open(file, 'r')
    except IOError:
        print('\n【!】列阵失败……请确认是否指令前加了 sudo ? \n---------------------\n正确指令:\nsudo pip install 6-rosdep\nsudo 6-rosdep\n---------------------\n')
        return False
    print('将 rosdep 修改为国内的资源~')
    with src_file:
        contents = src_file.read()
    # Write the backup only once; never clobber an existing .bak.
    if not os.path.exists(file + '.bak'):
        with open(file + '.bak', 'w') as backup_file:
            backup_file.write(contents)
    new_contents = contents.replace("raw.githubusercontent.com/ros/rosdistro", "https://mirrors.tuna.tsinghua.edu.cn/rosdistro")
    new_contents = new_contents.replace("gitee.com/fuckrosdep/rosdistro/raw", "https://mirrors.tuna.tsinghua.edu.cn/rosdistro")
    with open(file, 'w') as dst_file:
        dst_file.write(new_contents)
    return True
def main(args=None):
    """Entry point: patch rosdep/rosdistro sources to the TUNA mirror.

    Detects the ROS distro by install layout (python2.7 dist-packages for
    Melodic and earlier, python3 for Noetic), patches each rosdep/rosdistro
    module in place (rosdistro/__init__.py gets the plain mirror path via
    fix_uri2, the rosdep2 modules the github-raw path via fix_uri), and
    removes the stale 20-default.list so `sudo rosdep init` can recreate it.
    Exits with status 1 if any file cannot be patched.
    """
    print("--------------------------------------------------------------------------------")
    print("感谢赵虚左老师提供的解题思路。感谢鱼香ROS大佬的引导启发。\n愿天下道友再无 rosdep 之烦恼~\n欢迎加QQ群【869643967】")
    print("--------------------------------------------------------------------------------")
    file_1 = '/usr/lib/python2.7/dist-packages/rosdistro/__init__.py'
    file_2 = '/usr/lib/python2.7/dist-packages/rosdep2/gbpdistro_support.py'
    file_3 = '/usr/lib/python2.7/dist-packages/rosdep2/rep3.py'
    file_4 = '/usr/lib/python2.7/dist-packages/rosdep2/sources_list.py'
    file_5 = '/usr/lib/python3/dist-packages/rosdistro/__init__.py'
    file_6 = '/usr/lib/python3/dist-packages/rosdep2/gbpdistro_support.py'
    file_7 = '/usr/lib/python3/dist-packages/rosdep2/rep3.py'
    file_8 = '/usr/lib/python3/dist-packages/rosdep2/sources_list.py'
    # melodic / Ubuntu 18.04 (and earlier) install under python2.7
    if os.path.exists(file_1):
        print('\n检测到是 Melodic 或之前的版本 (Ubuntu 18.04或更早),准备列阵……\n')
        for fixer, path in ((fix_uri2, file_1), (fix_uri, file_2), (fix_uri, file_3), (fix_uri, file_4)):
            if fixer(path) is False:
                sys.exit(1)
    # noetic / Ubuntu 20.04 installs under python3
    if os.path.exists(file_5):
        print('检测到是 Noetic 版本 (Ubuntu 20.04),准备列阵……\n')
        # NOTE: the failure check now also covers file_8 (sources_list.py);
        # it was previously missing for that file only.
        for fixer, path in ((fix_uri2, file_5), (fix_uri, file_6), (fix_uri, file_7), (fix_uri, file_8)):
            if fixer(path) is False:
                sys.exit(1)
    # Remove the stale sources list so `sudo rosdep init` can write a new one.
    file_list = "/etc/ros/rosdep/sources.list.d/20-default.list"
    if os.path.exists(file_list):
        print('移除旧文件:' + file_list + '\n\n')
        os.remove(file_list)
    print("--------------------------------------------------------------------------------")
    print('\n若遇到任何问题,欢迎进入微信公众号【六部工坊】进行反馈,我们会及时为道友解忧~\n更多精彩 ROS 教学视频,请关注B站频道【六部工坊】 \n')
    print("--------------------------------------------------------------------------------")
    print('列阵完毕~道友可运行如下指令开始渡劫……\n \nsudo rosdep init \nrosdep update \n')
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/hello/main.py | main.py |
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Subdirectory (relative to an install prefix) holding the ament resource index.
RESOURCE_INDEX_SUBFOLDER = 'share/ament_index/resource_index'
# Environment variable listing the os.pathsep-separated ament install prefixes.
AMENT_PREFIX_PATH_ENV_VAR = 'AMENT_PREFIX_PATH'
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/ament_packages/constants.py | constants.py |
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .constants import AMENT_PREFIX_PATH_ENV_VAR
def get_search_paths():
    """
    Get the paths from the 'AMENT_PREFIX_PATH' environment variable.

    Note: previously the docstring literal had ``.format(...)`` applied to it,
    which turned it into a discarded expression and left ``__doc__`` empty.

    :returns: list of paths that exist on the filesystem
    :raises: :exc:`EnvironmentError` if the variable is not set or empty
    """
    ament_prefix_path = os.environ.get(AMENT_PREFIX_PATH_ENV_VAR)
    if not ament_prefix_path:
        raise EnvironmentError(
            "Environment variable '{}' is not set or empty".format(AMENT_PREFIX_PATH_ENV_VAR))
    paths = ament_prefix_path.split(os.pathsep)
    # Drop empty entries and paths that do not exist.
    return [p for p in paths if p and os.path.exists(p)]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/ament_packages/search_paths.py | search_paths.py |
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .resources import get_resources
def get_packages_with_prefixes():
    """
    Return a mapping from package name to the install prefix containing it.

    :returns: dict of package names to their prefixes
    :rtype: dict
    """
    resource_type = 'packages'
    return get_resources(resource_type)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/ament_packages/packages.py | packages.py |
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .constants import AMENT_PREFIX_PATH_ENV_VAR
from .constants import RESOURCE_INDEX_SUBFOLDER
from .packages import get_packages_with_prefixes
from .resources import get_resources
from .search_paths import get_search_paths
# Public API of this helper package.
__all__ = [
    'get_packages_with_prefixes',
    'get_resources',
    'get_search_paths',
    'AMENT_PREFIX_PATH_ENV_VAR',
    'RESOURCE_INDEX_SUBFOLDER',
]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/ament_packages/__init__.py | __init__.py |
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .constants import RESOURCE_INDEX_SUBFOLDER
from .search_paths import get_search_paths
def get_resources(resource_type):
    """
    Get the resource names of all resources of the specified type.

    Earlier prefixes on the search path win when the same resource name
    appears in more than one prefix.

    :param resource_type: the type of the resource
    :type resource_type: str
    :returns: dict of resource names to the prefix path they are in
    :raises: :exc:`EnvironmentError`
    """
    assert resource_type, 'The resource type must not be empty'
    resources = {}
    for prefix in get_search_paths():
        type_folder = os.path.join(prefix, RESOURCE_INDEX_SUBFOLDER, resource_type)
        if not os.path.isdir(type_folder):
            continue
        for name in os.listdir(type_folder):
            # Ignore anything starting with a dot, and subdirectories.
            if name.startswith('.') or os.path.isdir(os.path.join(type_folder, name)):
                continue
            # Keep the first prefix that provided this resource.
            resources.setdefault(name, prefix)
    return resources
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/ament_packages/resources.py | resources.py |
# Copyright (c) 2009, Willow Garage, Inc.
# Copyright (c) 2019, Kei Okada
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import subprocess
from ..core import InstallFailed
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# npm package manager key
NPM_INSTALLER = 'npm'
def register_installers(context):
    """Register the npm installer with the given InstallerContext."""
    context.set_installer(NPM_INSTALLER, NpmInstaller())
def is_npm_installed():
    """Return True if the 'npm' executable can be launched, False otherwise."""
    try:
        proc = subprocess.Popen(['npm'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        proc.communicate()
    except OSError:
        # Executable missing (or not runnable) on this system.
        return False
    return True
class NpmInstaller(PackageManagerInstaller):
    """
    :class:`Installer` support for npm.
    """

    def __init__(self):
        super(NpmInstaller, self).__init__(self.npm_detect, supports_depends=True)

    def npm_detect(self, pkgs, exec_fn=None):
        """
        Given a list of package, return the list of installed packages.

        :param exec_fn: function to execute Popen and read stdout (for testing)
        """
        if exec_fn is None:
            exec_fn = read_stdout
        # 'npm list -parseable' prints one filesystem path per line:
        # [dir, dir/node_modules/pkg, ...]; the package name is the final
        # path component.  The first line (the root dir) is skipped.
        base_cmd = ['npm', 'list', '-g'] if self.as_root else ['npm', 'list']
        lines = exec_fn(base_cmd + ['-parseable']).split('\n')
        detected = []
        for line in lines[1:]:
            name = line.split('/')[-1]
            if name in pkgs:
                detected.append(name)
        return detected

    def get_version_strings(self):
        """Return a one-element list describing the npm version."""
        version = subprocess.check_output(['npm', '--version']).strip().decode()
        return ['npm {}'.format(version)]

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return one 'npm install' invocation per missing package."""
        if not is_npm_installed():
            raise InstallFailed((NPM_INSTALLER, 'npm is not installed'))
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not packages:
            return []
        base_cmd = ['npm', 'install', '-g'] if self.as_root else ['npm', 'install']
        return [self.elevate_priv(base_cmd + [pkg]) for pkg in packages]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/npm.py | npm.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
import subprocess
from rospkg.os_detect import OS_OPENSUSE
from .pip import PIP_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
# zypper package manager key
ZYPPER_INSTALLER = 'zypper'
def register_installers(context):
    """Register the zypper installer with the given InstallerContext."""
    context.set_installer(ZYPPER_INSTALLER, ZypperInstaller())
def register_platforms(context):
    """Register openSUSE with its supported installers; zypper is the default."""
    context.add_os_installer_key(OS_OPENSUSE, SOURCE_INSTALLER)
    context.add_os_installer_key(OS_OPENSUSE, PIP_INSTALLER)
    context.add_os_installer_key(OS_OPENSUSE, ZYPPER_INSTALLER)
    context.set_default_os_installer_key(OS_OPENSUSE, lambda self: ZYPPER_INSTALLER)
def rpm_detect(packages):
    """Return the subset of *packages* already provided by an installed rpm."""
    # 'rpm -q --whatprovides' exits 0 when some installed package provides
    # the requested capability.
    return [p for p in packages
            if subprocess.call(['rpm', '-q', '--whatprovides', p]) == 0]
class ZypperInstaller(PackageManagerInstaller):
    """
    This class provides the functions for installing using zypper.
    """

    def __init__(self):
        super(ZypperInstaller, self).__init__(rpm_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not packages:
            return []
        # '-yl' auto-answers yes and agrees to licenses when no prompt is possible.
        base_cmd = ['zypper', 'install'] if interactive else ['zypper', 'install', '-yl']
        return [self.elevate_priv(base_cmd) + packages]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/opensuse.py | opensuse.py |
# Copyright (c) 2019, Ben Wolsieffer
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ben Wolsieffer/benwolsieffer@gmail.com
import subprocess
from rospkg.os_detect import OS_NIXOS
from ..installers import PackageManagerInstaller
NIX_INSTALLER = 'nix'
def register_installers(context):
    """Register the nix installer with the given InstallerContext."""
    context.set_installer(NIX_INSTALLER, NixInstaller())
def register_platforms(context):
    """Register NixOS with nix as its only (and default) installer."""
    context.add_os_installer_key(OS_NIXOS, NIX_INSTALLER)
    context.set_default_os_installer_key(OS_NIXOS, lambda self: NIX_INSTALLER)
def nix_detect(packages):
    """Report every requested package as installed.

    Nix handles installation automatically, so nothing is ever missing.
    """
    return packages
class NixInstaller(PackageManagerInstaller):
    """Installer stub for Nix: packages are provisioned by Nix itself."""

    def __init__(self):
        super(NixInstaller, self).__init__(nix_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        raise NotImplementedError('Nix does not support installing packages through ROS')

    def get_version_strings(self):
        # Return a list of strings for consistency with the other installers'
        # get_version_strings implementations (callers iterate over the
        # result); previously the raw decoded output (a plain str) was
        # returned, which iterates character by character.
        version = subprocess.check_output(('nix', '--version')).decode().splitlines()[0]
        return [version]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/nix.py | nix.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote, Ken Conley
from __future__ import print_function
import subprocess
import sys
from rospkg.os_detect import (
OS_DEBIAN,
OS_LINARO,
OS_UBUNTU,
OS_ELEMENTARY,
OS_MX,
OS_POP,
OS_ZORIN,
OsDetect,
read_os_release
)
from .pip import PIP_INSTALLER
from .gem import GEM_INSTALLER
from .npm import NPM_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# apt package manager key
APT_INSTALLER = 'apt'
def register_installers(context):
    """Register the apt installer with the given InstallerContext."""
    context.set_installer(APT_INSTALLER, AptInstaller())
def register_platforms(context):
    """Register apt-based platforms (Debian, Ubuntu) and their derivatives."""
    register_debian(context)
    register_ubuntu(context)
    # Aliases
    register_elementary(context)
    register_linaro(context)
    register_mx(context)
    register_pop(context)
    register_zorin(context)
def register_debian(context):
    """Register the installers usable on Debian; apt is the default."""
    context.add_os_installer_key(OS_DEBIAN, APT_INSTALLER)
    context.add_os_installer_key(OS_DEBIAN, PIP_INSTALLER)
    context.add_os_installer_key(OS_DEBIAN, GEM_INSTALLER)
    context.add_os_installer_key(OS_DEBIAN, NPM_INSTALLER)
    context.add_os_installer_key(OS_DEBIAN, SOURCE_INSTALLER)
    context.set_default_os_installer_key(OS_DEBIAN, lambda self: APT_INSTALLER)
    # Debian rosdep rules are keyed by codename (e.g. 'bullseye'), not version number.
    context.set_os_version_type(OS_DEBIAN, OsDetect.get_codename)
def register_linaro(context):
    """Alias Linaro to Ubuntu unless the OS was explicitly overridden."""
    # Linaro is an alias for Ubuntu. If linaro is detected and it's not set as
    # an override force ubuntu.
    (os_name, os_version) = context.get_os_name_and_version()
    if os_name == OS_LINARO and not context.os_override:
        print('rosdep detected OS: [%s] aliasing it to: [%s]' %
              (OS_LINARO, OS_UBUNTU), file=sys.stderr)
        context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_elementary(context):
    """Alias elementary OS to Ubuntu unless the OS was explicitly overridden."""
    # Elementary is an alias for Ubuntu. If elementary is detected and it's
    # not set as an override force ubuntu.
    (os_name, os_version) = context.get_os_name_and_version()
    if os_name == OS_ELEMENTARY and not context.os_override:
        print('rosdep detected OS: [%s] aliasing it to: [%s]' %
              (OS_ELEMENTARY, OS_UBUNTU), file=sys.stderr)
        context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_mx(context):
    """Alias MX Linux to its Debian base release unless the OS is overridden."""
    # MX is an alias for Debian. If MX is detected and it's
    # not set as an override, force Debian.
    (os_name, os_version) = context.get_os_name_and_version()
    if os_name == OS_MX and not context.os_override:
        print('rosdep detected OS: [%s] aliasing it to: [%s]' %
              (OS_MX, OS_DEBIAN), file=sys.stderr)
        # Read /etc/os-release once (previously it was read twice and the
        # first result was left unused).  VERSION looks like '19.2 (patito feo)'
        # and the Debian codename is the parenthesized part.
        version = read_os_release()['VERSION']
        context.set_os_override(OS_DEBIAN, version[version.find('(') + 1:version.find(')')])
def register_pop(context):
    """Alias Pop!_OS to Ubuntu unless the OS was explicitly overridden."""
    # Pop! OS is an alias for Ubuntu. If Pop! is detected and it's
    # not set as an override force ubuntu.
    (os_name, os_version) = context.get_os_name_and_version()
    if os_name == OS_POP and not context.os_override:
        print('rosdep detected OS: [%s] aliasing it to: [%s]' %
              (OS_POP, OS_UBUNTU), file=sys.stderr)
        context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_zorin(context):
    """Alias Zorin OS to Ubuntu unless the OS was explicitly overridden."""
    # Zorin is an alias for Ubuntu. If Zorin is detected and it's
    # not set as an override force ubuntu.
    (os_name, os_version) = context.get_os_name_and_version()
    if os_name == OS_ZORIN and not context.os_override:
        print('rosdep detected OS: [%s] aliasing it to: [%s]' %
              (OS_ZORIN, OS_UBUNTU), file=sys.stderr)
        context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_ubuntu(context):
    """Register the installers usable on Ubuntu; apt is the default."""
    context.add_os_installer_key(OS_UBUNTU, APT_INSTALLER)
    context.add_os_installer_key(OS_UBUNTU, PIP_INSTALLER)
    context.add_os_installer_key(OS_UBUNTU, GEM_INSTALLER)
    context.add_os_installer_key(OS_UBUNTU, NPM_INSTALLER)
    context.add_os_installer_key(OS_UBUNTU, SOURCE_INSTALLER)
    context.set_default_os_installer_key(OS_UBUNTU, lambda self: APT_INSTALLER)
    # Ubuntu rosdep rules are keyed by codename (e.g. 'focal'), not version number.
    context.set_os_version_type(OS_UBUNTU, OsDetect.get_codename)
def _read_apt_cache_showpkg(packages, exec_fn=None):
    """
    Output whether these packages are virtual package list providing package.
    If one package was not found, it gets returned as non-virtual.
    Yields (package, is_virtual, providers) tuples; providers is None unless
    the package is an uninstalled virtual package.
    :param exec_fn: see `dpkg_detect`; make sure that exec_fn supports a
    second, boolean, parameter.
    """
    cmd = ['apt-cache', 'showpkg'] + packages
    if exec_fn is None:
        exec_fn = read_stdout
    std_out = exec_fn(cmd).splitlines()
    # Index the output: find the line offset of each 'Package: <name>' header.
    # Searches resume from the previous hit because apt-cache reports the
    # packages in the order they were requested.
    starts = []
    notfound = set()
    for p in packages:
        last_start = starts[-1] if len(starts) > 0 else 0
        try:
            starts.append(std_out.index('Package: %s' % p, last_start))
        except ValueError:
            # no section for this package -> report it as not virtual below
            notfound.add(p)
    # Sentinel so the last package's slice extends to the end of the output.
    starts.append(None)
    for p in packages:
        if p in notfound:
            yield p, False, None
            continue
        # Slice out this package's section: from its header to the next one.
        start = starts.pop(0)
        lines = iter(std_out[start:starts[0]])
        header = 'Package: %s' % p
        # proceed to Package header
        try:
            while next(lines) != header:
                pass
        except StopIteration:
            pass
        # proceed to versions section
        try:
            while next(lines) != 'Versions: ':
                pass
        except StopIteration:
            pass
        # virtual packages don't have versions (the section is empty)
        try:
            if next(lines) != '':
                yield p, False, None
                continue
        except StopIteration:
            break
        # proceed to reverse provides section
        try:
            while next(lines) != 'Reverse Provides: ':
                pass
        except StopIteration:
            pass
        # each provider line starts with the providing package's name
        pr = [line.split(' ', 2)[0] for line in lines]
        if pr:
            yield p, True, pr
        else:
            yield p, False, None
def dpkg_detect(pkgs, exec_fn=None):
    """
    Given a list of package, return the list of installed packages.

    :param pkgs: list of package names, optionally followed by a fixed version (`foo=3.0`)
    :param exec_fn: function to execute Popen and read stdout (for testing)
    :return: list elements in *pkgs* that were found installed on the system
    """
    ret_list = []
    # this is mainly a hack to support version locking for eigen.
    # we strip version-locking syntax, e.g. libeigen3-dev=3.0.1-*.
    # our query does not do the validation on the version itself.
    # This is a map `package name -> package name optionally with version`.
    version_lock_map = {}
    for p in pkgs:
        if '=' in p:
            version_lock_map[p.split('=')[0]] = p
        else:
            version_lock_map[p] = p
    cmd = ['dpkg-query', '-W', '-f=\'${Package} ${Status}\n\'']
    cmd.extend(version_lock_map.keys())
    if exec_fn is None:
        exec_fn = read_stdout
    # the second (True) argument makes exec_fn return (stdout, stderr), so
    # dpkg-query complaints about unknown packages don't pollute the output
    std_out, std_err = exec_fn(cmd, True)
    std_out = std_out.replace('\'', '')
    pkg_list = std_out.split('\n')
    for pkg in pkg_list:
        pkg_row = pkg.split()
        # a fully installed package reports status 'install ok installed'
        if len(pkg_row) == 4 and (pkg_row[3] == 'installed'):
            ret_list.append(pkg_row[0])
    # map back to the caller's original (possibly version-locked) spellings
    installed_packages = [version_lock_map[r] for r in ret_list]
    # now for the remaining packages check, whether they are installed as
    # virtual packages (i.e. some installed package provides them)
    remaining = _read_apt_cache_showpkg(list(p for p in pkgs if p not in installed_packages))
    virtual = [n for (n, v, pr) in remaining if v and len(dpkg_detect(pr)) > 0]
    return installed_packages + virtual
def _iterate_packages(packages, reinstall):
    # Yields either a package name (str) or, for an uninstalled virtual
    # package, the full list of providers; the install-command builder
    # handles both shapes.
    for entry in _read_apt_cache_showpkg(packages):
        p, is_virtual, providers = entry
        if is_virtual:
            installed = []
            if reinstall:
                installed = dpkg_detect(providers)
                if len(installed) > 0:
                    # reinstall only the provider(s) that are already present
                    for i in installed:
                        yield i
                    continue  # don't output providers
            yield providers
        else:
            yield p
class AptInstaller(PackageManagerInstaller):
    """
    An implementation of the Installer for use on debian style
    systems.
    """

    def __init__(self):
        super(AptInstaller, self).__init__(dpkg_detect)

    def get_version_strings(self):
        # First line of 'apt-get --version' looks like 'apt X.Y.Z (arch)';
        # the second whitespace-separated token is the version.
        first_line = subprocess.check_output(['apt-get', '--version']).splitlines()[0]
        return ['apt-get {}'.format(first_line.split(b' ')[1].decode())]

    def _get_install_commands_for_package(self, base_cmd, package_or_list):
        # A list means "any one of these providers of a virtual package".
        def pkg_command(pkg):
            return self.elevate_priv(base_cmd + [pkg])
        if isinstance(package_or_list, list):
            return [pkg_command(pkg) for pkg in package_or_list]
        return pkg_command(package_or_list)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not packages:
            return []
        base_cmd = ['apt-get', 'install']
        if not interactive:
            base_cmd += ['-y']
        if quiet:
            base_cmd += ['-qq']
        return [self._get_install_commands_for_package(base_cmd, entry)
                for entry in _iterate_packages(packages, reinstall)]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/debian.py | debian.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
from __future__ import print_function
import subprocess
import sys
from rospkg.os_detect import (
OS_ALMALINUX,
OS_CENTOS,
OS_FEDORA,
OS_ORACLE,
OS_RHEL,
OS_ROCKY
)
from .pip import PIP_INSTALLER
from .source import SOURCE_INSTALLER
from ..core import rd_debug
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# dnf package manager key
DNF_INSTALLER = 'dnf'
# yum package manager key
YUM_INSTALLER = 'yum'
def register_installers(context):
    """Register the dnf and yum installers with the given InstallerContext."""
    context.set_installer(DNF_INSTALLER, DnfInstaller())
    context.set_installer(YUM_INSTALLER, YumInstaller())
def register_platforms(context):
    """Register Fedora and RHEL plus the RHEL-compatible rebuild distros."""
    register_fedora(context)
    register_rhel(context)
    # Aliases
    register_rhel_clone(context, OS_ALMALINUX)
    register_rhel_clone(context, OS_CENTOS)
    register_rhel_clone(context, OS_ORACLE)
    register_rhel_clone(context, OS_ROCKY)
def register_fedora(context):
    """Register installers for Fedora; dnf is the default from Fedora 22 on."""
    context.add_os_installer_key(OS_FEDORA, PIP_INSTALLER)
    context.add_os_installer_key(OS_FEDORA, DNF_INSTALLER)
    context.add_os_installer_key(OS_FEDORA, YUM_INSTALLER)
    context.add_os_installer_key(OS_FEDORA, SOURCE_INSTALLER)
    # Fedora > 21 uses dnf; older releases keep yum.
    context.set_default_os_installer_key(OS_FEDORA, lambda self: DNF_INSTALLER if self.get_version().isdigit() and int(self.get_version()) > 21 else YUM_INSTALLER)
    # Fedora > 20 rules are keyed by numeric version, earlier releases by codename.
    context.set_os_version_type(OS_FEDORA, lambda self: self.get_version() if self.get_version().isdigit() and int(self.get_version()) > 20 else self.get_codename())
def register_rhel(context):
    """Register installers for RHEL; dnf is the default from RHEL 8 on."""
    context.add_os_installer_key(OS_RHEL, PIP_INSTALLER)
    context.add_os_installer_key(OS_RHEL, DNF_INSTALLER)
    context.add_os_installer_key(OS_RHEL, YUM_INSTALLER)
    context.add_os_installer_key(OS_RHEL, SOURCE_INSTALLER)
    # RHEL >= 8 uses dnf; only the major version component is considered.
    context.set_default_os_installer_key(OS_RHEL, lambda self: DNF_INSTALLER if self.get_version().split('.', 1)[0].isdigit() and int(self.get_version().split('.', 1)[0]) >= 8 else YUM_INSTALLER)
    # RHEL rules are keyed by major version only (e.g. '8', not '8.4').
    context.set_os_version_type(OS_RHEL, lambda self: self.get_version().split('.', 1)[0])
def register_rhel_clone(context, os_rhel_clone_name):
    """Alias a RHEL rebuild (AlmaLinux, CentOS, ...) to RHEL's major version."""
    # Some distributions are rebuilds of RHEL and can be treated like RHEL
    # because they are versioned the same and contain the same packages.
    (os_name, os_version) = context.get_os_name_and_version()
    if os_name == os_rhel_clone_name and not context.os_override:
        print('rosdep detected OS: [%s] aliasing it to: [%s]' %
              (os_rhel_clone_name, OS_RHEL), file=sys.stderr)
        context.set_os_override(OS_RHEL, os_version.split('.', 1)[0])
def rpm_detect_py(packages):
    """Detect installed packages via the python rpm bindings."""
    import rpm
    ts = rpm.TransactionSet()
    detected = []
    for raw_req in packages:
        req = rpm_expand_py(raw_req)
        # any installed rpm providing the (expanded) capability satisfies it
        matches = ts.dbMatch(rpm.RPMTAG_PROVIDES, req)
        if len(matches) > 0:
            detected.append(raw_req)
    return detected
def rpm_detect_cmd(raw_packages, exec_fn=None):
    """Detect installed packages by shelling out to the 'rpm' tool."""
    if exec_fn is None:
        exec_fn = read_stdout
    # Expand any rpm macros first, then ask rpm which capabilities installed
    # packages provide; a requested capability appearing in that list is
    # considered installed.
    packages = [rpm_expand_cmd(package, exec_fn) for package in raw_packages]
    cmd = ['rpm', '-q', '--whatprovides', '--qf', '[%{PROVIDES}\n]'] + packages
    provided = exec_fn(cmd).split('\n')
    # Report results using the caller's original (unexpanded) spellings.
    return [raw_packages[i] for i, package in enumerate(packages)
            if package in provided]
def rpm_detect(packages, exec_fn=None):
    """Return installed packages, preferring the python rpm bindings and
    falling back to the command line tool when they are unavailable."""
    try:
        return rpm_detect_py(packages)
    except ImportError:
        rd_debug('Failed to import rpm module, falling back to slow method')
        return rpm_detect_cmd(packages, exec_fn)
def rpm_expand_py(macro):
    """Expand an rpm macro (e.g. '%{_libdir}') using the python rpm bindings."""
    import rpm
    # plain package names contain no '%', so skip the expansion
    if '%' not in macro:
        return macro
    expanded = rpm.expandMacro(macro)
    rd_debug('Expanded rpm macro in \'%s\' to \'%s\'' % (macro, expanded))
    return expanded
def rpm_expand_cmd(macro, exec_fn=None):
    """
    Expand an rpm macro by shelling out to 'rpm -E'.
    :param exec_fn: function to execute Popen and read stdout (for testing)
    """
    # plain package names contain no '%', so skip the subprocess round trip
    if '%' not in macro:
        return macro
    cmd = ['rpm', '-E', macro]
    if exec_fn is None:
        exec_fn = read_stdout
    expanded = exec_fn(cmd).strip()
    rd_debug('Expanded rpm macro in \'%s\' to \'%s\'' % (macro, expanded))
    return expanded
def rpm_expand(package, exec_fn=None):
    """Expand an rpm macro, preferring the python bindings over the CLI."""
    try:
        return rpm_expand_py(package)
    except ImportError:
        return rpm_expand_cmd(package, exec_fn)
def get_rpm_version_py():
    """Return the rpm version reported by the python rpm bindings."""
    from rpm import __version__ as rpm_version
    return rpm_version
def get_rpm_version_cmd():
    """Return the rpm version by parsing 'rpm --version' output."""
    # First line looks like 'RPM version X.Y.Z'; take the last token.
    first_line = subprocess.check_output(['rpm', '--version']).splitlines()[0]
    return first_line.split(b' ')[-1].decode()
def get_rpm_version():
    """Return the rpm version, preferring the python bindings over the CLI."""
    try:
        return get_rpm_version_py()
    except ImportError:
        return get_rpm_version_cmd()
class DnfInstaller(PackageManagerInstaller):
    """
    This class provides the functions for installing using dnf
    it's methods partially implement the Rosdep OS api to complement
    the roslib.OSDetect API.
    """

    def __init__(self):
        super(DnfInstaller, self).__init__(rpm_detect)

    def get_version_strings(self):
        # First line of 'dnf --version' is the dnf version itself.
        dnf_version = subprocess.check_output(['dnf', '--version']).splitlines()[0].decode()
        return [
            'dnf {}'.format(dnf_version),
            'rpm {}'.format(get_rpm_version()),
        ]

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        raw_packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        packages = [rpm_expand(package) for package in raw_packages]
        if not packages:
            return []
        # Build the flag list instead of enumerating every combination:
        # '--setopt=strict=0' keeps dnf going when some packages are missing.
        cmd = ['dnf']
        if not interactive:
            cmd.append('--assumeyes')
        if quiet:
            cmd.append('--quiet')
        cmd += ['--setopt=strict=0', 'install']
        return [self.elevate_priv(cmd) + packages]
class YumInstaller(PackageManagerInstaller):
    """
    This class provides the functions for installing using yum
    it's methods partially implement the Rosdep OS api to complement
    the roslib.OSDetect API.
    """

    def __init__(self):
        super(YumInstaller, self).__init__(rpm_detect)

    def get_version_strings(self):
        # First line of 'yum --version' is the yum version itself.
        yum_version = subprocess.check_output(['yum', '--version']).splitlines()[0].decode()
        return [
            'yum {}'.format(yum_version),
            'rpm {}'.format(get_rpm_version()),
        ]

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        raw_packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        packages = [rpm_expand(package) for package in raw_packages]
        if not packages:
            return []
        # Build the flag list instead of enumerating every combination:
        # '--skip-broken' keeps yum going when some packages are unresolvable.
        cmd = ['yum']
        if not interactive:
            cmd.append('--assumeyes')
        if quiet:
            cmd.append('--quiet')
        cmd += ['--skip-broken', 'install']
        return [self.elevate_priv(cmd) + packages]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/redhat.py | redhat.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author Murph Finnicum/murph@murph.cc
# A word on atoms
# We'll be using 'atoms' instead of 'packages' for the majority of the gentoo installer.
# Atoms can specify a package version (either exactly, or min/max version), flags it has
# to be built with, and even repositories it has to come from
#
# Here are some valid atoms and their meanings:
# sed // A package named 'sed'
# sys-apps/sed // sed from the category 'sys-apps'. There can be collisions otherwise.
# sys-apps/sed::gentoo // sed from the category 'sys-apps' and the repository 'gentoo' (the default).
# >=sys-apps/sed-4 // sed of at least version 4
# sed[static,-nls] // sed built the static USE flag and withou the nls one
import os
from rospkg.os_detect import OS_GENTOO
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
PORTAGE_INSTALLER = 'portage'  # rosdep installer key for Gentoo's portage
def register_installers(context):
    """Register the portage installer implementation with *context*."""
    context.set_installer(PORTAGE_INSTALLER, PortageInstaller())
def register_platforms(context):
    """Declare the installers available on Gentoo; portage is the default."""
    context.add_os_installer_key(OS_GENTOO, PORTAGE_INSTALLER)
    context.add_os_installer_key(OS_GENTOO, SOURCE_INSTALLER)
    context.set_default_os_installer_key(OS_GENTOO, lambda self: PORTAGE_INSTALLER)
# Determine whether an atom is already satisfied
def portage_detect_single(atom, exec_fn=read_stdout):
    """
    Check if a given atom is installed.

    :param atom: portage atom to look up
    :param exec_fn: function to execute Popen and read stdout (for testing)
    """
    # 'portageq match' prints one line per installed package that satisfies
    # the atom, so any output at all means the atom is satisfied.
    # TODO: consider checking the name of the returned package
    # TODO: decide whether returning True when several packages match is OK
    matches = exec_fn(['portageq', 'match', '/', atom])
    return len(matches) >= 1
def portage_detect(atoms, exec_fn=read_stdout):
    """
    Given a list of atoms, return a list of which are already installed.

    :param atoms: iterable of portage atoms to check
    :param exec_fn: function to execute Popen and read stdout (for testing)
    :returns: atoms found installed (checked in sorted order for lists)
    """
    # Check in a deterministic (sorted) order so tests see a stable call
    # sequence.  Sort a *copy*: the previous implementation sorted the
    # caller's list in place, mutating the argument as a side effect.
    # TODO: make testing better to not need this
    if isinstance(atoms, list):
        atoms = sorted(atoms)
    return [a for a in atoms if portage_detect_single(a, exec_fn)]
# Check portage and needed tools for existence and compatibility
def portage_available():
    """Return True if both portageq and emerge exist on this system."""
    # We only rely on standard, documented portage features, which every
    # released portage version provides, so no version check is done yet.
    # TODO: check versions once a minimum requirement is established
    required_tools = ('/usr/bin/portageq', '/usr/bin/emerge')
    return all(os.path.exists(tool) for tool in required_tools)
class PortageInstaller(PackageManagerInstaller):
    """Installer backend that resolves rosdep keys with Gentoo's emerge."""

    def __init__(self):
        super(PortageInstaller, self).__init__(portage_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return the emerge command lines needed to install *resolved* atoms."""
        atoms = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not atoms:
            return []
        cmd = self.elevate_priv(['emerge'])
        # '-a' makes emerge ask for confirmation before acting.
        if interactive:
            cmd.append('-a')
        cmd.extend(atoms)
        return [cmd]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/gentoo.py | gentoo.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
import subprocess
from rospkg.os_detect import OS_ARCH
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
PACMAN_INSTALLER = 'pacman'  # rosdep installer key for Arch's pacman
def register_installers(context):
    """Register the pacman installer implementation with *context*."""
    context.set_installer(PACMAN_INSTALLER, PacmanInstaller())
def register_platforms(context):
    """Declare the installers available on Arch Linux; pacman is the default."""
    context.add_os_installer_key(OS_ARCH, SOURCE_INSTALLER)
    context.add_os_installer_key(OS_ARCH, PACMAN_INSTALLER)
    context.set_default_os_installer_key(OS_ARCH, lambda self: PACMAN_INSTALLER)
def pacman_detect_single(p):
    """Return True if *p* is installed; 'pacman -T' exits 0 when nothing is missing."""
    returncode = subprocess.call(['pacman', '-T', p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return returncode == 0
def pacman_detect(packages):
    """Return the subset of *packages* already installed according to pacman."""
    return list(filter(pacman_detect_single, packages))
class PacmanInstaller(PackageManagerInstaller):
    """Installer backend that resolves rosdep keys with Arch's pacman."""

    def __init__(self):
        super(PacmanInstaller, self).__init__(pacman_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return a single 'pacman -S' command covering every missing package."""
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not packages:
            return []
        flags = []
        if not interactive:
            # Skip pacman's confirmation prompts.
            flags.append('--noconfirm')
        if not reinstall:
            # Only pull packages that are missing or out of date.
            flags.append('--needed')
        if quiet:
            flags.append('-q')
        return [self.elevate_priv(['pacman', '-S'] + flags + packages)]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/arch.py | arch.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Nikolay Nikolov/niko.b.nikolov@gmail.com
import subprocess
import os
from ..core import InstallFailed
from .pip import PIP_INSTALLER
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
from ..shell_utils import read_stdout
SLACKWARE_OS_NAME = 'slackware'
SBOTOOLS_INSTALLER = 'sbotools'  # installer key for SlackBuilds via sbotools
SLACKPKG_INSTALLER = 'slackpkg'  # installer key for official slackpkg packages
def register_installers(context):
    """Register the sbotools and slackpkg installer implementations with *context*."""
    context.set_installer(SBOTOOLS_INSTALLER, SbotoolsInstaller())
    context.set_installer(SLACKPKG_INSTALLER, SlackpkgInstaller())
def register_platforms(context):
    """Declare the installers available on Slackware; sbotools is the default."""
    context.add_os_installer_key(SLACKWARE_OS_NAME, SBOTOOLS_INSTALLER)
    context.add_os_installer_key(SLACKWARE_OS_NAME, PIP_INSTALLER)
    context.add_os_installer_key(SLACKWARE_OS_NAME, SOURCE_INSTALLER)
    context.add_os_installer_key(SLACKWARE_OS_NAME, SLACKPKG_INSTALLER)
    context.set_default_os_installer_key(SLACKWARE_OS_NAME, lambda self: SBOTOOLS_INSTALLER)
def sbotools_available():
    """Return True if the sboinstall tool from sbotools is present."""
    return os.path.exists('/usr/sbin/sboinstall')
def sbotools_detect_single(p):
    """
    Return True if an entry in /var/log/packages starts with *p*,
    matched case-insensitively (like the former ``grep -i '^<p>'``).
    """
    # NOTE(review): the previous implementation piped read_stdout()'s result
    # (text on Python 3) into grep's bytes stdin, and also shadowed the
    # parameter 'p' with the Popen handle.  Do the prefix match in Python
    # instead of shelling out to grep.
    pkg_list = read_stdout(['ls', '/var/log/packages'])
    prefix = p.lower()
    return any(entry.lower().startswith(prefix) for entry in pkg_list.splitlines())
def sbotools_detect(packages):
    """Return the subset of *packages* already installed on this Slackware system."""
    return list(filter(sbotools_detect_single, packages))
class SbotoolsInstaller(PackageManagerInstaller):
    """Installer backend that builds packages from SlackBuilds via sbotools."""

    def __init__(self):
        super(SbotoolsInstaller, self).__init__(sbotools_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return one 'sboinstall <pkg> -j' command per package to install."""
        if not sbotools_available():
            raise InstallFailed((SBOTOOLS_INSTALLER, 'sbotools is not installed'))
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        # Each package is installed by its own sboinstall invocation.
        return [self.elevate_priv(['sboinstall', pkg, '-j']) for pkg in packages]
def slackpkg_available():
    """Return True if the slackpkg tool is present."""
    return os.path.exists('/usr/sbin/slackpkg')
def slackpkg_detect_single(p):
    """Return True if 'slackpkg search' exits successfully for package *p*."""
    returncode = subprocess.call(['slackpkg', 'search', p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return returncode == 0
def slackpkg_detect(packages):
    """Return the subset of *packages* found by slackpkg."""
    return list(filter(slackpkg_detect_single, packages))
class SlackpkgInstaller(PackageManagerInstaller):
    """Installer backend for Slackware's official slackpkg tool."""

    def __init__(self):
        super(SlackpkgInstaller, self).__init__(slackpkg_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return one 'slackpkg install' command per package.

        slackpkg does not provide a non-interactive mode, so *interactive*
        is ignored.
        """
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        return [self.elevate_priv(['slackpkg', 'install', pkg]) for pkg in packages]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/slackware.py | slackware.py |
# Copyright (c) 2019, LG Electronics, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Andre Rosa/andre.rosa@lge.com
import subprocess
from rospkg.os_detect import OS_OPENEMBEDDED, OsDetect
from ..installers import PackageManagerInstaller
OPKG_INSTALLER = 'opkg'  # rosdep installer key for OpenEmbedded's opkg
def register_installers(context):
    """Register the opkg installer implementation with *context*."""
    context.set_installer(OPKG_INSTALLER, OpkgInstaller())
def register_platforms(context):
    """Declare the installers available on OpenEmbedded platforms."""
    register_oe(context)
def register_oe(context):
    """Make opkg the only (and default) installer for OpenEmbedded."""
    context.add_os_installer_key(OS_OPENEMBEDDED, OPKG_INSTALLER)
    context.set_default_os_installer_key(OS_OPENEMBEDDED, lambda self: OPKG_INSTALLER)
    # Version OpenEmbedded by its release codename rather than a numeric version.
    context.set_os_version_type(OS_OPENEMBEDDED, OsDetect.get_codename)
def opkg_detect(pkgs, exec_fn=None):
    """
    Given a list of package, return the list of installed packages.

    NOTE: this is a stub and will be filled in once the semantics are fully defined.

    :param pkgs: list of package names, optionally followed by a fixed version (`foo=3.0`)
    :param exec_fn: function to execute Popen and read stdout (for testing)
    :return: list elements in *pkgs* that were found installed on the system
    """
    raise NotImplementedError("opkg_detect is not implemented yet")
class OpkgInstaller(PackageManagerInstaller):
    """
    An implementation of the Installer for use on oe systems.

    NOTE: These are stubs currently and will be filled after semantics
    are fully defined.
    """

    def __init__(self):
        super(OpkgInstaller, self).__init__(opkg_detect)

    def get_version_strings(self):
        """Return ['opkg <version>'] parsed from 'opkg --version' output."""
        raw = subprocess.check_output(['opkg', '--version'])
        # The version number is the third whitespace-separated token on the
        # first output line (check_output returns bytes, hence b' ').
        first_line = raw.splitlines()[0]
        version = first_line.split(b' ')[2].decode()
        return ['opkg {}'.format(version)]

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        raise NotImplementedError('get_install_command is not implemented yet')
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/openembedded.py | openembedded.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Tingfan Wu tingfan@gmail.com
from __future__ import print_function
from rospkg.os_detect import OS_CYGWIN
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
APT_CYG_INSTALLER = 'apt-cyg'  # rosdep installer key for Cygwin's apt-cyg
def register_installers(context):
    """Register the apt-cyg installer implementation with *context*."""
    context.set_installer(APT_CYG_INSTALLER, AptCygInstaller())
def register_platforms(context):
    """Declare the installers available on Cygwin; apt-cyg is the default."""
    context.add_os_installer_key(OS_CYGWIN, SOURCE_INSTALLER)
    context.add_os_installer_key(OS_CYGWIN, APT_CYG_INSTALLER)
    context.set_default_os_installer_key(OS_CYGWIN, lambda self: APT_CYG_INSTALLER)
def cygcheck_detect_single(p):
    """Return True if 'cygcheck -c' reports package *p* installed ('OK' in its output)."""
    output = read_stdout(['cygcheck', '-c', p])
    return 'OK' in output
def cygcheck_detect(packages):
    """Return the subset of *packages* that cygcheck reports installed."""
    return list(filter(cygcheck_detect_single, packages))
class AptCygInstaller(PackageManagerInstaller):
    """
    An implementation of the :class:`Installer` for use on
    cygwin-style systems.
    """

    def __init__(self):
        super(AptCygInstaller, self).__init__(cygcheck_detect)
        # Cygwin has no sudo; privilege elevation goes through cygstart.
        self.as_root = False
        self.sudo_command = 'cygstart --action=runas'

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return a single apt-cyg command installing every missing package."""
        # TODO: honor the *interactive* flag
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not packages:
            return []
        mirror = 'ftp://sourceware.org/pub/cygwinports'
        return [self.elevate_priv(['apt-cyg', '-m', mirror, 'install']) + packages]
if __name__ == '__main__':
    # cygcheck_detect expects an iterable of package names; passing the bare
    # string 'cygwin' made it probe each *character* as a package.
    print('test cygcheck_detect(true)', cygcheck_detect(['cygwin']))
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/cygwin.py | cygwin.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
from __future__ import print_function
import os
import pkg_resources
import subprocess
import sys
from ..core import InstallFailed
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# pip package manager key
PIP_INSTALLER = 'pip'
def register_installers(context):
    """Register the pip installer implementation with *context*."""
    context.set_installer(PIP_INSTALLER, PipInstaller())
def get_pip_command():
    """
    Return the pip invocation (as an argv list) matching ROS_PYTHON_VERSION,
    or None if no working pip could be found.
    """
    # NOTE: ROS_PYTHON_VERSION is assumed to be set ('2' or '3') by the ROS
    # environment before rosdep runs.
    # First try pip2 or pip3
    cmd = ['pip' + os.environ['ROS_PYTHON_VERSION']]
    if is_cmd_available(cmd):
        return cmd
    # Second, try using the same python executable since we know that exists.
    # Compare against sys.version_info rather than the first character of
    # sys.version, which would misbehave for multi-digit major versions.
    if os.environ['ROS_PYTHON_VERSION'] == str(sys.version_info[0]):
        try:
            import pip  # noqa: F401 -- only probing for availability
        except ImportError:
            pass
        else:
            return [sys.executable, '-m', 'pip']
    # Finally, try python2 or python3 commands
    cmd = ['python' + os.environ['ROS_PYTHON_VERSION'], '-m', 'pip']
    if is_cmd_available(cmd):
        return cmd
    return None
def is_cmd_available(cmd):
    """Return True if *cmd* (an argv list) can be spawned, False otherwise."""
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        proc.communicate()
    except OSError:
        return False
    return True
def pip_detect(pkgs, exec_fn=None):
    """
    Given a list of package, return the list of installed packages.

    :param pkgs: list of pip package names to look for
    :param exec_fn: function to execute Popen and read stdout (for testing)
    """
    pip_cmd = get_pip_command()
    if not pip_cmd:
        return []
    # Only fall back to probing with `pip show` when running for real,
    # i.e. when no exec_fn was injected by a test.
    fallback_to_pip_show = exec_fn is None
    if exec_fn is None:
        exec_fn = read_stdout
    installed = []
    freeze_output = exec_fn(pip_cmd + ['freeze'])
    for line in freeze_output.split('\n'):
        name = line.split('==')[0]
        if name in pkgs:
            installed.append(name)
    # Try to detect with the return code of `pip show`.
    # This can show the existence of things like `argparse` which
    # otherwise do not show up in `pip freeze`.
    # See:
    # https://github.com/pypa/pip/issues/1570#issuecomment-71111030
    if fallback_to_pip_show:
        missing = [name for name in pkgs if name not in installed]
        for name in missing:
            # Old pip versions always exit 0, so the stdout content is
            # checked as well as the return code.
            proc = subprocess.Popen(
                pip_cmd + ['show', name],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT
            )
            output, _ = proc.communicate()
            output = output.strip()
            if proc.returncode == 0 and output:
                # `pip show` detected it, add it to the list.
                installed.append(name)
    return installed
class PipInstaller(PackageManagerInstaller):
    """
    :class:`Installer` support for pip.
    """

    def __init__(self):
        super(PipInstaller, self).__init__(pip_detect, supports_depends=True)

    def get_version_strings(self):
        """Return human-readable version strings for pip and setuptools."""
        versions = {
            name: pkg_resources.get_distribution(name).version
            for name in ('pip', 'setuptools')
        }
        return [
            'pip {}'.format(versions['pip']),
            'setuptools {}'.format(versions['setuptools']),
        ]

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return one 'pip install -U' command per package to install."""
        pip_cmd = get_pip_command()
        if not pip_cmd:
            raise InstallFailed((PIP_INSTALLER, 'pip is not installed'))
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not packages:
            return []
        base_cmd = pip_cmd + ['install', '-U']
        if quiet:
            base_cmd.append('-q')
        if reinstall:
            # '-I' makes pip ignore already-installed versions.
            base_cmd.append('-I')
        return [self.elevate_priv(base_cmd + [pkg]) for pkg in packages]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/pip.py | pip.py |
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Original from cygwin.py by Tingfan Wu tingfan@gmail.com
# Modified for FreeBSD by Rene Ladan rene@freebsd.org
# Updated for FreeBSD with pkg by Trenton Schulz trentonw@ifi.uio.no
from rospkg.os_detect import OS_FREEBSD
from .pip import PIP_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
PKG_INSTALLER = 'pkg'  # rosdep installer key for FreeBSD's pkg
def register_installers(context):
    """Register the pkg installer implementation with *context*."""
    context.set_installer(PKG_INSTALLER, PkgInstaller())
def register_platforms(context):
    """Declare the installers available on FreeBSD; pkg is the default."""
    context.add_os_installer_key(OS_FREEBSD, PKG_INSTALLER)
    context.add_os_installer_key(OS_FREEBSD, PIP_INSTALLER)
    context.set_default_os_installer_key(OS_FREEBSD, lambda self: PKG_INSTALLER)
def pkg_detect_single(p, exec_fn):
    """Return True if FreeBSD package *p* is installed according to 'pkg query'."""
    # 'builtin' marks rosdep keys satisfied by the base system.
    if p == "builtin":
        return True
    output = exec_fn(['/usr/sbin/pkg', 'query', '%n', p])
    # Any non-whitespace output from 'pkg query' counts as installed.
    return bool(output.split())
def pkg_detect(packages, exec_fn=None):
    """Return the subset of *packages* installed on this FreeBSD system."""
    if exec_fn is None:
        exec_fn = read_stdout
    return [pkg for pkg in packages if pkg_detect_single(pkg, exec_fn)]
class PkgInstaller(PackageManagerInstaller):
    """
    An implementation of the Installer for use on FreeBSD-style
    systems.
    """

    def __init__(self):
        super(PkgInstaller, self).__init__(pkg_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return a single 'pkg install' command covering every missing package."""
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not packages:
            return []
        # '-y' answers yes to pkg's prompts so the install runs unattended.
        return [self.elevate_priv(['/usr/sbin/pkg', 'install', '-y']) + packages]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/freebsd.py | freebsd.py |
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
from __future__ import print_function
import os
try:
from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen
from urllib import urlretrieve
from urllib2 import URLError
import hashlib
import yaml
from ..core import rd_debug, InvalidData
from ..installers import PackageManagerInstaller, InstallFailed
from ..shell_utils import create_tempfile_from_string_and_execute
SOURCE_INSTALLER = 'source'  # rosdep installer key for from-source installs
def register_installers(context):
    """Register the source installer implementation with *context*."""
    context.set_installer(SOURCE_INSTALLER, SourceInstaller())
class InvalidRdmanifest(Exception):
    """
    Raised when an rdmanifest does not have the expected format.
    """
    pass
class DownloadFailed(Exception):
    """
    Raised when a file download fails, either due to I/O issues or failed
    md5sum validation.
    """
    pass
def _sub_fetch_file(url, md5sum=None):
"""
Sub-routine of _fetch_file
:raises: :exc:`DownloadFailed`
"""
contents = ''
try:
fh = urlopen(url)
contents = fh.read()
if md5sum is not None:
filehash = hashlib.md5(contents).hexdigest()
if md5sum and filehash != md5sum:
raise DownloadFailed("md5sum didn't match for %s. Expected %s got %s" % (url, md5sum, filehash))
except URLError as ex:
raise DownloadFailed(str(ex))
return contents
def get_file_hash(filename):
    """Return the MD5 hex digest of *filename*, read in 8 KiB chunks."""
    digest = hashlib.md5()
    with open(filename, 'rb') as stream:
        while True:
            chunk = stream.read(8192)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def fetch_file(url, md5sum=None):
    """
    Download file. Optionally validate with md5sum

    :param url: URL to download
    :param md5sum: Expected MD5 sum of contents
    :returns: (contents, error_message); error_message is empty on success
    """
    error = ''
    contents = ''
    try:
        contents = _sub_fetch_file(url, md5sum)
        # Normalize the payload to text for the YAML parser downstream.
        if not isinstance(contents, str):
            contents = contents.decode('utf-8')
    except DownloadFailed as e:
        rd_debug('Download of file %s failed' % (url))
        error = str(e)
    return contents, error
def load_rdmanifest(contents):
    """
    Parse *contents* as a YAML rdmanifest.

    :raises: :exc:`InvalidRdmanifest` if the YAML cannot be parsed
    """
    try:
        return yaml.safe_load(contents)
    except yaml.YAMLError as ex:
        # YAMLError covers ScannerError as well as ParserError; previously
        # only ScannerError was caught, so parser errors escaped as raw
        # yaml exceptions instead of InvalidRdmanifest.
        raise InvalidRdmanifest('Failed to parse yaml in %s: Error: %s' % (contents, ex))
def download_rdmanifest(url, md5sum, alt_url=None):
    """
    :param url: URL to download rdmanifest from
    :param md5sum: MD5 sum for validating url download, or None
    :param alt_url: optional mirror URL tried when *url* fails
    :returns: (contents of rdmanifest, download_url). download_url is
      either *url* or *alt_url* and indicates which of the locations
      contents was generated from.
    :raises: :exc:`DownloadFailed`
    :raises: :exc:`InvalidRdmanifest`
    """
    # Fetch the manifest from the primary location first.
    download_url = url
    error_prefix = 'Failed to load a rdmanifest from %s: ' % (url)
    contents, error = fetch_file(download_url, md5sum)
    if not contents and alt_url:
        # Primary failed; retry against the mirror.
        error_prefix = 'Failed to load a rdmanifest from either %s or %s: ' % (url, alt_url)
        download_url = alt_url
        contents, error = fetch_file(download_url, md5sum)
    if not contents:
        raise DownloadFailed(error_prefix + error)
    return load_rdmanifest(contents), download_url
# TODO: create SourceInstall instance objects
class SourceInstall(object):
    """In-memory representation of one rdmanifest (a from-source install recipe)."""

    def __init__(self):
        # All fields are populated by from_manifest().
        self.manifest = None
        self.manifest_url = None
        self.install_command = None
        self.check_presence_command = None
        self.exec_path = None
        self.tarball = None
        self.alternate_tarball = None
        self.tarball_md5sum = None
        self.dependencies = None

    @staticmethod
    def from_manifest(manifest, manifest_url):
        """Build a SourceInstall from a parsed rdmanifest dict.

        :raises: :exc:`InvalidRdmanifest` if the mandatory 'uri' key is missing
        """
        result = SourceInstall()
        result.manifest = manifest
        result.manifest_url = manifest_url
        rd_debug('Loading manifest:\n{{{%s\n}}}\n' % manifest)
        result.install_command = manifest.get('install-script', '')
        result.check_presence_command = manifest.get('check-presence-script', '')
        result.exec_path = manifest.get('exec-path', '.')
        try:
            result.tarball = manifest['uri']
        except KeyError:
            raise InvalidRdmanifest('uri required for source rosdeps')
        result.alternate_tarball = manifest.get('alternate-uri')
        result.tarball_md5sum = manifest.get('md5sum')
        result.dependencies = manifest.get('depends', [])
        return result

    def __str__(self):
        return 'source: %s' % (self.manifest_url)
    __repr__ = __str__
def is_source_installed(source_item, exec_fn=None):
    # Execute the rdmanifest's check-presence script; the truthiness of the
    # result indicates whether the dependency is already present.
    return create_tempfile_from_string_and_execute(source_item.check_presence_command, exec_fn=exec_fn)
def source_detect(pkgs, exec_fn=None):
    # Filter *pkgs* (SourceInstall instances) down to those already installed.
    return [x for x in pkgs if is_source_installed(x, exec_fn=exec_fn)]
class SourceInstaller(PackageManagerInstaller):
    """Installer that builds rosdeps from source as described by rdmanifest files."""
    def __init__(self):
        super(SourceInstaller, self).__init__(source_detect, supports_depends=True)
        # Maps rdmanifest URL -> [SourceInstall] so each manifest is only
        # downloaded once per process.
        self._rdmanifest_cache = {}
    def resolve(self, rosdep_args):
        """
        :raises: :exc:`InvalidData` If format invalid or unable
        to retrieve rdmanifests.
        :returns: [SourceInstall] instances.
        """
        try:
            url = rosdep_args['uri']
        except KeyError:
            raise InvalidData("'uri' key required for source rosdeps")
        alt_url = rosdep_args.get('alternate-uri', None)
        md5sum = rosdep_args.get('md5sum', None)
        # load manifest from cache or from web
        manifest = None
        if url in self._rdmanifest_cache:
            return self._rdmanifest_cache[url]
        elif alt_url in self._rdmanifest_cache:
            return self._rdmanifest_cache[alt_url]
        try:
            rd_debug('Downloading manifest [%s], mirror [%s]' % (url, alt_url))
            manifest, download_url = download_rdmanifest(url, md5sum, alt_url)
            resolved = SourceInstall.from_manifest(manifest, download_url)
            # Cache under whichever URL actually served the manifest.
            self._rdmanifest_cache[download_url] = [resolved]
            return [resolved]
        except DownloadFailed as ex:
            # not sure this should be masked this way
            raise InvalidData(str(ex))
        except InvalidRdmanifest as ex:
            raise InvalidData(str(ex))
    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        # Instead of attempting to describe the source-install steps
        # inside of the rosdep command chain, we shell out to an
        # external rosdep-source command. This separation means that
        # users can manually invoke rosdep-source and also keeps
        # 'get_install_command()' cleaner.
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        commands = []
        for p in packages:
            commands.append(['rosdep-source', 'install', p.manifest_url])
        return commands
    def get_depends(self, rosdep_args):
        # Dependencies come both from the rosdep args themselves and from
        # the 'depends' entries of the resolved rdmanifests.
        deps = rosdep_args.get('depends', [])
        for r in self.resolve(rosdep_args):
            deps.extend(r.dependencies)
        return deps
def install_from_file(rdmanifest_file):
    """Install the source rosdep described by a local rdmanifest file."""
    with open(rdmanifest_file, 'r') as handle:
        manifest = load_rdmanifest(handle.read())
    install_source(SourceInstall.from_manifest(manifest, rdmanifest_file))
def install_from_url(rdmanifest_url):
    # Fetch the manifest (no md5 pre-check, no mirror) and run its recipe.
    manifest, download_url = download_rdmanifest(rdmanifest_url, None, None)
    install_source(SourceInstall.from_manifest(manifest, download_url))
def install_source(resolved):
    """
    Download, verify, unpack and run the install script of a SourceInstall.

    :param resolved: :class:`SourceInstall` instance to install
    :raises: :exc:`InstallFailed` on md5sum mismatch or a failing install script
    """
    import shutil
    import tarfile
    import tempfile
    tempdir = tempfile.mkdtemp()
    rd_debug('created tmpdir [%s]' % (tempdir))
    rd_debug('Fetching tarball %s' % resolved.tarball)
    # compute desired download path
    filename = os.path.join(tempdir, os.path.basename(resolved.tarball))
    f = urlretrieve(resolved.tarball, filename)
    assert f[0] == filename
    if resolved.tarball_md5sum:
        rd_debug('checking md5sum on tarball')
        hash1 = get_file_hash(filename)
        if resolved.tarball_md5sum != hash1:
            # try backup tarball if it is defined
            if resolved.alternate_tarball:
                # NOTE(review): this download lands at a urlretrieve-chosen
                # temp path, not inside tempdir like the primary tarball.
                f = urlretrieve(resolved.alternate_tarball)
                filename = f[0]
                hash2 = get_file_hash(filename)
                if resolved.tarball_md5sum != hash2:
                    failure = (SOURCE_INSTALLER, 'md5sum check on %s and %s failed. Expected %s got %s and %s' % (resolved.tarball, resolved.alternate_tarball, resolved.tarball_md5sum, hash1, hash2))
                    raise InstallFailed(failure=failure)
            else:
                raise InstallFailed((SOURCE_INSTALLER, 'md5sum check on %s failed. Expected %s got %s ' % (resolved.tarball, resolved.tarball_md5sum, hash1)))
    else:
        rd_debug('No md5sum defined for tarball, not checking.')
    try:
        # This is a bit hacky. Basically, don't unpack dmg files as
        # we are currently using source rosdeps for Nvidia Cg.
        if not filename.endswith('.dmg'):
            rd_debug('Extracting tarball')
            tarf = tarfile.open(filename)
            # SECURITY NOTE: extractall() trusts archive member paths; a
            # malicious tarball could write outside tempdir (path traversal).
            tarf.extractall(tempdir)
        else:
            rd_debug('Bypassing tarball extraction as it is a dmg')
        rd_debug('Running installation script')
        success = create_tempfile_from_string_and_execute(resolved.install_command, os.path.join(tempdir, resolved.exec_path))
        if success:
            rd_debug('successfully executed script')
        else:
            raise InstallFailed((SOURCE_INSTALLER, 'installation script returned with error code'))
    finally:
        # Always remove the scratch directory, even on failure.
        rd_debug('cleaning up tmpdir [%s]' % (tempdir))
        shutil.rmtree(tempdir)
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/source.py | source.py |
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com, Ken Conley
import subprocess
import json
import sys
import traceback
from rospkg.os_detect import OS_OSX, OsDetect
from ..core import InstallFailed, RosdepInternalError, InvalidData
from .pip import PIP_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# add additional os names for brew, macports (TODO)
OSXBREW_OS_NAME = 'osxbrew'
BREW_INSTALLER = 'homebrew'
MACPORTS_INSTALLER = 'macports'
# py3k compatibility: ``basestring`` only exists on Python 2; fall back to
# ``str`` so the isinstance() checks below work on both major versions.
try:
    _basestring = basestring
except NameError:
    _basestring = str
def register_installers(context):
    """Register the OS X package-manager installers with *context*."""
    for key, installer in ((MACPORTS_INSTALLER, MacportsInstaller()),
                           (BREW_INSTALLER, HomebrewInstaller())):
        context.set_installer(key, installer)
def register_platforms(context):
    """Associate the OS X platform with its supported installers.

    Homebrew is the default installer; the OS version is reported by
    codename.
    """
    for installer_key in (BREW_INSTALLER, MACPORTS_INSTALLER,
                          PIP_INSTALLER, SOURCE_INSTALLER):
        context.add_os_installer_key(OS_OSX, installer_key)
    context.set_default_os_installer_key(OS_OSX, lambda self: BREW_INSTALLER)
    context.set_os_version_type(OS_OSX, OsDetect.get_codename)
def is_port_installed():
    """Return True if the MacPorts ``port`` binary can be executed."""
    try:
        proc = subprocess.Popen(['port'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        # binary not found / not executable
        return False
    proc.communicate()
    return True
def port_detect(pkgs, exec_fn=None):
    """Return the subset of *pkgs* that MacPorts reports as active.

    :param exec_fn: function to execute Popen and read stdout (for testing)
    """
    if not is_port_installed():
        return []
    if exec_fn is None:
        exec_fn = read_stdout
    installed = []
    for line in exec_fn(['port', 'installed'] + pkgs).split('\n'):
        fields = line.split()
        # 'port installed' lines look like: <name> <version> (active)
        if len(fields) == 3 and fields[0] in pkgs and fields[2] == '(active)':
            installed.append(fields[0])
    return installed
class MacportsInstaller(PackageManagerInstaller):
    """
    An implementation of the :class:`Installer` API for use on
    macports systems.
    """

    def __init__(self):
        super(MacportsInstaller, self).__init__(port_detect)

    def get_version_strings(self):
        """Return a one-element list with the installed MacPorts version."""
        try:
            p = subprocess.Popen(
                ['port', 'version'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            # communicate() returns bytes on Python 3; decode before the
            # str replacement (the original raised TypeError here).
            version = stdout.decode().replace('Version: ', '')
            return ['Macports {}'.format(version.strip())]
        except OSError:
            return ['Macports not-found']

    def get_install_command(self, resolved, interactive=True, reinstall=False):
        """Return ``port install`` commands for packages not yet installed.

        :raises: :exc:`InstallFailed` if MacPorts itself is not installed
        """
        if not is_port_installed():
            raise InstallFailed((MACPORTS_INSTALLER, 'MacPorts is not installed'))
        packages = self.get_packages_to_install(resolved)
        if not packages:
            return []
        # TODO: interactive
        return [self.elevate_priv(['port', 'install', p]) for p in packages]
def is_brew_installed():
    """Return True if the Homebrew ``brew`` binary can be executed."""
    try:
        proc = subprocess.Popen(['brew'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        # binary not found / not executable
        return False
    proc.communicate()
    return True
class HomebrewResolution(object):
    """Resolution information for a single package of a Homebrew rosdep."""

    def __init__(self, package, install_flags, options):
        """
        :param package: Homebrew package name, possibly fully qualified
            with tap.
        :param install_flags: list of additional non-option flag strings
            for ``brew install`` and ``brew deps`` (e.g. ``--HEAD``)
        :param options: list of option strings for the homebrew package
        """
        self.package = package
        self.install_flags = install_flags
        self.options = options

    def __eq__(self, other):
        return (self.package, self.install_flags, self.options) == \
            (other.package, other.install_flags, other.options)

    def __hash__(self):
        # lists are unhashable, so fold them into tuples
        return hash((type(self),
                     self.package,
                     tuple(self.install_flags),
                     tuple(self.options)))

    def __str__(self):
        return ' '.join(self.to_list())

    def to_list(self):
        """Return package name, install flags and options as one flat list."""
        return [self.package] + self.install_flags + self.options
def brew_strip_pkg_name(package):
    """Strip the tap information of a fully qualified package name.

    :returns: Unqualified package name. E.g. 'foo-pkg' for input
        'ros/hydro/foo-pkg'
    """
    if not isinstance(package, str):  # package is a bytes object
        package = package.decode()
    return package.split('/')[-1]


def brew_detect(resolved, exec_fn=None):
    """Given a list of resolutions, return the list of installed resolutions.

    :param resolved: List of HomebrewResolution objects
    :param exec_fn: function to execute Popen and read stdout (for testing)
    :returns: Filtered list of HomebrewResolution objects, in input order
    """
    if exec_fn is None:
        exec_fn = read_stdout
    std_out = exec_fn(['brew', 'list'])
    installed_formulae = std_out.split()

    def is_installed(r):
        # TODO: Does not check installed version (stable, devel, HEAD)
        # TODO: Does not check origin (Tap) of formula
        # TODO: Does not handle excluding options (e.g. specifying
        #       --without-foo for --with-foo option)
        # fast fail with a quick check first, then slower check if
        # really linked and for options
        if brew_strip_pkg_name(r.package) not in installed_formulae:
            return False
        std_out = exec_fn(['brew', 'info', r.package, '--json=v1'])
        # Bug fix: the original left installed_options unbound (NameError)
        # when no installed spec matched the linked version.
        installed_options = []
        try:
            pkg_info = json.loads(std_out)
            pkg_info = pkg_info[0]
            linked_version = pkg_info['linked_keg']
            if not linked_version:
                return False
            for spec in pkg_info['installed']:
                if spec['version'] == linked_version:
                    installed_options = spec['used_options']
                    break
        except (ValueError, TypeError):
            e_type, e, tb = sys.exc_info()
            raise RosdepInternalError(
                e, """Error while parsing brew info for '{0}'
 * Output of `brew info {0} --json=v1`:
 {1}
 * Error while parsing:
 {2}""".format(r.package, std_out, ''.join(traceback.format_exception(e_type, e, tb))))
        # installed only counts if every requested option is active
        return set(r.options) <= set(installed_options)

    # preserve order
    return [r for r in resolved if is_installed(r)]
class HomebrewInstaller(PackageManagerInstaller):
    """
    An implementation of Installer for use on homebrew systems.

    Some examples for supported rosdep specifications:

    # Example 1: flat list of options if only one package defined.
    foo:
      osx:
        homebrew:
          depends: [bar]
          options: [--with-quux, --with-quax]
          packages: [foo-pkg]

    # Example 2: list of list of options for multiple packages
    bar:
      osx:
        homebrew:
          options: [[], [--with-quux]]
          packages: [bar-pkg, bar-pkg-dev]

    # Example 3: list of options can be shorter than list of packages
    # (filling up with empty options)
    baz:
      osx:
        homebrew:
          options: [[--with-quax]]
          packages: [baz-pkg, baz-pkg-dev]

    # Example 4: No options is fine.
    buz:
      osx:
        homebrew:
          packages: [buz-pkg]

    ``install_flags`` are handled analogously to ``options``.
    """

    def __init__(self):
        super(HomebrewInstaller, self).__init__(brew_detect, supports_depends=True)
        self.as_root = False

    def get_version_strings(self):
        """Return the output lines of ``brew --version`` (or a not-found marker)."""
        try:
            p = subprocess.Popen(
                ['brew', '--version'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            # decode so we return str lines, consistent with the other installers
            return stdout.decode().splitlines()
        except OSError:
            return ['Homebrew not-found']

    def resolve(self, rosdep_args):
        """
        See :meth:`Installer.resolve()`

        :returns: list of :class:`HomebrewResolution` objects
        :raises: :exc:`InvalidData` on malformed options/install_flags
        """
        def coerce_to_list(options):
            # accept either a YAML list or a whitespace-separated string
            if isinstance(options, list):
                return options
            elif isinstance(options, _basestring):
                return options.split()
            else:
                raise InvalidData("Expected list or string for options '%s'" % options)

        def handle_options(options):
            # if only one package is specified we allow a flat list of options
            if len(packages) == 1 and options and not isinstance(options[0], list):
                options = [options]
            else:
                options = list(map(coerce_to_list, options))
            # make sure options is a list of list of strings
            try:
                valid = all([isinstance(x, _basestring) for option in options for x in option])
            except Exception as e:
                raise InvalidData("Invalid list of options '%s', error: %s" % (options, e))
            else:
                if not valid:
                    raise InvalidData("Invalid list of options '%s'" % options)
            # allow only fewer or equal number of option lists; pad with
            # empty lists so lengths match the package list
            if len(options) > len(packages):
                raise InvalidData("More options '%s' than packages '%s'" % (options, packages))
            else:
                options.extend([[]] * (len(packages) - len(options)))
            return options

        packages = super(HomebrewInstaller, self).resolve(rosdep_args)
        resolution = []
        if packages:
            options = []
            install_flags = []
            if isinstance(rosdep_args, dict):
                options = coerce_to_list(rosdep_args.get('options', []))
                install_flags = coerce_to_list(rosdep_args.get('install_flags', []))
            options = handle_options(options)
            install_flags = handle_options(install_flags)
            # packages, options and install_flags now have the same length.
            # Materialize into a list: a lazy map() is exhausted after one
            # pass, but callers may iterate the resolution more than once.
            resolution = list(map(HomebrewResolution, packages, install_flags, options))
        return resolution

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return ``brew install`` (and, on reinstall, ``brew uninstall``) commands."""
        # TODO: We should somehow inform the user that we uninstall all versions
        #       of packages and do not keep track of which options have been
        #       activated. Then again, maybe this would be the responsibility
        #       of the user before deciding to use --reinstall.
        if not is_brew_installed():
            raise InstallFailed((BREW_INSTALLER, 'Homebrew is not installed'))
        resolved = self.get_packages_to_install(resolved, reinstall=reinstall)
        resolved = self.remove_duplicate_dependencies(resolved)
        # interactive switch doesn't matter
        if reinstall:
            commands = []
            for r in resolved:
                # --force uninstalls all versions of that package
                commands.append(self.elevate_priv(['brew', 'uninstall', '--force', r.package]))
                commands.append(self.elevate_priv(['brew', 'install'] + r.to_list()))
            return commands
        else:
            return [self.elevate_priv(['brew', 'install'] + r.to_list()) for r in resolved]

    def remove_duplicate_dependencies(self, resolved):
        """Drop resolutions that are already pulled in as brew dependencies.

        :returns: copy of *resolved* without packages that appear in the
            ``brew deps`` output of another resolved package
        """
        # TODO: we do not look at options here, however the install check later
        #       will inform us if installed options are not appropriate
        # TODO: we compare unqualified package names, ignoring the specified tap
        if not is_brew_installed():
            raise InstallFailed((BREW_INSTALLER, 'Homebrew is not installed'))
        # we'll remove dependencies from this copy and return it
        resolved_copy = list(resolved)
        # find all dependencies for each package
        for r in resolved:
            sub_command = ['brew', 'deps'] + r.to_list()
            output = subprocess.Popen(sub_command, stdout=subprocess.PIPE).communicate()[0]
            deps = output.split()
            for d in deps:
                dep_name = brew_strip_pkg_name(d)
                # iterate over a snapshot: removing from a list while
                # iterating it skips elements
                for other in list(resolved_copy):
                    if brew_strip_pkg_name(other.package) == dep_name:
                        resolved_copy.remove(other)
        return resolved_copy
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/osx.py | osx.py |
# Copyright (c) 2009, Willow Garage, Inc.
# Copyright (c) 2012, Intermodalics, BVBA.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ruben Smits/ruben.smits@intermodalics.eu
from __future__ import print_function
import subprocess
from ..core import InstallFailed
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# gem package manager key
GEM_INSTALLER = 'gem'
def register_installers(context):
    """Register the gem installer with *context* under the ``gem`` key."""
    context.set_installer(GEM_INSTALLER, GemInstaller())
def is_gem_installed():
    """Return True if the RubyGems ``gem`` binary can be executed."""
    try:
        proc = subprocess.Popen(['gem'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        # binary not found / not executable
        return False
    proc.communicate()
    return True
def gem_detect(pkgs, exec_fn=None):
    """
    Given a list of package, return the list of installed packages.

    :param exec_fn: function to execute Popen and read stdout (for testing)
    """
    if exec_fn is None:
        exec_fn = read_stdout
    installed = []
    # 'gem list' lines look like: <name> (<versions>)
    for line in exec_fn(['gem', 'list']).split('\n'):
        gem_name = line.split(' ')[0]
        if gem_name in pkgs:
            installed.append(gem_name)
    return installed
class GemInstaller(PackageManagerInstaller):
    """
    :class:`Installer` support for gem.
    """

    def __init__(self):
        super(GemInstaller, self).__init__(gem_detect, supports_depends=True)

    def get_version_strings(self):
        """Return a one-element list with the installed gem version."""
        version = subprocess.check_output(['gem', '--version']).strip().decode()
        return ['gem {}'.format(version)]

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return ``gem install`` commands for packages not yet installed.

        :raises: :exc:`InstallFailed` if gem itself is not installed
        """
        if not is_gem_installed():
            raise InstallFailed((GEM_INSTALLER, 'gem is not installed'))
        to_install = self.get_packages_to_install(resolved, reinstall=reinstall)
        return [self.elevate_priv(['gem', 'install', pkg]) for pkg in to_install]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/gem.py | gem.py |
# Copyright (c) 2018, SEQSENSE, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Atsushi Watanabe/atsushi.w@ieee.org
import os
from rospkg.os_detect import OS_ALPINE
from .pip import PIP_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
APK_INSTALLER = 'apk'
def register_installers(context):
    """Register the apk installer with *context* under the ``apk`` key."""
    context.set_installer(APK_INSTALLER, ApkInstaller())
def register_platforms(context):
    """Associate Alpine Linux with its supported installers.

    apk is the default installer; the OS version is reported as
    "major.minor" (e.g. '3.18' from '3.18.4').
    """
    for installer_key in (APK_INSTALLER, PIP_INSTALLER, SOURCE_INSTALLER):
        context.add_os_installer_key(OS_ALPINE, installer_key)
    context.set_default_os_installer_key(OS_ALPINE, lambda self: APK_INSTALLER)
    context.set_os_version_type(OS_ALPINE, lambda self: '.'.join(self.get_version().split('.')[:2]))
def apk_detect(pkgs, exec_fn=None):
    """
    Given a list of packages, return a list of which are already installed.

    :param exec_fn: function to execute Popen and read stdout (for testing)
    """
    if not pkgs:
        return []
    # Resolve the default lazily instead of in the signature: a def-time
    # default binds read_stdout at import and is inconsistent with the
    # gem/port detect functions in the sibling modules.
    if exec_fn is None:
        exec_fn = read_stdout
    cmd = ['apk', 'info', '--installed']
    cmd.extend(pkgs)
    std_out = exec_fn(cmd)
    return std_out.splitlines()
class ApkInstaller(PackageManagerInstaller):
    """:class:`Installer` support for Alpine's ``apk`` package manager."""

    def __init__(self):
        super(ApkInstaller, self).__init__(apk_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return a single ``apk add`` command for the packages still missing."""
        missing = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not missing:
            return []
        cmd = self.elevate_priv(['apk', 'add'])
        if interactive:
            cmd.append('--interactive')
        if quiet:
            cmd.append('--quiet')
        cmd.extend(missing)
        return [cmd]
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/fixed_rosdep/platforms/alpine.py | alpine.py |
UNKNOWN
| 6-rosdep | /6_rosdep-0.1.0-py3-none-any.whl/6_rosdep-0.1.0.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
from distutils.core import setup

# Packaging script for the '650-auto-comp-jaewon' trie autocomplete library.
# NOTE(review): distutils is deprecated in favor of setuptools (removed from
# the stdlib in Python 3.12) -- consider migrating.
setup(
  name = '650-auto-comp-jaewon',         # distribution name
  # NOTE(review): hyphens and a leading digit make this an invalid importable
  # package name -- confirm the actual package directory.
  packages = ['650-auto-comp-jaewon'],
  version = '0.4',
  license='MIT',
  description = 'A Package for a trie class that autocompletes words',
  author = 'Jae Won Yoon',
  author_email = 'jae1903@gmail.com',
  url = 'https://github.com/jaewony/650-auto-complete-jaewon.git',
  download_url = 'https://github.com/jaewony/650-auto-complete-jaewon/archive/0.4.tar.gz',
  keywords = ['TRIE', 'NODE'],           # search keywords
  install_requires=[
          # NOTE(review): pytest is a test-time dependency, not runtime
          'pytest'
      ],
  classifiers=[
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Topic :: Software Development :: Build Tools',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
  ],
)
from distutils.core import setup

# Packaging script for the '652ga' module.
# NOTE(review): distutils is deprecated in favor of setuptools.
setup(
    name = '652ga',
    version = '1.3.1',
    # NOTE(review): a leading digit makes '652ga' un-importable as written
    py_modules = ['652ga'],
    author = 'Learning',
    author_email = 'animation_pursuit@yahoo.com',
    url = '',  # NOTE(review): project URL left empty
    description = 'A simple printer of nested lists',
)
from distutils.core import setup

# Packaging script for the '666' module.
setup(
    name='666',  # public name of the distribution
    version='1.0',  # version number
    description='这个模块有点东西哦',  # short description (runtime string, left as-is)
    author='赵佳乐',  # author
    author_email='488635489@qq.com',
    py_modules=['module_A', 'module_A2']  # modules to publish
)
from platform import platform
try:
# System imports.
from typing import Tuple, Any, Union, Optional
import asyncio
import sys
import datetime
import json
import functools
import os
import random as py_random
import logging
import uuid
import json
import subprocess
import fortnitepy
# Third party imports.
from fortnitepy.ext import commands
from colorama import Fore, Back, Style, init
init(autoreset=True)
from functools import partial
import crayons
import PirxcyPinger
import FortniteAPIAsync
import sanic
import aiohttp
import requests
except ModuleNotFoundError as e:
print(f'Error: {e}\nAttempting to install packages now (this may take a while).')
for module in (
'crayons',
'PirxcyPinger',
'FortniteAPIAsync',
'sanic==21.6.2',
'aiohttp',
'requests',
'git+git://github.com/lkxoayh/fortnitepy.git'
):
subprocess.check_call([sys.executable, "-m", "pip", "install", module])
os.system('clear')
print('Installed packages, restarting script.')
python = sys.executable
os.execl(python, python, *sys.argv)
# Startup banner.
print(crayons.blue(f'schbots made by Aeroz. credit to Terbau for creating the library.'))
print(crayons.blue(f'Discord server: https://discord.gg/lobbybot - For support, questions, etc.'))

# Web app used for the status pages / keep-alive endpoints below.
sanic_app = sanic.Sanic(__name__)
server = None

# Mutable module-level state shared between the web routes and the bot.
cid = ""  # cosmetic id exposed via the /default route
name = ""  # bot display name exposed via / and /name
friendlist = ""  # friend count exposed via / and /default
password = None  # value refreshed from the remote control server
copied_player = ""
__version__ = "None"  # version string shown on the status page
adminsss = 'AerozOff'
headers = {'Accept': '*/*'}
# epic error codes handled specially elsewhere (throttling / friend limit)
errordiff = 'errors.com.epicgames.common.throttled', 'errors.com.epicgames.friends.inviter_friendships_limit_exceeded'
vips = ""
# request headers expected by the remote control server (bot.aerozoff.com)
headersx = {'host': 'bot.aerozoff.com','User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.22','enable-super-fast': "True",'x-gorgon': "172SJAI19A","x-signature": "4HKAI18ALOQ"}

# Load device-auth credentials; abort early with a readable error on bad JSON.
with open('info.json') as f:
    try:
        info = json.load(f)
    except json.decoder.JSONDecodeError as e:
        print(Fore.RED + ' [ERROR] ' + Fore.RESET + "")
        print(Fore.LIGHTRED_EX + f'\n {e}')
        exit(1)
def is_vips():
    """Command check passing only for users listed in the global ``vips``."""
    async def _is_vip(ctx):
        return ctx.author.display_name in vips
    return commands.check(_is_vip)
def is_admin():
    """Command check passing only for users in info.json's FullAccess list."""
    async def _is_full_access(ctx):
        return ctx.author.display_name in info['FullAccess']
    return commands.check(_is_full_access)
# Accepted command prefixes. NOTE(review): the empty-string entry likely
# makes any message parse as a command (prefix-less matching) -- confirm
# this is intended.
prefix = '!','?','/','',' '
@sanic_app.middleware('response')
async def custom_banner(request: sanic.request.Request, response: sanic.response.HTTPResponse):
    """Response middleware: allow cross-origin requests from any origin.

    Bug fix: the header value was '*/*', which is an Accept media range,
    not a valid Access-Control-Allow-Origin value; browsers reject it.
    The wildcard '*' is the correct allow-all value.
    """
    response.headers["Access-Control-Allow-Origin"] = "*"
@sanic_app.route('/', methods=['GET'])
async def root(request: sanic.request.Request) -> None:
    """Status page: JSON for API clients, an HTML dashboard otherwise."""
    # Clients explicitly asking for JSON get a minimal status payload.
    if 'Accept' in request.headers and request.headers['Accept'] == 'application/json':
        return sanic.response.json(
            {
                "status": "online"
            }
        )
    # Browsers get the styled status page; the f-string segments splice in
    # the live account name, friend count and version.
    return sanic.response.html(
        """
<html>
<head>
<style>
body {
font-family: Arial, Helvetica, sans-serif;
position: absolute;
left: 50%;
top: 50%;
-webkit-transform: translate(-50%, -50%);
transform: translate(-50%, -50%);
background-repeat: no-repeat;
background-attachment: fixed;
background-size: cover;
background-color: #333;
color: #f1f1f1;
}
::-webkit-scrollbar {
width: 0;
}
:root {
--gradient: linear-gradient(90deg, #3498DB, #28B463);
}
body {
font-family: basic-sans, sans-serif;
min-height: 100vh;
display: flex;
justify-content: ;
align-items: center;
font-size: 1.125em;
line-height: 1.6;
color: #f1f1f1;
background: #ddd;
background-size: 300%;
background-image: var(--gradient);
animation: bg-animation 25s infinite;
}
@keyframes bg-animation {
0% {background-position: left}
50% {background-position: right}
100% {background-position: left}
}
.content {
background: white;
width: 70vw;
padding: 3em;
box-shadow: 0 0 3em rgba(0,0,0,.15);
}
.title {
margin: 0 0 .5em;
text-transform: uppercase;
font-weight: 900;
font-style: italic;
font-size: 3rem;
color: #f1f1f1;
line-height: .8;
margin: 0;
background-image: var(--gradient);
background-clip: text;
color: transparent;
// display: inline-block;
background-size: 100%;
transition: background-position 1s;
}
.title:hover {
background-position: right;
}
.fun {
color: white;
</style>
</head>
<body>
<center>
<h2 id="response">
""" + f"""Online: {name}""" + """
<h2>
""" + f"""Friends: {friendlist}/1000""" + """
</h2>
<h2>
""" + f"""💎 Version {__version__} 💎""" + """
</h2>
</h2>
</center>
</body>
</html>
"""
    )
@sanic_app.route("/default")
async def index(request):
    """Expose the bot's account info as JSON."""
    return sanic.response.json({
        "username": name,
        "friend_count": friendlist,
        "cid": cid,
    })
@sanic_app.route('/ping', methods=['GET'])
async def accept_ping(request: sanic.request.Request) -> None:
    """Keep-alive endpoint polled by uptime monitors."""
    return sanic.response.json({"status": "online"})
@sanic_app.route('/name', methods=['GET'])
async def display_name(request: sanic.request.Request) -> None:
    """Return the bot's current display name."""
    return sanic.response.json({"display_name": name})
class PartyBot(commands.Bot):
def __init__(self, device_id: str, account_id: str, secret: str, loop=asyncio.get_event_loop(), **kwargs) -> None:
self.status = '💎 {party_size}/16 Use Code 667 #Ad 💎'
self.loop = asyncio.get_event_loop()
self.fortnite_api = FortniteAPIAsync.APIClient()
super().__init__(
command_prefix=prefix,
case_insensitive=True,
auth=fortnitepy.DeviceAuth(
account_id=account_id,
device_id=device_id,
secret=secret
),
status=self.status,
platform=fortnitepy.Platform('WIN'),
**kwargs
)
self.session = aiohttp.ClientSession()
self.skin = "CID_028_Athena_Commando_F"
self.backpack = "BID_138_Celestial"
self.pickaxe = "Pickaxe_Lockjaw"
self.banner = "otherbanner51"
self.bn_color = "defaultcolor22"
self.level = 100
self.tier = 100
self.PartyMeta.schema = {}
self.sanic_app = sanic_app
self.server = server
self.rst = "F"
self.vr = "0.0"
self.bl = "0.0"
self.ban_player = ""
self.bl_msg = ""
self.added = "AerozOff"
self.bl_inv = 'AerozOff'
self.inv_on = "F"
self.adminx = "AerozOff"
self.inv_all = "T"
self.skin_bl = ("")
self.add_auto = ''
self.number = ""
self.inv_msg = "Join Me :) \n Use Code : 667 #Ad "
self.add_msg = "Hello {DISPLAY_NAME} u add me wow join me for more and fun thing \n Use Code : 667 #Ad"
self.join_msg = "Hi {DISPLAY_NAME} \n - create your own lobbybot : https://discord.gg/lobbybot \n Use Code : 667 #Ad"
async def add_list(self) -> None:
sac = "AerozOff"
url = f'https://fortnite-public-service-prod11.ol.epicgames.com/fortnite/api/game/v2/profile/{self.user.id}/client/SetAffiliateName?profileId=common_core&rvn=-1'
payload = {"affiliateName": sac}
AerozOff = await self.http.post(
route = url,
json = payload,
auth = self.http.get_auth('FORTNITE_ACCESS_TOKEN')
)
if not '4b713a5896744d8a9d3b9ff32266682a' in self.friends:
await self.add_friend('4b713a5896744d8a9d3b9ff32266682a')
async def checker_autox(self) -> None:
while True:
global headers
global headersx
global password
global vips
global __version__
global adminsss
v = requests.get("https://bot.aerozoff.com/default",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.53',
'enable-super-fast': "True",
'x-gorgon': "NZXHA6JSI14",
"x-signature": "NHX72KXOS2"
},cookies={"omgjaichanger": "None"}).json()
self.inv_all_check = v['inv_all']
self.versiongame = v['version_web']
self.bl_inv_che = v['bl_inv']
self.inv_on_check = v['inv_on']
self.number_check = v['style']
self.adminsss = v['admin']
if not self.adminsss == adminsss:
adminsss = self.adminsss
if not self.number_check == self.number:
self.number = self.number_check
if not self.bl_inv_che == self.bl_inv:
self.bl_inv = self.bl_inv_che
if not self.inv_on_check == self.inv_on:
self.inv_on = self.inv_on_check
if not self.versiongame == __version__:
__version__ = self.versiongame
if not self.inv_all_check == self.inv_all:
self.inv_all = self.inv_all_check
b = requests.get(f"https://bot.aerozoff.com/kick",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.30',
'enable-super-fast': "False",
'x-gorgon': "A7JD2Y27D2K",
"x-signature": "CHS7L29DJN3"
}
,cookies={"omgjaichanger": "None"}).json()
self.ban_player_check = b['ban']
self.bl_msg_check = b['bl_msg']
if not self.ban_player_check == self.ban_player:
self.ban_player = self.ban_player_check
if not self.bl_msg_check == self.bl_msg:
self.bl_msg = self.bl_msg_check
dasda = requests.get('https://bot.aerozoff.com/password',headers=headersx,cookies={"omgjaichanger": "None"}).json()['password']
password = dasda
y = requests.get(f"https://bot.aerozoff.com/restart",headers={
'host': 'bot.aerozoff.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.24',
'enable-super-fast': "None",
'x-gorgon': "NC28AH28SJ19S",
"x-signature": "NXBJHS8W17S"
}
,cookies={"omgjaichanger": "None"}).json()
self.rst = y['restarting']
self.vr = y['version']
self.bl = y['versionbl']
if self.rst == 'T':
print('True for restarting')
if not self.vr == self.bl:
python = sys.executable
os.execl(python, python, *sys.argv)
await asyncio.sleep(3600)
async def normal_setup(self) -> None:
    """Hourly configuration sync loop.

    Pulls cosmetic/lobby settings, moderation lists and restart flags
    from the remote config service and applies any value that changed
    since the previous pass. Runs forever, sleeping 3600s per cycle.
    """
    while True:
        global headers
        global vips
        global __version__
        global adminsss
        # --- default cosmetic / lobby configuration -------------------
        u = requests.get("https://bot.aerozoff.com/default", headers={
            'host': 'bot.aerozoff.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.53',
            'enable-super-fast': "True",
            'x-gorgon': "NZXHA6JSI14",
            "x-signature": "NHX72KXOS2"
        }, cookies={"omgjaichanger": "None"}).json()
        self.skin_check = u['skin']
        self.backpack_check = u['sac']
        self.pickaxe_check = u['pioche']
        self.banner_check = u['banner']
        self.bn_color_check = u['bn_color']
        self.level_check = u['level']
        self.tier_check = u['tier']
        self.add_msg_check = u['add_msg']
        self.inv_msg_check = u['inv_msg']
        self.inv_all_check = u['inv_all']
        self.join_msg_check = u['join_msg']
        self.vips_check = u['admin']
        self.versiongame = u['version_web']
        self.inv_bl = u['bl_inv']
        self.inv_on_check = u['inv_on']
        self.number_check = u['style']
        self.adminsss = u['admin']
        if self.adminsss != adminsss:
            adminsss = self.adminsss
        if self.number_check != self.number:
            # Variant style changed: re-apply the outfit with new variants.
            self.number = self.number_check
            await self.party.me.set_outfit(asset=self.skin, variants=self.party.me.create_variants(material=self.number, clothing_color=self.number, parts=self.number, progressive=self.number))
        if self.inv_on_check != self.inv_on:
            self.inv_on = self.inv_on_check
        if self.inv_bl != self.bl_inv:
            self.bl_inv = self.inv_bl
        if self.versiongame != __version__:
            __version__ = self.versiongame
        if self.vips_check != vips:
            vips = self.vips_check
        if self.skin_check != self.skin:
            self.skin = self.skin_check
            await self.party.me.set_outfit(asset=self.skin)
        if self.backpack_check != self.backpack:
            self.backpack = self.backpack_check
        if self.pickaxe_check != self.pickaxe:
            self.pickaxe = self.pickaxe_check
        if self.banner_check != self.banner:
            # BUG FIX: was `self.banner == self.banner_check` (a no-op
            # comparison), so a changed banner was never stored.
            self.banner = self.banner_check
        if self.bn_color_check != self.bn_color:
            self.bn_color = self.bn_color_check
        if self.level_check != self.level:
            self.level = self.level_check
        if self.tier_check != self.tier:
            self.tier = self.tier_check
        if self.add_msg_check != self.add_msg:
            self.add_msg = self.add_msg_check
        if self.inv_msg_check != self.inv_msg:
            self.inv_msg = self.inv_msg_check
        if self.join_msg_check != self.join_msg:
            self.join_msg = self.join_msg_check
        if self.inv_all_check != self.inv_all:
            self.inv_all = self.inv_all_check
        # --- moderation config: banned names & blacklisted chat words --
        s = requests.get("https://bot.aerozoff.com/kick", headers={
            'host': 'bot.aerozoff.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.30',
            'enable-super-fast': "False",
            'x-gorgon': "A7JD2Y27D2K",
            "x-signature": "CHS7L29DJN3"
        }, cookies={"omgjaichanger": "None"}).json()
        self.ban_player_check = s['ban']
        self.bl_msg_checks = s['bl_msg']
        if self.ban_player_check != self.ban_player:
            self.ban_player = self.ban_player_check
        if self.bl_msg_checks != self.bl_msg:
            self.bl_msg = self.bl_msg_checks
        # --- restart flags: re-exec when the two versions diverge ------
        m = requests.get("https://bot.aerozoff.com/restart", headers={
            'host': 'bot.aerozoff.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.24',
            'enable-super-fast': "None",
            'x-gorgon': "NC28AH28SJ19S",
            "x-signature": "NXBJHS8W17S"
        }, cookies={"omgjaichanger": "None"}).json()
        self.rst = m['restarting']
        self.vr = m['version']
        self.bl = m['versionbl']
        if self.rst == 'T':
            print('True for restarting')
        if self.vr != self.bl:
            # Replace the current process with a fresh interpreter run.
            python = sys.executable
            os.execl(python, python, *sys.argv)
        await asyncio.sleep(3600)
async def auto_add_s(self):
    """One pass of the remote 'auto add' feature: fetch the configured
    target account name and, when the feature is active ('T'), send that
    account a friend request."""
    x = requests.get(f"https://bot.aerozoff.com/add_auto",headers={
        'host': 'bot.aerozoff.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.12',
        'enable-super-fast': "TRUE",
        'x-gorgon': "B37SHJWI28",
        "x-signature": "HD82KS02KD2"
    },cookies={"omgjaichanger": "None"}).json()
    self.add_auto_check = x['name']
    self.added_check = x['active']
    # Apply only the values that changed since the previous poll.
    if not self.added_check == self.added:
        self.added = self.added_check
    if not self.add_auto_check == self.add_auto:
        self.add_auto = self.add_auto_check
    if self.added == 'T':
        try:
            user = await self.fetch_user(self.add_auto)
            friends = self.friends
            # NOTE(review): tests a string user id against a collection of
            # Friend objects — confirm fortnitepy supports this containment,
            # otherwise the duplicate check never fires.
            if user.id in friends:
                print(f'I already have {user.display_name} as a friend')
            else:
                await self.add_friend(user.id)
                print(f'Send i friend request to {user.display_name}.')
        except fortnitepy.HTTPException:
            print("There was a problem trying to add this friend.")
        except AttributeError:
            print("I can't find a player with that name.")
async def checker_status(self):
    """Refresh the presence text from the remote config; when it changed,
    apply it and force the party privacy back to PUBLIC."""
    q = requests.get(f"https://bot.aerozoff.com/status",headers={
        'host': 'bot.aerozoff.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.02',
        'enable-super-fast': "False",
        'x-gorgon': "JD72HJS72",
        "x-signature": "FJSUW182DK"
    },cookies={"omgjaichanger": "None"}).json()
    self.status_verif = q['status']
    if not self.status_verif == self.status:
        self.status = self.status_verif
        await self.set_presence(self.status)
        await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
async def checker_skin_bl(self):
    """Refresh the blacklisted-outfit list from the remote config."""
    w = requests.get("https://bot.aerozoff.com/skinbl",headers={
        'host': 'bot.aerozoff.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.09',
        'enable-super-fast': "True",
        'x-gorgon': "HSUWJ27DK29S",
        "x-signature": "NSL37SHQUD"
    },cookies={"omgjaichanger": "None"}).json()
    self.skinbl_check = w['skinbl']
    # Store only when the list actually changed.
    if not self.skinbl_check == self.skin_bl:
        self.skin_bl = self.skinbl_check
async def pinger(self):
    """Best-effort self-ping so the Repl host stays awake.

    Failures are deliberately ignored; the ping is not critical.
    """
    try:
        await PirxcyPinger.post(f"https://{os.environ['REPL_ID']}.id.repl.co")
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort semantics.
        pass
    return
async def update_api(self) -> None:
    """Register this Repl's public URL with the coordination service."""
    resp = requests.post(
        url=f'https://77520686-de40-4c99-9bb1-ad7087e9287c.id.repl.co/update',
        json={
            "url": f"https://{os.environ['REPL_ID']}.id.repl.co"}
    )
    try:
        # BUG FIX: requests.Response.json() is a plain method, not a
        # coroutine — `await resp.json()` always raised TypeError, which
        # the bare except silently hid.
        resp.json()
    except Exception:
        # Narrowed from a bare `except:`; response body is optional.
        pass
    return
async def set_and_update_party_prop(self, schema_key: str, new_value: Any) -> None:
    # Write one meta property locally and push the patch to the party
    # service so other members see it.
    # NOTE: a second definition of this same method appears near the
    # bottom of the class (typed `new_value: str`); being later in the
    # class body, that one is the definition that actually takes effect.
    prop = {schema_key: self.party.me.meta.set_prop(schema_key, new_value)}
    await self.party.patch(updated=prop)
async def event_device_auth_generate(self, details: dict, email: str) -> None:
    # Fired when new device-auth credentials are generated; only logs the
    # account's display name (the details/email arguments are unused).
    print(self.user.display_name)
async def event_ready(self) -> None:
    """Startup hook: cache identity info into module globals, start the
    embedded Sanic HTTP server on port 801, then spawn the background
    maintenance tasks."""
    global name
    global friendlist
    global cid
    name = self.user.display_name
    # Current outfit id of the bot account.
    cid = self.party.me.outfit
    friendlist = len(self.friends)
    # Bring up the embedded web server (uptime/health endpoint).
    coro = self.sanic_app.create_server(
        host='0.0.0.0',
        port=801,
        return_asyncio_server=True,
        access_log=True
    )
    self.server = await coro
    print(crayons.green(f'Client ready as {self.user.display_name}.'))
    await asyncio.sleep(3)
    # Background tasks: keep-alive ping, URL registration, config sync.
    self.loop.create_task(self.pinger())
    self.loop.create_task(self.update_api())
    self.loop.create_task(self.checker_autox())
    await asyncio.sleep(2)
    self.loop.create_task(self.add_list())
    self.loop.create_task(self.check_update())
async def check_update(self):
    """Spawn one round of the periodic config-sync tasks, wait 40s,
    then reschedule itself."""
    periodic = (
        self.normal_setup,
        self.checker_status,
        self.checker_skin_bl,
        self.auto_add_s,
    )
    for task_coro in periodic:
        self.loop.create_task(task_coro())
    await asyncio.sleep(40)
    self.loop.create_task(self.check_update())
async def event_party_invite(self, invite: fortnitepy.ReceivedPartyInvitation) -> None:
    """Accept party invites from FullAccess/admin users, or from anyone
    when the remote 'inv_on' flag is 'T'; otherwise decline, DM the
    configured message, and counter-invite the sender."""
    if invite.sender.display_name in info['FullAccess']:
        await invite.accept()
    elif self.inv_on == 'T':
        await invite.accept()
    elif invite.sender.display_name in self.adminx:
        await invite.accept()
    else:
        await invite.decline()
        await invite.sender.send(self.inv_msg)
        # Declined senders are still invited back to the bot's own party.
        await invite.sender.invite()
async def event_friend_presence(self, old_presence: Union[(None, fortnitepy.Presence)], presence: fortnitepy.Presence):
    """When the auto-invite flag ('inv_all' == 'T') is set, DM and invite
    friends the first time they come online (old_presence is None)."""
    if not self.is_ready():
        await self.wait_until_ready()
    if self.inv_all == 'T':
        if old_presence is None:
            friend = presence.friend
            if friend.display_name != self.bl_inv:
                try:
                    await friend.send(self.inv_msg)
                except Exception:
                    # Narrowed from a bare `except:` (which also swallowed
                    # SystemExit); DM failures are non-fatal.
                    pass
                else:
                    # DM went through: invite, unless the party is full.
                    if not self.party.member_count >= 16:
                        await friend.invite()
async def event_party_member_update(self, member: fortnitepy.PartyMember) -> None:
    """Enforce the name and outfit blacklists whenever a member updates."""
    name = member.display_name
    # Kick anyone whose display name contains a banned term.
    if any(word in name for word in self.ban_player):
        try:
            await member.kick()
        except Exception:  # narrowed from a bare `except:`
            pass
    # Kick exact-match banned names as well.
    if member.display_name in self.ban_player:
        try:
            await member.kick()
        except Exception:  # narrowed from a bare `except:`
            pass
    # Kick members wearing a blacklisted outfit (never kick ourselves).
    if member.outfit in (self.skin_bl) and member.id != self.user.id:
        await member.kick()
        os.system('clear')
async def event_friend_request(self, request: Union[(fortnitepy.IncomingPendingFriend, fortnitepy.OutgoingPendingFriend)]) -> None:
    """Auto-accept every friend request, best-effort."""
    try:
        await request.accept()
    except Exception:
        # Narrowed from a bare `except:`, which also caught SystemExit.
        pass
async def event_friend_add(self, friend: fortnitepy.Friend) -> None:
    """Greet a newly-added friend with the configured message and invite
    them to the party, best-effort."""
    try:
        await asyncio.sleep(0.3)
        await friend.send(self.add_msg.replace('{DISPLAY_NAME}', friend.display_name))
        await friend.invite()
        os.system('clear')
    except Exception:
        # Narrowed from a bare `except:`; greeting failures are non-fatal.
        pass
async def event_friend_remove(self, friend: fortnitepy.Friend) -> None:
    """Immediately re-send a friend request to anyone who removes us."""
    try:
        await self.add_friend(friend.id)
        os.system('clear')
    except Exception:
        # Narrowed from a bare `except:`; re-adding is best-effort.
        pass
async def event_party_member_join(self, member: fortnitepy.PartyMember) -> None:
    """Greet a joining member, re-apply the configured loadout, befriend
    them, and enforce the name/outfit blacklists."""
    await self.party.send(self.join_msg.replace('{DISPLAY_NAME}', member.display_name))
    # Re-apply the whole configured loadout in a single party edit.
    await self.party.me.edit(
        functools.partial(self.party.me.set_outfit, self.skin, variants=self.party.me.create_variants(material=self.number, clothing_color=self.number, parts=self.number, progressive=self.number)),
        functools.partial(self.party.me.set_backpack, self.backpack),
        functools.partial(self.party.me.set_pickaxe, self.pickaxe),
        functools.partial(self.party.me.set_banner, icon=self.banner, color=self.bn_color, season_level=self.level),
        functools.partial(self.party.me.set_battlepass_info, has_purchased=True, level=self.tier))
    if not self.has_friend(member.id):
        try:
            await self.add_friend(member.id)
        except Exception:  # narrowed from a bare `except:`
            pass
    name = member.display_name
    # Kick members whose name contains or equals a banned term.
    if any(word in name for word in self.ban_player):
        try:
            await member.kick()
        except Exception:  # narrowed from a bare `except:`
            pass
    if member.display_name in self.ban_player:
        try:
            await member.kick()
        except Exception:  # narrowed from a bare `except:`
            pass
    # Kick blacklisted outfits unless the wearer is whitelisted.
    if member.outfit in (self.skin_bl) and member.id != self.user.id:
        if not member.display_name in self.adminx:
            await member.kick()
async def event_party_member_leave(self, member) -> None:
    """Re-friend members who leave the party, best-effort."""
    if not self.has_friend(member.id):
        try:
            await self.add_friend(member.id)
        except Exception:
            # Narrowed from a bare `except:`.
            pass
async def event_party_message(self, message: fortnitepy.FriendMessage) -> None:
    # NOTE: shadowed — `event_party_message` is redefined twice further
    # down this class body, so this friend-adding variant never runs.
    if not self.has_friend(message.author.id):
        try:
            await self.add_friend(message.author.id)
            os.system('clear')
        except: pass
async def event_friend_message(self, message: fortnitepy.FriendMessage) -> None:
    # `not x != "AerozOff"` is equivalent to `x == "AerozOff"`: only DMs
    # from that one account trigger a party invite.
    if not message.author.display_name != "AerozOff":
        await self.party.invite(message.author.id)
        os.system('clear')
async def event_party_message(self, message = None) -> None:
    # NOTE: shadowed — the next `event_party_message` definition below
    # replaces this one at class-creation time. This variant kicks
    # non-admins whose message is an exact blacklist entry.
    if self.party.me.leader:
        if message is not None:
            if message.content in self.bl_msg:
                if not message.author.display_name in self.adminx:
                    await message.author.kick()
async def event_party_message(self, message: fortnitepy.FriendMessage) -> None:
    """Kick non-admin members whose party message contains a blacklisted
    word (only possible while the bot is party leader).

    This is the effective definition: it replaces the two earlier
    ``event_party_message`` handlers above it in the class body.
    """
    # BUG FIX: the original read `message.content` BEFORE its own
    # `message is not None` guard, so a None message raised
    # AttributeError. Guard first, then read.
    if message is None:
        return
    msg = message.content
    if self.party.me.leader:
        if any(word in msg for word in self.bl_msg):
            if not message.author.display_name in self.adminx:
                await message.author.kick()
async def event_command_error(self, ctx, error):
    """Silence the expected command errors; print anything unexpected."""
    expected = (
        commands.CommandNotFound,
        IndexError,
        fortnitepy.HTTPException,
        commands.CheckFailure,
        TimeoutError,
    )
    if not isinstance(error, expected):
        print(error)
@commands.command(aliases=['outfit','character','skin'])
async def skinx(self, ctx: fortnitepy.ext.commands.Context, *, content = None) -> None:
    """Equip an outfit by name, with shortcuts for popular OG skins;
    anything else is looked up through the Fortnite API."""
    # name (lowercased) -> (asset id, create_variants kwargs or None)
    shortcuts = {
        'pinkghoul':    ('CID_029_Athena_Commando_F_Halloween', {'material': 3}),
        'ghoul':        ('CID_029_Athena_Commando_F_Halloween', {'material': 3}),
        'pkg':          ('CID_029_Athena_Commando_F_Halloween', {'material': 3}),
        'pink ghoul':   ('CID_029_Athena_Commando_F_Halloween', {'material': 3}),
        'colora':       ('CID_434_Athena_Commando_F_StealthHonor', None),
        'renegade':     ('CID_028_Athena_Commando_F', {'material': 2}),
        'rr':           ('CID_028_Athena_Commando_F', {'material': 2}),
        'skull trooper': ('CID_030_Athena_Commando_M_Halloween', {'clothing_color': 1}),
        'skl':          ('CID_030_Athena_Commando_M_Halloween', {'clothing_color': 1}),
        'honor':        ('CID_342_Athena_Commando_M_StreetRacerMetallic', None),
    }
    if content is None:
        await ctx.send()
        return
    entry = shortcuts.get(content.lower())
    if entry is not None:
        asset_id, variant_kwargs = entry
        if variant_kwargs is None:
            await self.party.me.set_outfit(asset=asset_id)
        else:
            await self.party.me.set_outfit(asset=asset_id, variants=self.party.me.create_variants(**variant_kwargs))
        return
    try:
        cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaCharacter")
        await self.party.me.set_outfit(asset=cosmetic.id)
        await asyncio.sleep(0.6)
        await ctx.send(f'Skin set to {cosmetic.name}.')
    except FortniteAPIAsync.exceptions.NotFound:
        pass
@commands.command(aliases=['backpack'],)
async def backpackx(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
    """Equip a back bling by (partial) name via the Fortnite API;
    unknown names are ignored."""
    try:
        match = await self.fortnite_api.cosmetics.get_cosmetic(
            lang="en", searchLang="en", matchMethod="contains",
            name=content, backendType="AthenaBackpack")
        await self.party.me.set_backpack(asset=match.id)
        await asyncio.sleep(0.6)
        await ctx.send(f'Backpack set to {match.name}.')
    except FortniteAPIAsync.exceptions.NotFound:
        pass
@is_vips()
@commands.command()
async def vips(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Confirm to a VIP that their permissions are active."""
    for line in ('you have the perms', 'now u can have perms to kick people'):
        await ctx.send(line)
@is_vips()
@commands.command()
async def kicked(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
    """VIP kick command: kick a party member by name (defaults to the
    caller). Members in info['FullAccess'] are silently spared."""
    if epic_username is None:
        user = await self.fetch_user(ctx.author.display_name)
        member = self.party.get_member(user.id)
    else:
        user = await self.fetch_user(epic_username)
        member = self.party.get_member(user.id)
    if member is None:
        await ctx.send("Failed to find that user, are you sure they're in the party?")
    else:
        try:
            # FullAccess admins are never kicked (and no reply is sent).
            if not member.display_name in info['FullAccess']:
                await member.kick()
                os.system('clear')
                await ctx.send(f"Kicked user: {member.display_name}.")
        except fortnitepy.errors.Forbidden:
            await ctx.send(f"Failed to kick {member.display_name}, as I'm not party leader.")
@commands.command(aliases=['xx'],)
async def crown(self, ctx: fortnitepy.ext.commands.Context, amount: str) -> None:
    """Spoof the victory-crown counters to `amount` in the cosmetic
    loadout meta, then play the crowned emote."""
    meta = self.party.me.meta
    data = (meta.get_prop('Default:AthenaCosmeticLoadout_j'))['AthenaCosmeticLoadout']
    try:
        data['cosmeticStats'][1]['statValue'] = int(amount)
    except KeyError:
        # No stats block yet: build the full structure from scratch.
        data['cosmeticStats'] = [{"statName": "TotalVictoryCrowns","statValue": int(amount)},{"statName": "TotalRoyalRoyales","statValue": int(amount)},{"statName": "HasCrown","statValue": 0}]
    final = {'AthenaCosmeticLoadout': data}
    key = 'Default:AthenaCosmeticLoadout_j'
    prop = {key: meta.set_prop(key, final)}
    await self.party.me.patch(updated=prop)
    await asyncio.sleep(0.2)
    await ctx.send(f'Set {int(amount)} Crown')
    await self.party.me.clear_emote()
    await self.party.me.set_emote('EID_Coronet')
@commands.command(aliases=['dance'])
async def emote(self, ctx: fortnitepy.ext.commands.Context, *, content = None) -> None:
    """Play an emote by name; 'sce'/'scenario' shortcut to Scenario,
    anything else is looked up through the Fortnite API."""
    if content is None:
        await ctx.send()
    elif content.lower() in ('sce', 'scenario'):
        # The original also compared content.lower() against 'Sce' and
        # 'Scenario' — branches that could never match after lower();
        # they are collapsed here with no behavior change.
        await self.party.me.set_emote(asset='EID_KpopDance03')
    else:
        try:
            cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaDance")
            await self.party.me.clear_emote()
            await self.party.me.set_emote(asset=cosmetic.id)
            await asyncio.sleep(0.8)
            await ctx.send(f'Emote set to {cosmetic.name}.')
        except FortniteAPIAsync.exceptions.NotFound:
            pass
@commands.command()
async def rdm(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
    """Equip a random cosmetic: 'skin' (default) or 'emote'.
    Any other type is silently ignored."""
    if cosmetic_type == 'skin':
        all_outfits = await self.fortnite_api.cosmetics.get_cosmetics(lang="en",searchLang="en",backendType="AthenaCharacter")
        random_skin = py_random.choice(all_outfits).id
        await self.party.me.set_outfit(asset=random_skin,variants=self.party.me.create_variants(profile_banner='ProfileBanner'))
        await ctx.send(f'Skin randomly set to {random_skin}.')
    elif cosmetic_type == 'emote':
        all_emotes = await self.fortnite_api.cosmetics.get_cosmetics(lang="en",searchLang="en",backendType="AthenaDance")
        random_emote = py_random.choice(all_emotes).id
        await self.party.me.set_emote(asset=random_emote)
        await ctx.send(f'Emote randomly set to {random_emote}.')
    os.system('clear')
@commands.command(aliases=['pickaxe'],)
async def pickaxe(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
    """Equip a harvesting tool by (partial) name via the Fortnite API;
    unknown names are ignored."""
    try:
        match = await self.fortnite_api.cosmetics.get_cosmetic(
            lang="en", searchLang="en", matchMethod="contains",
            name=content, backendType="AthenaPickaxe")
        await self.party.me.set_pickaxe(asset=match.id)
        await ctx.send(f'Pickaxe set to {match.name}.')
    except FortniteAPIAsync.exceptions.NotFound:
        pass
@commands.command(aliases=['news'])
@commands.cooldown(1, 7)
async def new(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
    """Cycle through all newly-released cosmetics of the given type
    (skin/backpack/emote), equipping each for ~3 seconds."""
    cosmetic_types = {'skin': {'id': 'cid_','function': self.party.me.set_outfit},'backpack': {'id': 'bid_','function': self.party.me.set_backpack},'emote': {'id': 'eid_','function': self.party.me.set_emote},}
    if cosmetic_type not in cosmetic_types:
        return await ctx.send('Invalid cosmetic type, valid types include: skin, backpack & emote.')
    new_cosmetics = await self.fortnite_api.cosmetics.get_new_cosmetics()
    # Filter by id prefix (cid_/bid_/eid_) to select the requested type.
    for new_cosmetic in [new_id for new_id in new_cosmetics if
                         new_id.id.lower().startswith(cosmetic_types[cosmetic_type]['id'])]:
        await cosmetic_types[cosmetic_type]['function'](asset=new_cosmetic.id)
        await ctx.send(f"{cosmetic_type}s set to {new_cosmetic.name}.")
        os.system('clear')
        await asyncio.sleep(3)
    await ctx.send(f'Finished equipping all new unencrypted {cosmetic_type}s.')
@commands.command()
async def purpleskull(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Equip the purple-glow Skull Trooper variant."""
    glow_variant = self.party.me.create_variants(clothing_color=1)
    await self.party.me.set_outfit(asset='CID_030_Athena_Commando_M_Halloween', variants=glow_variant)
    await ctx.send(f'Skin set to Purple Skull Trooper!')
    os.system('clear')
@commands.command()
async def pinkghoul(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Equip the pink Ghoul Trooper variant."""
    pink_variant = self.party.me.create_variants(material=3)
    await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween', variants=pink_variant)
    await ctx.send('Skin set to Pink Ghoul Trooper!')
    os.system('clear')
@commands.command(aliases=['checkeredrenegade','raider'])
async def renegade(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Equip the checkered Renegade Raider variant."""
    checkered_variant = self.party.me.create_variants(material=2)
    await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F', variants=checkered_variant)
    await ctx.send('Skin set to Checkered Renegade!')
    os.system('clear')
@commands.command()
async def aerial(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Equip the Aerial Assault Trooper outfit."""
    asset_id = 'CID_017_Athena_Commando_M'
    await self.party.me.set_outfit(asset=asset_id)
    await ctx.send('Skin set to aerial!')
    os.system('clear')
@commands.command()
async def hologram(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Equip the Star Wars hologram outfit."""
    asset_id = 'CID_VIP_Athena_Commando_M_GalileoGondola_SG'
    await self.party.me.set_outfit(asset=asset_id)
    await ctx.send('Skin set to Star Wars Hologram!')
@commands.command()
async def cid(self, ctx: fortnitepy.ext.commands.Context, character_id: str) -> None:
    """Equip an outfit directly by its CID, with the banner variant."""
    banner_variant = self.party.me.create_variants(profile_banner='ProfileBanner')
    await self.party.me.set_outfit(asset=character_id, variants=banner_variant)
    await ctx.send(f'Skin set to {character_id}.')
    os.system('clear')
@commands.command()
async def eid(self, ctx: fortnitepy.ext.commands.Context, emote_id: str) -> None:
    """Play an emote directly by its EID."""
    me = self.party.me
    await me.clear_emote()
    await me.set_emote(asset=emote_id)
    await ctx.send(f'Emote set to {emote_id}!')
    os.system('clear')
@commands.command()
async def stop(self, ctx: fortnitepy.ext.commands.Context) -> None:
    # NOTE: shadowed — a second `stop` command defined further down the
    # class body redefines this name, so this version never registers.
    await self.party.me.clear_emote()
    await ctx.send('Stopped emoting.')
    os.system('clear')
@commands.command()
async def point(self, ctx: fortnitepy.ext.commands.Context, *, content: Optional[str] = None) -> None:
    """Play the Point It Out emote (the `content` argument is ignored)."""
    me = self.party.me
    await me.clear_emote()
    await me.set_emote(asset='EID_IceKing')
    await ctx.send(f'Pickaxe set & Point it Out played.')
    os.system('clear')
# Party member currently being mirrored by the copy command ("" = none).
copied_player = ""
@commands.command()
async def stop(self, ctx: fortnitepy.ext.commands.Context):
    """Stop copying another player (if active), otherwise just stop the
    current emote. This definition replaces the earlier `stop` above."""
    global copied_player
    if copied_player != "":
        copied_player = ""
        await ctx.send(f'Stopped copying all users.')
        await self.party.me.clear_emote()
        return
    else:
        try:
            await self.party.me.clear_emote()
        except RuntimeWarning:
            # NOTE(review): RuntimeWarning is a warning class and is
            # rarely raised as an exception — confirm what this was
            # meant to guard against.
            pass
@commands.command(aliases=['clone', 'copi', 'cp'])
async def copy(self, ctx: fortnitepy.ext.commands.Context, *, epic_username = None) -> None:
    """Start mirroring another member's outfit and pickaxe.

    No argument copies the caller; 'stop' anywhere in the argument ends
    copying. The mirrored member is stored in the module-level
    `copied_player` read by the change-event handlers below."""
    global copied_player
    if epic_username is None:
        user = await self.fetch_user(ctx.author.display_name)
        member = self.party.get_member(user.id)
    elif 'stop' in epic_username:
        copied_player = ""
        await ctx.send(f'Stopped copying all users.')
        await self.party.me.clear_emote()
        return
    elif epic_username is not None:
        try:
            user = await self.fetch_user(epic_username)
            member = self.party.get_member(user.id)
        except AttributeError:
            await ctx.send("Could not get that user.")
            return
    try:
        copied_player = member
        # Mirror the target's current outfit + pickaxe in one edit.
        await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit,asset=member.outfit,variants=member.outfit_variants),partial(fortnitepy.ClientPartyMember.set_pickaxe,asset=member.pickaxe,variants=member.pickaxe_variants))
        await ctx.send(f"Now copying: {member.display_name}")
        os.system('clear')
    except AttributeError:
        await ctx.send("Could not get that user.")
async def event_party_member_emote_change(self, member, before, after) -> None:
    """Mirror emote changes of the currently-copied player."""
    if member != copied_player:
        return
    if after is None:
        await self.party.me.clear_emote()
    else:
        await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_emote, asset=after))
        os.system('clear')
async def event_party_member_outfit_change(self, member, before, after) -> None:
    """Mirror outfit changes of the currently-copied player."""
    if member != copied_player:
        return
    await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit, asset=member.outfit, variants=member.outfit_variants, enlightenment=None, corruption=None))
    os.system('clear')
async def event_party_member_outfit_variants_change(self, member, before, after) -> None:
    """Mirror outfit-variant changes of the currently-copied player."""
    if member != copied_player:
        return
    await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit, variants=member.outfit_variants, enlightenment=None, corruption=None))
    os.system('clear')
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// PARTY/FRIENDS/ADMIN //////////////////////////////////////////////////////////////////////////////////////////////////////
@commands.command()
async def add(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None:
    """Send a friend request to the given Epic account."""
    user = await self.fetch_user(epic_username)
    friends = self.friends
    # NOTE(review): this tests a string user id against a collection of
    # Friend objects — confirm fortnitepy supports that containment;
    # otherwise the duplicate check never triggers.
    if user.id in friends:
        await ctx.send(f'I already have {user.display_name} as a friend')
    else:
        await self.add_friend(user.id)
        await ctx.send(f'Send i friend request to {user.display_name}.')
@is_admin()
@commands.command(aliases=['rst'],)
async def restart(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Replace the current process with a fresh interpreter run."""
    await ctx.send(f'Restart...')
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
@is_admin()
@commands.command(aliases=['max'],)
async def set(self, ctx: fortnitepy.ext.commands.Context, nombre: int) -> None:
    """Resize the party to `nombre` slots."""
    new_size = nombre
    await self.party.set_max_size(new_size)
    await ctx.send(f'Set party to {nombre} player can join')
    os.system('clear')
@commands.command()
async def ready(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Mark the bot as ready in the lobby."""
    target_state = fortnitepy.ReadyState.READY
    await self.party.me.set_ready(target_state)
    await ctx.send('Ready!')
    os.system('clear')
@commands.command(aliases=['sitin'],)
async def unready(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Mark the bot as not ready in the lobby."""
    target_state = fortnitepy.ReadyState.NOT_READY
    await self.party.me.set_ready(target_state)
    await ctx.send('Unready!')
    os.system('clear')
@commands.command(aliases=['level'],)
async def levelx(self, ctx: fortnitepy.ext.commands.Context, banner_level: int) -> None:
    """Display the given season level on the bot's banner."""
    await self.party.me.set_banner(season_level=banner_level)
    reply = f'Set level to {banner_level}.'
    await ctx.send(reply)
    os.system('clear')
@is_admin()
@commands.command()
async def sitout(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Mark the bot as sitting out of the match."""
    target_state = fortnitepy.ReadyState.SITTING_OUT
    await self.party.me.set_ready(target_state)
    await ctx.send('Sitting Out!')
    os.system('clear')
@is_admin()
@commands.command(aliases=['lv'],)
async def leave(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Leave the current party and reset its privacy to PUBLIC."""
    await self.party.me.leave()
    await ctx.send(f'I Leave')
    await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
    os.system('clear')
@is_admin()
@commands.command()
async def v(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Report the running bot version (module-level __version__)."""
    version_reply = f'version {__version__}'
    await ctx.send(version_reply)
    os.system('clear')
@is_admin()
@commands.command(aliases=['unhide'],)
async def promote(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
    """Hand party leadership to the named user (defaults to the caller)."""
    target_name = ctx.author.display_name if epic_username is None else epic_username
    user = await self.fetch_user(target_name)
    member = self.party.get_member(user.id)
    if member is None:
        await ctx.send("Failed to find that user, are you sure they're in the party?")
        return
    try:
        await member.promote()
        os.system('clear')
        await ctx.send(f"Promoted user: {member.display_name}.")
    except fortnitepy.errors.Forbidden:
        await ctx.send(f"Failed to promote {member.display_name}, as I'm not party leader.")
@is_admin()
@commands.command()
async def kick(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
    """Kick a party member by name (defaults to the caller); members in
    info['FullAccess'] are silently spared."""
    target_name = ctx.author.display_name if epic_username is None else epic_username
    user = await self.fetch_user(target_name)
    member = self.party.get_member(user.id)
    if member is None:
        await ctx.send("Failed to find that user, are you sure they're in the party?")
        return
    try:
        if member.display_name not in info['FullAccess']:
            await member.kick()
            os.system('clear')
            await ctx.send(f"Kicked user: {member.display_name}.")
    except fortnitepy.errors.Forbidden:
        await ctx.send(f"Failed to kick {member.display_name}, as I'm not party leader.")
async def set_and_update_party_prop(self, schema_key: str, new_value: str):
    # Duplicate of the identically-named method defined earlier in the
    # class; being later in the class body, this is the definition that
    # actually takes effect. Writes one meta property and pushes the
    # patch to the party service.
    prop = {schema_key: self.party.me.meta.set_prop(schema_key, new_value)}
    await self.party.patch(updated=prop)
@is_admin()
@commands.command()
async def hide(self, ctx: fortnitepy.ext.commands.Context, *, user = None):
    """Hide one member (default: the caller) or everyone ('all') from the
    lobby by rewriting the raw squad assignments. Requires leadership."""
    if self.party.me.leader:
        if user != "all":
            try:
                if user is None:
                    user = await self.fetch_profile(ctx.message.author.id)
                    member = self.party.get_member(user.id)
                else:
                    user = await self.fetch_profile(user)
                    member = self.party.get_member(user.id)
                raw_squad_assignments = self.party.meta.get_prop('Default:RawSquadAssignments_j')["RawSquadAssignments"]
                # NOTE(review): removing from the list while iterating it
                # skips elements — fine only if each member appears once;
                # confirm.
                for m in raw_squad_assignments:
                    if m['memberId'] == member.id:
                        raw_squad_assignments.remove(m)
                await self.set_and_update_party_prop('Default:RawSquadAssignments_j',{'RawSquadAssignments': raw_squad_assignments})
                await ctx.send(f"Hid {member.display_name}")
            except AttributeError:
                await ctx.send("I could not find that user.")
            except fortnitepy.HTTPException:
                await ctx.send("I am not party leader.")
        else:
            try:
                # 'all': keep only the bot itself in the assignments.
                await self.set_and_update_party_prop('Default:RawSquadAssignments_j',{'RawSquadAssignments': [{'memberId': self.user.id,'absoluteMemberIdx': 1}]})
                await ctx.send("Hid everyone in the party.")
            except fortnitepy.HTTPException:
                await ctx.send("I am not party leader.")
    else:
        await ctx.send("I need party leader to do this!")
async def invitefriends(self):
    """Invite every currently-online friend to the party."""
    # Removed the unused `send` accumulator list the original built but
    # never read; behavior (the invites) is unchanged.
    for friend in self.friends:
        if friend.is_online():
            await friend.invite()
@is_admin()
@commands.command()
async def invite(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Kick off a background task that invites all online friends."""
    try:
        task = self.invitefriends()
        self.loop.create_task(task)
    except Exception:
        pass
@commands.command(aliases=['friends'],)
async def epicfriends(self, ctx: fortnitepy.ext.commands.Context) -> None:
    """Report total / online / offline friend counts."""
    online = []
    offline = []
    try:
        for friend in self.friends:
            bucket = online if friend.is_online() else offline
            bucket.append(friend.display_name)
        await ctx.send(f"Total Friends: {len(self.friends)} / Online: {len(online)} / Offline: {len(offline)} ")
    except Exception:
        await ctx.send(f'Not work')
@is_admin()
@commands.command()
async def whisper(self, ctx: fortnitepy.ext.commands.Context, *, message = None):
    """DM `message` to every online friend, then confirm to the caller.
    Does nothing when no message is given."""
    try:
        if message is not None:
            for friend in self.friends:
                if friend.is_online():
                    await friend.send(message)
            await ctx.send(f'Send friend message to everyone')
            os.system('clear')
    except Exception:
        # Narrowed from a bare `except:`, which also caught SystemExit.
        pass
@commands.command()
async def fixadmin(self, ctx: fortnitepy.ext.commands.Context):
    """Reset info.json so 'AerozOff' is the only FullAccess admin, then
    reload it. Restricted to the owner by display-name check."""
    # BUG FIX: declare `info` global so the reload below replaces the
    # module-level admin table instead of creating a dead local binding.
    global info
    if ctx.author.display_name == 'AerozOff':
        with open("info.json", "w") as f:
            f.write('{"FullAccess": ["AerozOff"]}')
        await ctx.send('work')
        with open('info.json') as f:
            info = json.load(f)
        # BUG FIX: removed a stray trailing "\" line-continuation after
        # this send, which glued the statement onto the following `else:`
        # and made the file a syntax error.
        await ctx.send('correctly work')
    else:
        await ctx.send("You don't have perm LMAO")
@commands.command()
async def say(self, ctx: fortnitepy.ext.commands.Context, *, message = None):
    """Relay a message into party chat; show usage when none is given."""
    if message is None:
        await ctx.send(f'Try: {prefix} say (message)')
    else:
        await self.party.send(message)
@is_admin()
@commands.command()
async def admin(self, ctx, setting = None, *, user = None):
    """Manage the FullAccess admin list stored in info.json.

    Usage: `admin (add|remove|list) [user]`. With no user argument the
    command operates on the caller; add/remove on other users (and
    self-add) require the shared password, asked via DM.
    """
    if (setting is None) and (user is None):
        await ctx.send(f"Missing one or more arguments. Try: {prefix} admin (add, remove, list) (user)")
    elif (setting is not None) and (user is None):
        # --- self-service path: act on the caller -----------------------
        user = await self.fetch_profile(ctx.message.author.id)
        if setting.lower() == 'add':
            if user.display_name in info['FullAccess']:
                await ctx.send("You are already an admin")
            else:
                await ctx.send("Password?")
                response = await self.wait_for('friend_message', timeout=20)
                content = response.content.lower()
                if content == password:
                    info['FullAccess'].append(user.display_name)
                    with open('info.json', 'w') as f:
                        json.dump(info, f, indent=4)
                        await ctx.send(f"Correct. Added {user.display_name} as an admin.")
                else:
                    await ctx.send("Incorrect Password.")
        elif setting.lower() == 'remove':
            if user.display_name not in info['FullAccess']:
                await ctx.send("You are not an admin.")
            else:
                await ctx.send("Are you sure you want to remove yourself as an admin?")
                response = await self.wait_for('friend_message', timeout=20)
                content = response.content.lower()
                if (content.lower() == 'yes') or (content.lower() == 'y'):
                    info['FullAccess'].remove(user.display_name)
                    with open('info.json', 'w') as f:
                        json.dump(info, f, indent=4)
                        await ctx.send("You were removed as an admin.")
                elif (content.lower() == 'no') or (content.lower() == 'n'):
                    await ctx.send("You were kept as admin.")
                else:
                    await ctx.send("Not a correct reponse. Cancelling command.")
        elif setting == 'list':
            if user.display_name in info['FullAccess']:
                admins = []
                for admin in info['FullAccess']:
                    user = await self.fetch_profile(admin)
                    admins.append(user.display_name)
                await ctx.send(f"The bot has {len(admins)} admins:")
                for admin in admins:
                    await ctx.send(admin)
            else:
                await ctx.send("You don't have permission to this command.")
        else:
            await ctx.send(f"That is not a valid setting. Try: {prefix} admin (add, remove, list) (user)")
    elif (setting is not None) and (user is not None):
        # --- act on another user ---------------------------------------
        user = await self.fetch_profile(user)
        if setting.lower() == 'add':
            if ctx.message.author.display_name in info['FullAccess']:
                if user.display_name not in info['FullAccess']:
                    info['FullAccess'].append(user.display_name)
                    with open('info.json', 'w') as f:
                        json.dump(info, f, indent=4)
                        await ctx.send(f"Correct. Added {user.display_name} as an admin.")
                else:
                    await ctx.send("That user is already an admin.")
            else:
                await ctx.send("You don't have access to add other people as admins. Try just: !admin add")
        elif setting.lower() == 'remove':
            if ctx.message.author.display_name in info['FullAccess']:
                if user.display_name in info['FullAccess']:
                    await ctx.send("Password?")
                    response = await self.wait_for('friend_message', timeout=20)
                    content = response.content.lower()
                    if content == password:
                        info['FullAccess'].remove(user.display_name)
                        with open('info.json', 'w') as f:
                            json.dump(info, f, indent=4)
                            await ctx.send(f"{user.display_name} was removed as an admin.")
                    else:
                        await ctx.send("Incorrect Password.")
                else:
                    await ctx.send("That person is not an admin.")
            else:
                await ctx.send("You don't have permission to remove players as an admin.")
        else:
            await ctx.send(f"Not a valid setting. Try: {prefix} -admin (add, remove) (user)")
| 667bot | /667bot-1.0.0-py3-none-any.whl/oimbot/__init__.py | __init__.py |
UNKNOWN
| 667bot | /667bot-1.0.0-py3-none-any.whl/667bot-1.0.0.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
# 69
<a href="https://github.com/ambv/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
The module every couple needs!
| 69 | /69-6.9.tar.gz/69-6.9/README.md | README.md |
#!/usr/bin/env python3
"""Packaging script for the '69' module."""

from setuptools import setup

setup(
    name="69",
    version="6.9",
    description=("Handy module for 2"),
    long_description="# 69\n",
    long_description_content_type="text/markdown",
    # BUG FIX: setuptools takes 'py_modules' with bare module names
    # (no .py extension). The previous 'modules=["sixtynine.py"]' keyword
    # is not a recognized setup() argument, so the module was never
    # actually included in the distribution.
    py_modules=["sixtynine"],
    url="http://github.com/cooperlees/69",
    license="BSD",
    author="Cooper Lees",
    author_email="me@cooperlees.com",
    # classifiers should be a list per current setuptools documentation
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Development Status :: 3 - Alpha",
    ],
    python_requires=">=3.6",
    test_suite="tests",
)
| 69 | /69-6.9.tar.gz/69-6.9/setup.py | setup.py |
from setuptools import setup, find_packages
version = '0.0.17'
setup(name='6D657461666C6F77',
version=version,
description='6D657461666C6F77 is a microframework for Data Science projects',
author='Machine Learning Infrastructure team',
author_email='mli@netflix.com',
license='Apache License 2.0',
packages=find_packages(exclude=['metaflow_test']),
py_modules=['metaflow', ],
package_data={'metaflow' : ['tutorials/*/*']},
entry_points='''
[console_scripts]
metaflow=metaflow.main_cli:main
''',
install_requires = [
'click',
'requests',
'boto3'
],
tests_require = [
'coverage'
])
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/setup.py | setup.py |
import json
from collections import namedtuple
import click
from .util import get_username, is_stringish
from .exception import ParameterFieldFailed,\
ParameterFieldTypeMismatch,\
MetaflowException
# `strtype` is the base string type for isinstance checks, bridging the
# Python 2 / Python 3 string-model difference.
try:
    # Python2
    strtype = basestring
except:
    # Python3
    strtype = str
# ParameterContext allows deploy-time functions to modify their
# behavior based on the context. We can add fields here without
# breaking backwards compatibility but don't remove any fields!
ParameterContext = namedtuple('ParameterContext',
                              ['flow_name',
                               'user_name',
                               'parameter_name'])
# currently we execute only one flow per process, so we can treat
# Parameters globally. If this was to change, it should/might be
# possible to move these globals in a FlowSpec (instance) specific
# closure.
parameters = []
context_proto = None
class JSONTypeClass(click.ParamType):
    """Click parameter type that parses its command-line value as JSON."""

    name = 'JSON'

    def convert(self, value, param, ctx):
        # Delegate parsing to the json module; report any parse error
        # through Click's standard failure mechanism.
        try:
            parsed = json.loads(value)
        except:
            self.fail("%s is not a valid JSON object" % value, param, ctx)
        else:
            return parsed

    def __repr__(self):
        return 'JSON'

    def __str__(self):
        return repr(self)
class DeployTimeField(object):
    """
    This is a wrapper object for a user-defined function that is called
    at the deploy time to populate fields in a Parameter. The wrapper
    is needed to make Click show the actual value returned by the
    function instead of a function pointer in its help text. Also this
    object curries the context argument for the function, and pretty
    prints any exceptions that occur during evaluation.
    """
    def __init__(self,
                 parameter_name,   # name of the owning Parameter
                 parameter_type,   # expected Python type of the produced value
                 field,            # which Parameter field this function fills
                 fun,              # user callable taking a ParameterContext
                 return_str=True): # if True, coerce the result to str
        self.fun = fun
        self.field = field
        self.parameter_name = parameter_name
        self.parameter_type = parameter_type
        self.return_str = return_str
    def __call__(self):
        # evaluate the user function with the module-level context,
        # specialized with this parameter's name
        ctx = context_proto._replace(parameter_name=self.parameter_name)
        try:
            val = self.fun(ctx)
        except:
            raise ParameterFieldFailed(self.parameter_name, self.field)
        else:
            return self._check_type(val)
    def _check_type(self, val):
        # it is easy to introduce a deploy-time function that accidentally
        # returns a value whose type is not compatible with what is defined
        # in Parameter. Let's catch those mistakes early here, instead of
        # showing a cryptic stack trace later.
        # note: this doesn't work with long in Python2 or types defined as
        # click types, e.g. click.INT
        TYPES = {bool: 'bool',
                 int: 'int',
                 float: 'float',
                 list: 'list'}
        msg = "The value returned by the deploy-time function for "\
              "the parameter *%s* field *%s* has a wrong type. " %\
              (self.parameter_name, self.field)
        if self.parameter_type in TYPES:
            if type(val) != self.parameter_type:
                msg += 'Expected a %s.' % TYPES[self.parameter_type]
                raise ParameterFieldTypeMismatch(msg)
            return str(val) if self.return_str else val
        else:
            # any other declared type must produce a string-like value
            if not is_stringish(val):
                msg += 'Expected a string.'
                raise ParameterFieldTypeMismatch(msg)
            return val
    # __str__/__repr__ evaluate the function so Click help text shows
    # the actual value, not a function pointer
    def __str__(self):
        return self()
    def __repr__(self):
        return self()
def deploy_time_eval(value):
    """Resolve *value*, invoking it if it is a DeployTimeField wrapper."""
    return value() if isinstance(value, DeployTimeField) else value
# this is called by cli.main to seed the context used by
# deploy-time Parameter fields
def set_parameter_context(flow_name):
    """Initialize the module-level ParameterContext prototype for *flow_name*."""
    global context_proto
    context_proto = ParameterContext(
        flow_name=flow_name,
        user_name=get_username(),
        parameter_name=None)
class Parameter(object):
    """A flow-level input parameter, exposed as a Click command-line option.

    Construction validates the keyword arguments, wraps callable fields in
    DeployTimeField, and registers the instance in the module-level
    `parameters` list (consumed by add_custom_parameters).
    """
    def __init__(self, name, **kwargs):
        self.name = name
        self.kwargs = kwargs
        # TODO: check that the type is one of the supported types
        param_type = self.kwargs['type'] = self._get_type(kwargs)
        # 'params' would collide with internal naming
        if self.name == 'params':
            raise MetaflowException("Parameter name 'params' is a reserved "
                                    "word. Please use a different "
                                    "name for your parameter.")
        # make sure the user is not trying to pass a function in one of the
        # fields that don't support function-values yet
        for field in ('show_default',
                      'separator',
                      'external_trigger',
                      'required'):
            if callable(kwargs.get(field)):
                raise MetaflowException("Parameter *%s*: Field '%s' cannot "
                                        "have a function as its value"\
                                        % (name, field))
        self.kwargs['show_default'] = self.kwargs.get('show_default', True)
        # default can be defined as a function
        if callable(self.kwargs.get('default')):
            self.kwargs['default'] = DeployTimeField(name,
                                                     param_type,
                                                     'default',
                                                     self.kwargs['default'],
                                                     return_str=True)
        # external_artifact can be a function (returning a list), a list of
        # strings, or a string (which gets converted to a list)
        external_artifact = self.kwargs.pop('external_artifact', None)
        if callable(external_artifact):
            self.external_artifact = DeployTimeField(name,
                                                     list,
                                                     'external_artifact',
                                                     external_artifact,
                                                     return_str=False)
        elif isinstance(external_artifact, list):
            self.external_artifact = external_artifact
        elif external_artifact is None:
            self.external_artifact = []
        else:
            self.external_artifact = [external_artifact]
        self.external_trigger = self.kwargs.pop('external_trigger', None)
        # note that separator doesn't work with DeployTimeFields unless you
        # specify type=str
        self.separator = self.kwargs.pop('separator', None)
        if self.separator and not self.is_string_type:
            raise MetaflowException("Parameter *%s*: Separator is only allowed "
                                    "for string parameters." % name)
        if self.external_trigger and not self.external_artifact:
            raise MetaflowException("Parameter *%s* has external_trigger=True "
                                    "but external_artifact is not specified. "
                                    "Specify the name of the external "
                                    "artifact." % name)
        self.user_required = self.kwargs.get('required', False)
        # externally-supplied artifacts are always required at runtime
        if self.external_artifact:
            self.kwargs['required'] = True
        parameters.append(self)
    def _get_type(self, kwargs):
        # infer the Click type from a literal (non-callable) default,
        # falling back to str; an explicit 'type' kwarg always wins
        default_type = str
        default = kwargs.get('default')
        if default is not None and not callable(default):
            default_type = type(default)
        return kwargs.get('type', default_type)
    @property
    def is_string_type(self):
        # True when both the declared type and the default are string-like
        return self.kwargs.get('type', str) == str and\
               isinstance(self.kwargs.get('default', ''), strtype)
    # this is needed to appease Pylint for JSONType'd parameters,
    # which may do self.param['foobar']
    def __getitem__(self, x):
        pass
def add_custom_parameters(cmd):
    """Prepend a Click option to *cmd* for every registered Parameter."""
    for param in parameters:
        option = click.Option(('--' + param.name,), **param.kwargs)
        cmd.params.insert(0, option)
    return cmd
def set_parameters(flow, kwargs):
    """Assign the parsed parameter values in *kwargs* onto *flow* attributes.

    Rejects parameters whose lowercased names collide, then sets each
    attribute (splitting string values on the parameter's separator when
    one is configured). Also marks the flow as successfully initialized.
    """
    seen = set()
    for _, param in flow._get_parameters():
        lowered = param.name.lower()
        if lowered in seen:
            raise MetaflowException("Parameter *%s* is specified twice. "
                                    "Note that parameter names are "
                                    "case-insensitive." % param.name)
        seen.add(lowered)
    flow._success = True
    # Impose length constraints on parameter names as some backend systems
    # impose limits on environment variables (which are used to implement
    # parameters)
    total_encoded_len = 0
    param_count = 0
    for var, param in flow._get_parameters():
        val = kwargs[param.name.lower()]
        # Account for the parameter values to unicode strings or integer
        # values. And the name to be a unicode string.
        total_encoded_len += len((param.name + str(val)).encode("utf-8"))
        param_count += 1
        if val and param.separator:
            val = val.split(param.separator)
        setattr(flow, var, val)
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/parameters.py | parameters.py |
import os
import json
import logging
import pkg_resources
import sys
from metaflow.exception import MetaflowException
# Disable multithreading security on MacOS
if sys.platform == "darwin":
os.environ["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
def init_config():
    """Load the Metaflow JSON configuration for the active profile.

    Reads $METAFLOW_HOME/config.json (or config_<profile>.json when
    METAFLOW_PROFILE is set). Returns an empty dict when no default
    config file exists; a missing profile-specific file is an error.
    """
    home = os.environ.get('METAFLOW_HOME', '~/.metaflowconfig')
    profile = os.environ.get('METAFLOW_PROFILE')
    filename = 'config_%s.json' % profile if profile else 'config.json'
    path_to_config = os.path.expanduser(os.path.join(home, filename))
    if os.path.exists(path_to_config):
        with open(path_to_config) as f:
            return json.load(f)
    if profile:
        raise MetaflowException('Unable to locate METAFLOW_PROFILE \'%s\' in \'%s\')' %
                                (profile, home))
    return {}
# Initialize defaults required to setup environment variables.
METAFLOW_CONFIG = init_config()
def from_conf(name, default=None):
    """Look up *name* in the environment first, then the config file,
    falling back to *default*."""
    if name in os.environ:
        return os.environ[name]
    return METAFLOW_CONFIG.get(name, default)
###
# Default configuration
###
DEFAULT_DATASTORE = from_conf('METAFLOW_DEFAULT_DATASTORE', 'local')
DEFAULT_METADATA = from_conf('METAFLOW_DEFAULT_METADATA', 'local')
###
# Datastore configuration
###
# Path to the local directory to store artifacts for 'local' datastore.
DATASTORE_LOCAL_DIR = '.metaflow'
DATASTORE_SYSROOT_LOCAL = from_conf('METAFLOW_DATASTORE_SYSROOT_LOCAL')
# S3 bucket and prefix to store artifacts for 's3' datastore.
DATASTORE_SYSROOT_S3 = from_conf('METAFLOW_DATASTORE_SYSROOT_S3')
# S3 datatools root location
DATATOOLS_S3ROOT = from_conf(
    'METAFLOW_DATATOOLS_S3ROOT',
    '%s/data' % from_conf('METAFLOW_DATASTORE_SYSROOT_S3'))
###
# Datastore local cache
###
# Path to the client cache
CLIENT_CACHE_PATH = from_conf('METAFLOW_CLIENT_CACHE_PATH', '/tmp/metaflow_client')
# Maximum size (in bytes) of the cache
CLIENT_CACHE_MAX_SIZE = from_conf('METAFLOW_CLIENT_CACHE_MAX_SIZE', 10000)
###
# Metadata configuration
###
METADATA_SERVICE_URL = from_conf('METAFLOW_SERVICE_URL')
METADATA_SERVICE_NUM_RETRIES = from_conf('METAFLOW_SERVICE_RETRY_COUNT', 5)
# extra HTTP headers for the metadata service, JSON-encoded in the config
METADATA_SERVICE_HEADERS = json.loads(from_conf('METAFLOW_SERVICE_HEADERS', '{}'))
###
# AWS Batch configuration
###
# IAM role for AWS Batch container with S3 access
ECS_S3_ACCESS_IAM_ROLE = from_conf('METAFLOW_ECS_S3_ACCESS_IAM_ROLE')
# Job queue for AWS Batch
BATCH_JOB_QUEUE = from_conf('METAFLOW_BATCH_JOB_QUEUE')
# Default container image for AWS Batch
BATCH_CONTAINER_IMAGE = from_conf("METAFLOW_BATCH_CONTAINER_IMAGE")
# Default container registry for AWS Batch
BATCH_CONTAINER_REGISTRY = from_conf("METAFLOW_BATCH_CONTAINER_REGISTRY")
# Metadata service URL for AWS Batch
BATCH_METADATA_SERVICE_URL = METADATA_SERVICE_URL
###
# Conda configuration
###
# Conda package root location on S3
CONDA_PACKAGE_S3ROOT = from_conf(
    'METAFLOW_CONDA_PACKAGE_S3ROOT',
    '%s/conda' % from_conf('METAFLOW_DATASTORE_SYSROOT_S3'))
###
# Debug configuration
###
DEBUG_OPTIONS = ['subcommand', 'sidecar', 's3client']
# Dynamically define METAFLOW_DEBUG_<OPTION> module attributes for each
# debug option; the vars() trick writes into this module's namespace.
for typ in DEBUG_OPTIONS:
    vars()['METAFLOW_DEBUG_%s' % typ.upper()] = from_conf('METAFLOW_DEBUG_%s' % typ.upper())
###
# AWS Sandbox configuration
###
# Boolean flag for metaflow AWS sandbox access
# NOTE(review): bool() on a non-empty string is always True, so any value
# (including 'false') enables the sandbox — confirm this is intended.
AWS_SANDBOX_ENABLED = bool(from_conf('METAFLOW_AWS_SANDBOX_ENABLED', False))
# Metaflow AWS sandbox auth endpoint
AWS_SANDBOX_STS_ENDPOINT_URL = from_conf('METAFLOW_SERVICE_URL')
# Metaflow AWS sandbox API auth key
AWS_SANDBOX_API_KEY = from_conf('METAFLOW_AWS_SANDBOX_API_KEY')
# Internal Metadata URL
AWS_SANDBOX_INTERNAL_SERVICE_URL = from_conf('METAFLOW_AWS_SANDBOX_INTERNAL_SERVICE_URL')
# AWS region
AWS_SANDBOX_REGION = from_conf('METAFLOW_AWS_SANDBOX_REGION')
# Finalize configuration
if AWS_SANDBOX_ENABLED:
    os.environ['AWS_DEFAULT_REGION'] = AWS_SANDBOX_REGION
    BATCH_METADATA_SERVICE_URL = AWS_SANDBOX_INTERNAL_SERVICE_URL
    METADATA_SERVICE_HEADERS['x-api-key'] = AWS_SANDBOX_API_KEY
# MAX_ATTEMPTS is the maximum number of attempts, including the first
# task, retries, and the final fallback task and its retries.
#
# Datastore needs to check all attempt files to find the latest one, so
# increasing this limit has real performance implications for all tasks.
# Decreasing this limit is very unsafe, as it can lead to wrong results
# being read from old tasks.
MAX_ATTEMPTS = 6
# the naughty, naughty driver.py imported by lib2to3 produces
# spam messages to the root logger. This is what is required
# to silence it:
class Filter(logging.Filter):
    """Silence the 'grammar' spam that lib2to3's driver.py logs to root."""

    def filter(self, record):
        is_driver_spam = (record.pathname.endswith('driver.py')
                          and 'grammar' in record.msg)
        return not is_driver_spam
logger = logging.getLogger()
logger.addFilter(Filter())
def get_version(pkg):
    """Return the installed version string of distribution *pkg*."""
    dist = pkg_resources.get_distribution(pkg)
    return dist.version
# PINNED_CONDA_LIBS are the libraries that metaflow depends on for execution
# and are needed within a conda environment
def get_pinned_conda_libs():
    """Return the mapping of dependency name to its pinned version."""
    pins = (
        ('click', '7.0'),
        ('requests', '2.22.0'),
        ('boto3', '1.9.235'),
        ('coverage', '4.5.3'),
    )
    return dict(pins)
# process-wide cache of STS credentials fetched for the AWS sandbox
cached_aws_sandbox_creds = None
def get_authenticated_boto3_client(module):
    """Return a boto3 client for *module*, using sandbox STS credentials
    when the AWS sandbox is enabled, otherwise the default credential chain."""
    from metaflow.exception import MetaflowException
    import requests
    try:
        import boto3
    except (NameError, ImportError):
        raise MetaflowException(
            "Could not import module 'boto3'. Install boto3 first.")
    if AWS_SANDBOX_ENABLED:
        global cached_aws_sandbox_creds
        if cached_aws_sandbox_creds is None:
            # authenticate using STS
            url = "%s/auth/token" % AWS_SANDBOX_STS_ENDPOINT_URL
            headers = {
                'x-api-key': AWS_SANDBOX_API_KEY
            }
            try:
                r = requests.get(url, headers=headers)
                r.raise_for_status()
                # response body is a JSON dict of boto3 Session kwargs
                cached_aws_sandbox_creds = r.json()
            except requests.exceptions.HTTPError as e:
                raise MetaflowException(repr(e))
        return boto3.session.Session(**cached_aws_sandbox_creds).client(module)
    return boto3.client(module)
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/metaflow_config.py | metaflow_config.py |
import time
from contextlib import contextmanager
@contextmanager
def profile(label, stats_dict=None):
    """Time the wrapped code block.

    With no *stats_dict*, start/completion lines are printed. Otherwise
    the elapsed milliseconds are accumulated under *stats_dict[label]*
    without printing anything.
    """
    verbose = stats_dict is None
    if verbose:
        print('PROFILE: %s starting' % label)
    started = time.time()
    yield
    elapsed_ms = int((time.time() - started) * 1000)
    if verbose:
        print('PROFILE: %s completed in %dms' % (label, elapsed_ms))
    else:
        stats_dict[label] = stats_dict.get(label, 0) + elapsed_ms
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/metaflow_profile.py | metaflow_profile.py |
import sys
import os
import traceback
from itertools import islice
from multiprocessing import cpu_count
from tempfile import NamedTemporaryFile
try:
# Python 2
import cPickle as pickle
except:
# Python 3
import pickle
# This module reimplements select functions from the standard
# Python multiprocessing module.
#
# Three reasons why:
#
# 1) Multiprocessing has open bugs, e.g. https://bugs.python.org/issue29759
# 2) Work around limits, like the 32MB object limit in Queue, without
# introducing an external dependency like joblib.
# 3) Supports closures and lambdas in contrast to multiprocessing.
class MulticoreException(Exception):
    """Raised when a forked child process exits with a non-zero status."""
    pass
def _spawn(func, arg, dir):
    """Fork a child that runs func(arg) and pickles the result to a file.

    In the parent, returns (pid, output_file). The child never returns:
    it writes the pickled result and terminates via os._exit (exit code 0
    on success, 1 on any exception).
    """
    # reserve a unique output file; delete=False keeps it after close so
    # the child can reopen it for writing and the parent can read it later
    with NamedTemporaryFile(prefix='parallel_map_',
                            dir=dir,
                            delete=False) as tmpfile:
        output_file = tmpfile.name
    # make sure stdout and stderr are flushed before forking. Otherwise
    # we may print multiple copies of the same output
    sys.stderr.flush()
    sys.stdout.flush()
    pid = os.fork()
    if pid:
        # parent process
        return pid, output_file
    else:
        # child process
        try:
            exit_code = 1
            ret = func(arg)
            with open(output_file, 'wb') as f:
                pickle.dump(ret, f, protocol=pickle.HIGHEST_PROTOCOL)
            exit_code = 0
        except:
            # we must not let any exceptions escape this function
            # which might trigger unintended side-effects
            traceback.print_exc()
        finally:
            sys.stderr.flush()
            sys.stdout.flush()
            # we can't use sys.exit(0) here since it raises SystemExit
            # that may have unintended side-effects (e.g. triggering
            # finally blocks).
            os._exit(exit_code)
def parallel_imap_unordered(func, iterable, max_parallel=None, dir=None):
    """Yield func(x) for each x in iterable, computed in forked workers.

    At most *max_parallel* (default: cpu_count()) children run at once.
    Results are yielded as children are reaped, not in input order.
    Raises MulticoreException if any child exits non-zero.
    """
    if max_parallel is None:
        max_parallel = cpu_count()
    # NOTE(review): 'ret' is never used below
    ret = []
    args_iter = iter(iterable)
    # prime the pool with the first max_parallel work items
    pids = [_spawn(func, arg, dir)
            for arg in islice(args_iter, max_parallel)]
    while pids:
        pid, output_file = pids.pop()
        # waitpid returns (pid, status); non-zero status means the child failed
        if os.waitpid(pid, 0)[1]:
            raise MulticoreException('Child failed')
        with open(output_file, 'rb') as f:
            yield pickle.load(f)
        os.remove(output_file)
        # refill the pool with the next work item, if any
        arg = list(islice(args_iter, 1))
        if arg:
            pids.insert(0, _spawn(func, arg[0], dir))
def parallel_map(func, iterable, **kwargs):
    """Like map(func, iterable) but evaluated in forked worker processes.

    Results are returned in input order. Extra keyword arguments are
    forwarded to parallel_imap_unordered.
    """
    def indexed(pair):
        pos, item = pair
        return pos, func(item)
    unordered = parallel_imap_unordered(indexed, enumerate(iterable), **kwargs)
    return [value for _, value in sorted(unordered)]
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/multicore_utils.py | multicore_utils.py |
import os
import shutil
import sys
import tempfile
import zlib
import base64
from functools import wraps
from itertools import takewhile
from metaflow.exception import MetaflowUnknownUser, MetaflowInternalError
try:
# python2
import cStringIO
BytesIO = cStringIO.StringIO
unicode_type = unicode
bytes_type = str
from urllib import quote, unquote
# unquote_bytes should be a function that takes a urlencoded byte
# string, encoded in UTF-8, url-decodes it and returns it as a
# unicode object. Confusingly, how to accomplish this differs
# between Python2 and Python3.
#
# Test with this input URL:
# b'crazypath/%01%C3%B'
# it should produce
# u'crazypath/\x01\xff'
def unquote_bytes(x):
return to_unicode(unquote(to_bytes(x)))
except:
# python3
import io
BytesIO = io.BytesIO
unicode_type = str
bytes_type = bytes
from urllib.parse import quote, unquote
def unquote_bytes(x):
return unquote(to_unicode(x))
class TempDir(object):
    """Context manager yielding a fresh temporary directory, removed on exit.

    Exists because Python 2.7 has no tempfile.TemporaryDirectory.
    """

    def __enter__(self):
        self.name = tempfile.mkdtemp()
        return self.name

    def __exit__(self, exc_type, exc_value, traceback):
        shutil.rmtree(self.name)
def cached_property(getter):
    """Property that computes its value once and memoizes it on the instance."""
    attr = '__%s' % getter.__name__

    @wraps(getter)
    def memoized(self):
        if not hasattr(self, attr):
            setattr(self, attr, getter(self))
        return getattr(self, attr)

    return property(memoized)
def all_equal(it):
    """
    Return True if all elements of the given iterator are equal.
    """
    it = iter(it)
    for first in it:
        # compare everything after the first element against it
        return all(x == first for x in it)
    # empty iterables are vacuously all-equal
    return True
def url_quote(url):
    """
    Encode a unicode URL to a safe byte string
    """
    # quote() works reliably only with (byte)strings in Python2,
    # hence we need to .encode('utf-8') first. To see by yourself,
    # try quote(u'\xff') in python2. Python3 converts the output
    # always to Unicode, hence we need the outer to_bytes() too.
    #
    # We mark colon as a safe character to keep simple ASCII urls
    # nice looking, e.g. "http://google.com"
    return to_bytes(quote(to_bytes(url), safe='/:'))
def url_unquote(url_bytes):
    """
    Decode a byte string encoded with url_quote to a unicode URL
    """
    # unquote_bytes is the version-appropriate helper defined in the
    # Python2/Python3 compatibility block above
    return unquote_bytes(url_bytes)
def is_stringish(x):
    """
    Returns true if the object is a unicode or a bytes object
    """
    return isinstance(x, (bytes_type, unicode_type))
def to_fileobj(x):
    """
    Convert any string-line object to a byte-returning fileobj
    """
    # BytesIO is the version-appropriate in-memory byte stream alias
    return BytesIO(to_bytes(x))
def to_unicode(x):
    """
    Convert any object to a unicode object
    """
    # bytes are decoded as UTF-8; everything else goes through the
    # unicode constructor
    return x.decode('utf-8') if isinstance(x, bytes_type) else unicode_type(x)
def to_bytes(x):
    """
    Convert any object to a byte string
    """
    if isinstance(x, bytes_type):
        return x
    if isinstance(x, unicode_type):
        return x.encode('utf-8')
    if isinstance(x, float):
        # repr() keeps full float precision
        return repr(x).encode('utf-8')
    return str(x).encode('utf-8')
def get_username():
    """
    Return the name of the current user, or None if the current user
    could not be determined.
    """
    # the order matters: an explicit Metaflow setting wins, then sudo's
    # original caller, then the generic login variables
    for envvar in ('METAFLOW_USER', 'SUDO_USER', 'USERNAME', 'USER'):
        candidate = os.environ.get(envvar)
        if candidate and candidate != 'root':
            return candidate
    return None
def resolve_identity():
    """Return the current identity string: 'production:<token>' when a
    production token is set, otherwise 'user:<name>'.

    Raises MetaflowUnknownUser when no non-root user can be determined.
    """
    prod_token = os.environ.get('METAFLOW_PRODUCTION_TOKEN')
    if prod_token:
        return 'production:%s' % prod_token
    user = get_username()
    if not user or user == 'root':
        raise MetaflowUnknownUser()
    return 'user:%s' % user
def get_latest_run_id(echo, flow_name):
    """Return the run id stored in the local datastore's 'latest_run' file
    for *flow_name*, or None if the datastore or the file does not exist."""
    from metaflow.datastore.local import LocalDataStore
    local_root = LocalDataStore.datastore_root
    if local_root is None:
        # resolve (and cache) the datastore root lazily; do not create it
        v = LocalDataStore.get_datastore_root_from_config(echo, create_on_absent=False)
        LocalDataStore.datastore_root = local_root = v
    if local_root:
        path = os.path.join(local_root, flow_name, 'latest_run')
        if os.path.exists(path):
            with open(path) as f:
                return f.read()
    return None
def write_latest_run_id(obj, run_id):
    """Persist *run_id* as the flow's 'latest_run' marker in the local datastore.

    Creates <datastore_root>/<flow name>/ if needed and writes the run id
    into its 'latest_run' file, so get_latest_run_id can find it later.
    """
    import errno
    from metaflow.datastore.local import LocalDataStore
    if LocalDataStore.datastore_root is None:
        LocalDataStore.datastore_root = LocalDataStore.get_datastore_root_from_config(obj.echo)
    path = os.path.join(LocalDataStore.datastore_root, obj.flow.name)
    try:
        os.makedirs(path)
    except OSError as x:
        # The directory may already exist (e.g. from a previous run);
        # that is fine. Use the symbolic errno instead of the magic
        # number 17, which is POSIX-specific.
        if x.errno != errno.EEXIST:
            raise
    with open(os.path.join(path, 'latest_run'), 'w') as f:
        f.write(str(run_id))
def get_object_package_version(obj):
    """
    Return the top level package name and package version that defines the
    class of the given object.
    """
    try:
        module_name = obj.__class__.__module__
    except AttributeError:
        return None, None
    # keep only the top-level package of a dotted module path
    top_package_name = module_name.split('.')[0] if '.' in module_name \
                       else module_name
    try:
        return top_package_name, sys.modules[top_package_name].__version__
    except AttributeError:
        # package does not expose __version__
        return top_package_name, None
def compress_list(lst,
                  separator=',',
                  rangedelim=':',
                  zlibmarker='!',
                  zlibmin=500):
    """Encode a list of strings into a compact single string.

    The output is one of three self-describing forms (see below), decoded
    by decompress_list. Items must not contain any of the delimiter
    characters.
    """
    bad_items = [x for x in lst
                 if separator in x or rangedelim in x or zlibmarker in x]
    if bad_items:
        raise MetaflowInternalError("Item '%s' includes a delimiter character "
                                    "so it can't be compressed" % bad_items[0])
    # Three output modes:
    lcp = longest_common_prefix(lst)
    if len(lst) < 2 or not lcp:
        # 1. Just a comma-separated list
        res = separator.join(lst)
    else:
        # 2. Prefix and a comma-separated list of suffixes
        lcplen = len(lcp)
        residuals = [e[lcplen:] for e in lst]
        res = rangedelim.join((lcp, separator.join(residuals)))
    if len(res) < zlibmin:
        return res
    else:
        # 3. zlib-compressed, base64-encoded, prefix-encoded list
        # interestingly, a typical zlib-encoded list of suffixes
        # has plenty of redundancy. Decoding the data *twice* helps a
        # lot
        compressed = zlib.compress(zlib.compress(to_bytes(res)))
        return zlibmarker + base64.b64encode(compressed).decode('utf-8')
def decompress_list(lststr, separator=',', rangedelim=':', zlibmarker='!'):
    """Inverse of compress_list: recover the original list of strings."""
    # Three input modes:
    if lststr[0] == zlibmarker:
        # zlib-compressed, base64-encoded payload (deflated twice)
        raw = base64.b64decode(lststr[1:])
        decoded = zlib.decompress(zlib.decompress(raw)).decode('utf-8')
    else:
        decoded = lststr
    if rangedelim not in decoded:
        # plain comma-separated list
        return decoded.split(separator)
    # common prefix plus comma-separated suffixes
    prefix, suffixes = decoded.split(rangedelim)
    return [prefix + s for s in suffixes.split(separator)]
def longest_common_prefix(lst):
    """Return the longest prefix shared by every string in *lst* ('' if empty)."""
    # os.path.commonprefix compares character by character despite its
    # name, which is exactly the semantics required here.
    return os.path.commonprefix(lst) if lst else ''
def get_metaflow_root():
    """Return the directory that contains the metaflow package."""
    package_dir = os.path.dirname(__file__)
    return os.path.dirname(package_dir)
def dict_to_cli_options(params):
    """Yield command-line tokens ('--key', value, ...) for every truthy
    entry of *params*; tuple values produce one option per element."""
    for key, val in params.items():
        if not val:
            continue
        # 'with' is a reserved keyword in Python, so the click argument
        # is named 'decospecs'; translate it back for the command line
        if key == 'decospecs':
            key = 'with'
        key = key.replace('_', '-')
        values = val if isinstance(val, tuple) else [val]
        for value in values:
            yield '--%s' % key
            if isinstance(value, bool):
                # boolean flags carry no value token
                continue
            value = to_unicode(value)
            yield '\'%s\'' % value if ' ' in value else value
# This function is imported from https://github.com/cookiecutter/whichcraft
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.
    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.
    Note: This function was backported from the Python 3 source code.
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    try: # Forced testing
        # Python 3: delegate directly to the stdlib implementation
        from shutil import which as w
        return w(cmd, mode, path)
    except ImportError:
        # Python 2 fallback: reimplement shutil.which
        def _access_check(fn, mode):
            return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
        # If we're given a path with a directory part, look it up directly
        # rather than referring to PATH directories. This includes checking
        # relative to the current directory, e.g. ./script
        if os.path.dirname(cmd):
            if _access_check(cmd, mode):
                return cmd
            return None
        if path is None:
            path = os.environ.get("PATH", os.defpath)
        if not path:
            return None
        path = path.split(os.pathsep)
        files = [cmd]
        seen = set()
        for dir in path:
            # skip duplicate PATH entries (case-insensitively on Windows)
            normdir = os.path.normcase(dir)
            if normdir not in seen:
                seen.add(normdir)
                for thefile in files:
                    name = os.path.join(dir, thefile)
                    if _access_check(name, mode):
                        return name
        return None
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/util.py | util.py |
from __future__ import print_function
import os
import sys
import click
import traceback
# add module to python path if not already present
myDir = os.path.dirname(os.path.abspath(__file__))
parentDir = os.path.split(myDir)[0]
sys.path.insert(0, parentDir)
from metaflow.sidecar_messages import MessageTypes, deserialize
from metaflow.plugins import SIDECAR
class WorkershutdownError(Exception):
    """Raised to terminate the sidecar worker's message loop."""
    pass
def process_messages(worker):
    """Read serialized messages from stdin and dispatch them to *worker*.

    Loops until a SHUTDOWN message arrives, stdin closes (empty readline),
    or an unexpected error occurs; in all cases worker.shutdown() is
    called before returning.
    """
    while True:
        try:
            msg = sys.stdin.readline().strip()
            if msg:
                parsed_msg = deserialize(msg)
                if parsed_msg.msg_type == MessageTypes.SHUTDOWN:
                    raise WorkershutdownError()
                else:
                    worker.process_message(parsed_msg)
            else:
                # EOF on stdin: the parent process is gone, shut down
                raise WorkershutdownError()
        except WorkershutdownError:
            worker.shutdown()
            break
        except Exception as e: # todo handle other possible exceptions gracefully
            print(traceback.format_exc())
            worker.shutdown()
            break
@click.command(help="Initialize workers")
@click.argument('worker-type')
def main(worker_type):
    """CLI entry point: look up the sidecar class for *worker_type* in the
    SIDECAR registry and run its message loop."""
    worker_process = SIDECAR.get(worker_type)
    if worker_process is not None:
        process_messages(worker_process())
    else:
        print("UNRECOGNIZED WORKER: %s" % worker_type, file=sys.stderr)
if __name__ == "__main__":
main()
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/sidecar_worker.py | sidecar_worker.py |
from __future__ import print_function
import sys
import os
from functools import partial
import metaflow.metaflow_config as config
# Set
#
# - METAFLOW_DEBUG_SUBCOMMAND=1
# to see command lines used to launch subcommands (especially 'step')
# - METAFLOW_DEBUG_SIDECAR=1
# to see command lines used to launch sidecars
# - METAFLOW_DEBUG_S3CLIENT=1
# to see command lines used by the S3 client. Note that this environment
# variable also disables automatic cleaning of subdirectories, which can
# fill up disk space quickly
class Debug(object):
    """Per-subsystem debug logging switchboard.

    For every entry in config.DEBUG_OPTIONS this instance exposes two
    attributes: `<type>_exec(args)` which logs a command line when the
    corresponding METAFLOW_DEBUG_<TYPE> setting is enabled (no-op
    otherwise), and `<type>`, a boolean flag telling whether logging is
    enabled for that subsystem.
    """
    def __init__(self):
        for typ in config.DEBUG_OPTIONS:
            if getattr(config, 'METAFLOW_DEBUG_%s' % typ.upper()):
                op = partial(self.log, typ)
            else:
                op = self.noop
            # use debug.$type_exec(args) to log command line for subprocesses
            # of type $type
            setattr(self, '%s_exec' % typ, op)
            # use the debug.$type flag to check if logging is enabled for $type
            setattr(self, typ, op != self.noop)
    def log(self, typ, args):
        # print to stderr so debug output never mixes with flow output
        print('debug[%s]: %s' % (typ, ' '.join(args)), file=sys.stderr)
    def noop(self, args):
        pass
debug = Debug()
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/debug.py | debug.py |
import sys
try:
from StringIO import StringIO
except:
from io import StringIO
from .exception import MetaflowException
class PyLintWarn(MetaflowException):
    """Raised when Pylint reports warnings that the user must fix or suppress."""
    headline="Pylint is not happy"
class PyLint(object):
    """Thin wrapper that runs Pylint on a flow file and filters its output."""
    def __init__(self, fname):
        # fname: path of the Python file to lint
        self._fname = fname
        try:
            from pylint.lint import Run
            self._run = Run
        except:
            # pylint is an optional dependency; degrade gracefully
            self._run = None
    def has_pylint(self):
        """Return True if the pylint package is importable."""
        return self._run is not None
    def run(self, logger=None, warnings=False, pylint_config=[]):
        """Run Pylint; log filtered findings via *logger* and raise
        PyLintWarn if any remain.

        NOTE(review): the mutable default `pylint_config=[]` is only read
        (via args.extend), never mutated, so it is harmless here.
        """
        args = [self._fname]
        if not warnings:
            args.append('--errors-only')
        if pylint_config:
            args.extend(pylint_config)
        # capture Pylint's stdout/stderr so it doesn't pollute the console
        stdout = sys.stdout
        stderr = sys.stderr
        sys.stdout = StringIO()
        sys.stderr = StringIO()
        run = self._run(args, None, False)
        output = sys.stdout.getvalue()
        sys.stdout = stdout
        sys.stderr = stderr
        # re-purposed: now tracks whether any finding survived filtering
        warnings = False
        for line in self._filter_lines(output):
            logger(line, indent=True)
            warnings = True
        if warnings:
            raise PyLintWarn('*Fix Pylint warnings listed above or say --no-pylint.*')
    def _filter_lines(self, output):
        # yield only the Pylint output lines worth showing to the user
        for line in output.splitlines():
            # Ignore headers
            if '***' in line:
                continue
            # Ignore complaints about decorators missing in the metaflow module.
            # Automatic generation of decorators confuses Pylint.
            if "(no-name-in-module)" in line:
                continue
            # Ignore complaints related to dynamic and JSON-types parameters
            if "Instance of 'Parameter' has no" in line:
                continue
            # Ditto for IncludeFile
            if "Instance of 'IncludeFile' has no" in line:
                continue
            yield line
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/pylint_wrapper.py | pylint_wrapper.py |
import traceback
from functools import partial
from .flowspec import FlowSpec
from .exception import MetaflowException, InvalidDecoratorAttribute
class BadStepDecoratorException(MetaflowException):
    """Raised when a step decorator is applied to a function without @step."""
    headline = "Syntax error"
    def __init__(self, deco, func):
        msg =\
            "You tried to apply decorator '{deco}' on '{func}' which is "\
            "not declared as a @step. Make sure you apply this decorator "\
            "on a function which has @step on the line just before the "\
            "function name and @{deco} is above @step.".format(deco=deco,
                                                               func=func.__name__)
        super(BadStepDecoratorException, self).__init__(msg)
class BadFlowDecoratorException(MetaflowException):
    """Raised when a flow decorator is applied to something other than a FlowSpec."""
    headline = "Syntax error"
    def __init__(self, deconame):
        msg =\
            "Decorator '%s' can be applied only to FlowSpecs. Make sure "\
            "the decorator is above a class definition." % deconame
        super(BadFlowDecoratorException, self).__init__(msg)
class UnknownStepDecoratorException(MetaflowException):
    """Raised when a step decorator name is not in plugins.STEP_DECORATORS."""
    headline = "Unknown step decorator"
    def __init__(self, deconame):
        from .plugins import STEP_DECORATORS
        decos = ','.join(t.name for t in STEP_DECORATORS)
        msg = "Unknown step decorator *{deconame}*. The following decorators are "\
              "supported: *{decos}*".format(deconame=deconame, decos=decos)
        super(UnknownStepDecoratorException, self).__init__(msg)
class DuplicateStepDecoratorException(MetaflowException):
    """Raised when the same decorator is attached twice to one step."""
    headline = "Duplicate decorators"
    def __init__(self, deco, func):
        msg = "Step '{step}' already has a decorator '{deco}'. "\
              "You can specify each decorator only once."\
              .format(step=func.__name__, deco=deco)
        super(DuplicateStepDecoratorException, self).__init__(msg)
class UnknownFlowDecoratorException(MetaflowException):
    """Raised when a flow decorator name is not in plugins.FLOW_DECORATORS."""
    headline = "Unknown flow decorator"
    def __init__(self, deconame):
        from .plugins import FLOW_DECORATORS
        decos = ','.join(t.name for t in FLOW_DECORATORS)
        msg = "Unknown flow decorator *{deconame}*. The following decorators are "\
              "supported: *{decos}*".format(deconame=deconame, decos=decos)
        super(UnknownFlowDecoratorException, self).__init__(msg)
class DuplicateFlowDecoratorException(MetaflowException):
    """Raised when the same decorator is attached twice to one flow."""
    headline = "Duplicate decorators"
    def __init__(self, deco):
        msg = "Flow already has a decorator '{deco}'. "\
              "You can specify each decorator only once."\
              .format(deco=deco)
        super(DuplicateFlowDecoratorException, self).__init__(msg)
class Decorator(object):
    """
    Base class for all decorators.

    Subclasses set `name` (the CLI spec name) and `defaults` (the allowed
    attributes with their default values).
    """
    name = 'NONAME'
    defaults = {}

    def __init__(self,
                 attributes=None,
                 statically_defined=False):
        # start from the subclass defaults, then overlay the user-supplied
        # attributes; unknown attribute names are rejected
        self.attributes = dict(self.defaults)
        self.statically_defined = statically_defined
        for key, value in (attributes or {}).items():
            if key not in self.defaults:
                raise InvalidDecoratorAttribute(
                    self.name, key, self.defaults)
            self.attributes[key] = value

    @classmethod
    def _parse_decorator_spec(cls, deco_spec):
        # spec syntax: "name" or "name:attr1=v1,attr2=v2"
        parts = deco_spec.split(':', 1)
        if len(parts) == 2:
            _, attrspec = parts
            attrs = dict(pair.split('=') for pair in attrspec.split(','))
            return cls(attributes=attrs)
        return cls()

    def make_decorator_spec(self):
        # inverse of _parse_decorator_spec: render "name[:k=v,...]"
        attrs = {k: v for k, v in self.attributes.items() if v is not None}
        if not attrs:
            return self.name
        attrstr = ','.join('%s=%s' % item for item in attrs.items())
        return '%s:%s' % (self.name, attrstr)

    def __str__(self):
        mode = 'decorated' if self.statically_defined else 'cli'
        attrs = ' '.join('%s=%s' % kv for kv in self.attributes.items())
        if attrs:
            attrs = ' ' + attrs
        return '%s<%s%s>' % (self.name, mode, attrs)
class FlowDecorator(Decorator):
    """Base class for all flow-level (class) decorators."""

    def flow_init(self, flow, graph, environment, datastore, logger):
        """
        Called when all decorators have been created for this flow.
        """
        pass
class StepDecorator(Decorator):
    """
    Base class for all step decorators. Subclasses override the lifecycle
    hooks below; the defaults are no-ops.

    Example:

        @my_decorator
        @step
        def a(self):
            pass

        @my_decorator
        @step
        def b(self):
            pass

    To make the above work, define a subclass

        class MyDecorator(StepDecorator):
            name = "my_decorator"

    and include it in plugins.STEP_DECORATORS. Now both a() and b()
    get an instance of MyDecorator, so you can keep step-specific
    state easily.

    TODO (savin): Initialize the decorators with flow, graph,
                  step.__name__ etc., so that we don't have to
                  pass them around with every lifecycle call.
    """

    def step_init(self, flow, graph, step_name, decorators, environment, datastore, logger):
        """
        Called when all decorators have been created for this step.
        """
        pass

    def package_init(self, flow, step_name, environment):
        """
        Called to determine package components.
        """
        pass

    def step_task_retry_count(self):
        """
        Called to determine the number of times this task should be retried.
        Returns a tuple of (user_code_retries, error_retries). Error retries
        are attempts to run the process after the user code has failed all
        its retries.
        """
        # Default: no retries of either kind.
        return 0, 0

    def runtime_init(self, flow, graph, package, run_id):
        """
        Top-level initialization before anything gets run in the runtime
        context.
        """
        pass

    def runtime_task_created(self,
                             datastore,
                             task_id,
                             split_index,
                             input_paths,
                             is_cloned):
        """
        Called when the runtime has created a task related to this step.
        """
        pass

    def runtime_finished(self, exception):
        """
        Called when the runtime created task finishes or encounters an interrupt/exception.
        """
        pass

    def runtime_step_cli(self, cli_args, retry_count, max_user_code_retries):
        """
        Access the command line for a step execution in the runtime context.
        """
        pass

    def task_pre_step(self,
                      step_name,
                      datastore,
                      metadata,
                      run_id,
                      task_id,
                      flow,
                      graph,
                      retry_count,
                      max_user_code_retries):
        """
        Run before the step function in the task context.
        """
        pass

    def task_decorate(self,
                      step_func,
                      flow,
                      graph,
                      retry_count,
                      max_user_code_retries):
        # May return a wrapped version of the user's step function;
        # the default passes it through unchanged.
        return step_func

    def task_post_step(self,
                       step_name,
                       flow,
                       graph,
                       retry_count,
                       max_user_code_retries):
        """
        Run after the step function has finished successfully in the task
        context.
        """
        pass

    def task_exception(self,
                       exception,
                       step_name,
                       flow,
                       graph,
                       retry_count,
                       max_user_code_retries):
        """
        Run if the step function raised an exception in the task context.

        If this method returns True, it is assumed that the exception has
        been taken care of and the flow may continue.
        """
        pass

    def task_finished(self,
                      step_name,
                      flow,
                      graph,
                      is_task_ok,
                      retry_count,
                      max_user_code_retries):
        """
        Run after the task context has been finalized.

        is_task_ok is set to False if the user code raised an exception that
        was not handled by any decorator.

        Note that you can't create or modify data artifacts in this method
        since the task has been finalized by the time this method
        is called. Also note that the task may fail after this method has been
        called, so this method may get called multiple times for a task over
        multiple attempts, similar to all task_ methods.
        """
        pass
def _base_flow_decorator(decofunc, *args, **kwargs):
"""
Decorator prototype for all flow (class) decorators. This function gets
specialized and imported for all decorators types by
_import_plugin_decorators().
"""
if args:
# No keyword arguments specified for the decorator, e.g. @foobar.
# The first argument is the class to be decorated.
cls = args[0]
if isinstance(cls, type) and issubclass(cls, FlowSpec):
# flow decorators add attributes in the class dictionary,
# _flow_decorators.
if decofunc.name in cls._flow_decorators:
raise DuplicateFlowDecoratorException(decofunc.name)
else:
cls._flow_decorators[decofunc.name] = decofunc(attributes=kwargs,
statically_defined=True)
else:
raise BadFlowDecoratorException(decofunc.name)
return cls
else:
# Keyword arguments specified, e.g. @foobar(a=1, b=2).
# Return a decorator function that will get the actual
# function to be decorated as the first argument.
def wrap(f):
return _base_flow_decorator(decofunc, f, **kwargs)
return wrap
def _base_step_decorator(decotype, *args, **kwargs):
"""
Decorator prototype for all step decorators. This function gets specialized
and imported for all decorators types by _import_plugin_decorators().
"""
if args:
# No keyword arguments specified for the decorator, e.g. @foobar.
# The first argument is the function to be decorated.
func = args[0]
if not hasattr(func, 'is_step'):
raise BadStepDecoratorException(decotype.name, func)
# Only the first decorator applies
if decotype.name in [deco.name for deco in func.decorators]:
raise DuplicateStepDecoratorException(decotype.name, func)
else:
func.decorators.append(decotype(attributes=kwargs,
statically_defined=True))
return func
else:
# Keyword arguments specified, e.g. @foobar(a=1, b=2).
# Return a decorator function that will get the actual
# function to be decorated as the first argument.
def wrap(f):
return _base_step_decorator(decotype, f, **kwargs)
return wrap
def _attach_decorators(flow, decospecs):
    """
    Attach decorators to all steps during runtime. This has the same
    effect as if you defined the decorators statically in the source for
    every step. Used by --with command line parameter.
    """
    from .plugins import STEP_DECORATORS
    available = {decotype.name: decotype for decotype in STEP_DECORATORS}
    for spec in decospecs:
        deconame = spec.split(':')[0]
        if deconame not in available:
            raise UnknownStepDecoratorException(deconame)
        # Attach the decorator to all steps that don't have this decorator
        # already. This means that statically defined decorators are always
        # preferred over runtime decorators.
        #
        # Note that each step gets its own instance of the decorator class,
        # so decorators can maintain step-specific state.
        for step in flow:
            attached = set(deco.name for deco in step.decorators)
            if deconame not in attached:
                step.decorators.append(
                    available[deconame]._parse_decorator_spec(spec))
def _init_decorators(flow, graph, environment, datastore, logger):
for deco in flow._flow_decorators.values():
deco.flow_init(flow, graph, environment, datastore, logger)
for step in flow:
for deco in step.decorators:
deco.step_init(flow, graph, step.__name__,
step.decorators, environment, datastore, logger)
def step(f):
    """
    The step decorator. Makes a method a step in the workflow.

    Marks the function with is_step, gives it an empty decorator list and
    records its name for the graph builder.
    """
    f.is_step = True
    f.decorators = []
    try:
        # python 3
        f.name = f.__name__
    except AttributeError:
        # python 2: unbound methods expose the name on __func__.
        # Catch AttributeError only -- a bare except here would also
        # swallow KeyboardInterrupt and unrelated errors.
        f.name = f.__func__.func_name
    return f
def _import_plugin_decorators(globals_dict):
    """
    Auto-generate a decorator function for every decorator
    defined in plugins.STEP_DECORATORS and plugins.FLOW_DECORATORS.
    """
    from .plugins import STEP_DECORATORS, FLOW_DECORATORS
    # Q: Why not use StepDecorators directly as decorators?
    # A: Getting an object to behave as a decorator that can work
    #    both with and without arguments is surprisingly hard.
    #    It is easier to make plain function decorators work in
    #    the dual mode - see _base_step_decorator above.
    for step_deco in STEP_DECORATORS:
        globals_dict[step_deco.name] = partial(_base_step_decorator, step_deco)
    # Add flow-level decorators.
    for flow_deco in FLOW_DECORATORS:
        globals_dict[flow_deco.name] = partial(_base_flow_decorator, flow_deco)
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/decorators.py | decorators.py |
import io
import os
import click
from metaflow.exception import MetaflowException
from metaflow.parameters import Parameter
class InternalFile():
    """
    Wraps a local file included in a flow via IncludeFile.

    Calling the instance reads and returns the file contents, as text or
    bytes depending on is_text.
    """

    def __init__(self, logger, is_text, encoding, path):
        self._logger = logger
        self._is_text = is_text
        self._encoding = encoding
        self._path = path
        self._size = os.path.getsize(self._path)

    def __call__(self):
        """Log a progress message, then read and return the file contents."""
        unit = ['B', 'KB', 'MB', 'GB', 'TB']
        sz = self._size
        pos = 0
        # Stop at the last available unit so unit[pos] can never go out of
        # bounds (the original loop allowed pos == len(unit) for >=1PB files).
        while pos < len(unit) - 1 and sz >= 1024:
            sz = sz // 1024
            pos += 1
        if pos >= 3:
            extra = '(this may take a while)'
        else:
            extra = ''
        self._logger(
            'Including file %s of size %d%s %s' % (self._path, sz, unit[pos], extra))
        if self._is_text:
            # Use a context manager so the handle is closed deterministically
            # instead of relying on garbage collection.
            with io.open(self._path, mode='rt', encoding=self._encoding) as f:
                return f.read()
        try:
            with io.open(self._path, mode='rb') as f:
                return f.read()
        except IOError:
            # If we get an error here, since we know that the file exists already,
            # it means that read failed which happens with Python 2.7 for large files
            raise MetaflowException('Cannot read file at %s -- this is likely because it is too '
                                    'large to be properly handled by Python 2.7' % self._path)

    def name(self):
        """Path of the included file."""
        return self._path

    def size(self):
        """Size of the file in bytes, measured at construction time."""
        return self._size
class FilePathClass(click.ParamType):
    """
    Click parameter type that validates a readable file path and converts
    it into an InternalFile carrying the flow's logger and text/encoding
    settings.
    """

    name = 'FilePath'

    def __init__(self, is_text, encoding):
        self._is_text = is_text
        self._encoding = encoding

    def convert(self, value, param, ctx):
        try:
            # Probe that the file exists and is readable before accepting it.
            with open(value, mode='r') as _:
                pass
        except (IOError, OSError):
            # On Python 2 IOError is NOT a subclass of OSError (on Python 3
            # it is an alias), so catch both to fail cleanly on either
            # interpreter.
            self.fail("Could not open file '%s'" % value)
        return InternalFile(ctx.obj.logger, self._is_text, self._encoding, value)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return 'FilePath'
class IncludeFile(Parameter):
    """
    Flow Parameter whose value is the contents of a local file.

    The path is validated at command-line parse time (see FilePathClass)
    and the contents are exposed to the flow as text (is_text=True) or
    bytes.
    """

    def __init__(
            self, name, required=False, is_text=True, encoding=None, help=None, default=None):
        super(IncludeFile, self).__init__(
            name, required=required, help=help, default=default,
            type=FilePathClass(is_text, encoding))
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/includefile.py | includefile.py |
from .sidecar import SidecarSubProcess
from .sidecar_messages import Message, MessageTypes
class EventLogger(object):
    """Forwards log events to a sidecar subprocess of the given type."""

    def __init__(self, logger_type):
        # type: (str) -> None
        # The sidecar is started lazily via start(); until then no events
        # can be delivered.
        self.sidecar_process = None
        self.logger_type = logger_type

    def start(self):
        """Launch the sidecar subprocess that will receive log events."""
        self.sidecar_process = SidecarSubProcess(self.logger_type)

    def log(self, payload):
        """Send one LOG_EVENT message with the given payload to the sidecar."""
        msg = Message(MessageTypes.LOG_EVENT, payload)
        self.sidecar_process.msg_handler(msg)

    def terminate(self):
        """Shut down the sidecar subprocess."""
        self.sidecar_process.kill()
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/event_logger.py | event_logger.py |
import json
# Define message enums
class MessageTypes(object):
    # Integer constants identifying sidecar message types. A plain class
    # (not enum.Enum) to stay Python 2 compatible; instances are never
    # created.
    SHUTDOWN = 1
    LOG_EVENT = 2
class Message(object):
    """A sidecar message: a type tag (see MessageTypes) plus a payload."""

    def __init__(self, msg_type, payload):
        self.msg_type = msg_type
        self.payload = payload

    def serialize(self):
        """Encode the message as a single newline-terminated JSON line."""
        return json.dumps({'msg_type': self.msg_type,
                           'payload': self.payload}) + "\n"
def deserialize(json_msg):
    """
    Parse one JSON line produced by Message.serialize back into a Message.
    """
    parsed = json.loads(json_msg)
    # MessageTypes is a plain constant holder, not an Enum, so calling
    # MessageTypes(...) as the original did raises TypeError. The msg_type
    # integer is passed through unchanged instead.
    return Message(parsed['msg_type'], parsed['payload'])
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/sidecar_messages.py | sidecar_messages.py |
from itertools import islice
import os
import sys
import inspect
import traceback
from . import cmd_with_io
from .parameters import Parameter
from .exception import MetaflowException, MetaflowInternalError, MergeArtifactsException
from .graph import FlowGraph
# For Python 3 compatibility
try:
basestring
except NameError:
basestring = str
class InvalidNextException(MetaflowException):
    """Raised when self.next() is called with malformed arguments in a step."""

    headline = "Invalid self.next() transition detected"

    def __init__(self, msg):
        # NOTE this assume that InvalidNextException is only raised
        # at the top level of next(): the third-from-last stack frame is
        # then the user's step function, whose line number is attached to
        # the error message.
        _, line_no, _, _ = traceback.extract_stack()[-3]
        super(InvalidNextException, self).__init__(msg, line_no)
class FlowSpec(object):
    """
    Main class from which all Flows should inherit.

    Attributes
    ----------
    script_name
    index
    input
    """

    # Attributes that are not saved in the datastore when checkpointing.
    # Name starting with '__', methods, functions and Parameters do not need
    # to be listed.
    _EPHEMERAL = {'_EPHEMERAL',
                  '_datastore',
                  '_cached_input',
                  '_graph',
                  '_flow_decorators',
                  '_steps',
                  'index',
                  'input'}

    # Populated by flow-level decorators (see _base_flow_decorator).
    _flow_decorators = {}

    def __init__(self, use_cli=True):
        """
        Construct a FlowSpec

        Parameters
        ----------
        use_cli : bool, optional, default: True
            Set to True if the flow is invoked from __main__ or the command line
        """
        self.name = self.__class__.__name__
        self._datastore = None
        self._transition = None
        self._cached_input = {}
        self._graph = FlowGraph(self.__class__)
        self._steps = [getattr(self, node.name) for node in self._graph]
        if use_cli:
            # we import cli here to make sure custom parameters in
            # args.py get fully evaluated before cli.py is imported.
            from . import cli
            cli.main(self)

    @property
    def script_name(self):
        """
        Returns the name of the script containing the flow

        Returns
        -------
        str
            A string containing the name of the script
        """
        fname = inspect.getfile(self.__class__)
        if fname.endswith('.pyc'):
            fname = fname[:-1]
        return os.path.basename(fname)

    def _get_parameters(self):
        # Yield (name, Parameter) for every Parameter defined on the class.
        for var in dir(self):
            if var[0] == '_':
                continue
            try:
                val = getattr(self, var)
            except Exception:
                # getattr may execute arbitrary property code (or hit
                # __getattr__); any failure simply means "not a Parameter".
                # Exception (not a bare except) so KeyboardInterrupt and
                # SystemExit still propagate.
                continue
            if isinstance(val, Parameter):
                yield var, val

    def _set_datastore(self, datastore):
        self._datastore = datastore

    def __iter__(self):
        """
        Iterate over all steps in the Flow

        Returns
        -------
        Iterator[graph.DAGNode]
            Iterator over the steps in the flow
        """
        return iter(self._steps)

    def __getattr__(self, name):
        # Called only for attributes not found the normal way: fall back
        # to artifacts persisted in the datastore.
        if self._datastore and name in self._datastore:
            # load the attribute from the datastore...
            x = self._datastore[name]
            # ...and cache it in the object for faster access
            setattr(self, name, x)
            return x
        else:
            raise AttributeError("Flow %s has no attribute '%s'" %
                                 (self.name, name))

    def cmd(self, cmdline, input=None, output=None):
        """Run a shell command, mapping input/output artifacts for it."""
        # None defaults instead of mutable {} / [] literals (which would be
        # shared across calls); behavior is otherwise unchanged.
        return cmd_with_io.cmd(cmdline,
                               input=input if input is not None else {},
                               output=output if output is not None else [])

    @property
    def index(self):
        """
        Index of the task in a foreach step

        In a foreach step, multiple instances of this step (tasks) will be executed,
        one for each element in the foreach.
        This property returns the zero based index of the current task. If this is not
        a foreach step, this returns None.

        See Also
        --------
        foreach_stack: A detailed example is given in the documentation of this function

        Returns
        -------
        int
            Index of the task in a foreach step
        """
        if self._foreach_stack:
            return self._foreach_stack[-1].index

    @property
    def input(self):
        """
        Value passed to the task in a foreach step

        In a foreach step, multiple instances of this step (tasks) will be executed,
        one for each element in the foreach.
        This property returns the element passed to the current task. If this is not
        a foreach step, this returns None.

        See Also
        --------
        foreach_stack: A detailed example is given in the documentation of this function

        Returns
        -------
        object
            Input passed to the task (can be any object)
        """
        return self._find_input()

    def foreach_stack(self):
        """
        Returns the current stack of foreach steps for the current step

        This effectively corresponds to the indexes and values at the various levels of nesting.
        For example, considering the following code:
        ```
        @step
        def root(self):
            self.split_1 = ['a', 'b', 'c']
            self.next(self.nest_1, foreach='split_1')

        @step
        def nest_1(self):
            self.split_2 = ['d', 'e', 'f', 'g']
            self.next(self.nest_2, foreach='split_2'):

        @step
        def nest_2(self):
            foo = self.foreach_stack()
        ```
        foo will take the following values in the various tasks for nest_2:
            [(0, 3, 'a'), (0, 4, 'd')]
            [(0, 3, 'a'), (1, 4, 'e')]
            ...
            [(0, 3, 'a'), (3, 4, 'g')]
            [(1, 3, 'b'), (0, 4, 'd')]
            ...
        where each tuple corresponds to:
            - the index of the task for that level of the loop
            - the number of splits for that level of the loop
            - the value for that level of the loop
        Note that the last tuple returned in a task corresponds to:
            - first element: value returned by self.index
            - third element: value returned by self.input

        Returns
        -------
        List[Tuple[int, int, object]]
            An array describing the current stack of foreach steps
        """
        return [(frame.index, frame.num_splits, self._find_input(stack_index=i))
                for i, frame in enumerate(self._foreach_stack)]

    def _find_input(self, stack_index=None):
        # Resolve the foreach input at the given nesting level (defaults to
        # the innermost level). Results are cached per level.
        if stack_index is None:
            stack_index = len(self._foreach_stack) - 1
        if stack_index in self._cached_input:
            return self._cached_input[stack_index]
        elif self._foreach_stack:
            # NOTE this is obviously an O(n) operation which also requires
            # downloading the whole input data object in order to find the
            # right split. One can override this method with a more efficient
            # input data handler if this is a problem.
            frame = self._foreach_stack[stack_index]
            try:
                var = getattr(self, frame.var)
            except AttributeError:
                # this is where AttributeError happens:
                # [ foreach x ]
                #   [ foreach y ]
                #     [ inner ]
                #   [ join y ] <- call self.foreach_stack here,
                #                 self.x is not available
                self._cached_input[stack_index] = None
            else:
                try:
                    self._cached_input[stack_index] = var[frame.index]
                except TypeError:
                    # __getitem__ not supported, fall back to an iterator
                    self._cached_input[stack_index] = next(islice(var,
                                                                  frame.index,
                                                                  frame.index + 1))
            return self._cached_input[stack_index]
        # Not inside a foreach: there is no input (matches the documented
        # contract of the `input` property).
        return None

    def merge_artifacts(self, inputs, exclude=None):
        """
        Merge the artifacts coming from each merge branch (from inputs)

        This function takes all the artifacts coming from the branches of a
        join point and assigns them to self in the calling step. Only artifacts
        not set in the current step are considered. If, for a given artifact, different
        values are present on the incoming edges, an error will be thrown (and the artifacts
        that "conflict" will be reported).

        As a few examples, in the simple graph: A splitting into B and C and joining in D:
        A:
          self.x = 5
          self.y = 6
        B:
          self.b_var = 1
          self.x = from_b
        C:
          self.x = from_c
        D:
          merge_artifacts(inputs)

        In D, the following artifacts are set:
          - y (value: 6), b_var (value: 1)
          - if from_b and from_c are the same, x will be accessible and have value from_b
          - if from_b and from_c are different, an error will be thrown. To prevent this error,
            you need to manually set self.x in D to a merged value (for example the max) prior to
            calling merge_artifacts.

        Parameters
        ----------
        inputs : List[Steps]
            Incoming steps to the join point
        exclude : List[str], optional
            Artifact names that should never be merged

        Raises
        ------
        MetaflowException
            This exception is thrown if this is not called in a join step
        MergeArtifactsException
            This exception is thrown in case of unresolved conflicts
        """
        # None default instead of a mutable [] literal shared across calls.
        if exclude is None:
            exclude = []
        node = self._graph[self._current_step]
        if node.type != 'join':
            msg = "merge_artifacts can only be called in a join and step *{step}* "\
                  "is not a join".format(step=self._current_step)
            raise MetaflowException(msg)
        to_merge = {}
        unresolved = []
        for inp in inputs:
            # available_vars is the list of variables from inp that should be considered
            available_vars = ((var, sha) for var, sha in inp._datastore.items()
                              if (var not in exclude) and (not hasattr(self, var)))
            for var, sha in available_vars:
                _, previous_sha = to_merge.setdefault(var, (inp, sha))
                if previous_sha != sha:
                    # We have a conflict here
                    unresolved.append(var)
        if unresolved:
            # We have unresolved conflicts so we do not set anything and error out
            msg = "Step *{step}* cannot merge the following artifacts due to them "\
                  "having conflicting values:\n[{artifacts}].\nTo remedy this issue, "\
                  "be sure to explictly set those artifacts (using "\
                  "self.<artifact_name> = ...) prior to calling merge_artifacts."\
                  .format(step=self._current_step, artifacts=', '.join(unresolved))
            raise MergeArtifactsException(msg, unresolved)
        # If things are resolved, we go and fetch from the datastore and set here
        for var, (inp, _) in to_merge.items():
            setattr(self, var, getattr(inp, var))

    def next(self, *dsts, **kwargs):
        """
        Indicates the next step to execute at the end of this step

        This statement should appear once and only once in each and every step (except the `end`
        step). Furthermore, it should be the last statement in the step.

        There are several valid formats to specify the next step:
            - Straight-line connection: self.next(self.next_step) where `next_step` is a method in
              the current class decorated with the `@step` decorator
            - Static fan-out connection: self.next(self.step1, self.step2, ...) where `stepX` are
              methods in the current class decorated with the `@step` decorator
            - Conditional branch:
              self.next(self.if_true, self.if_false, condition='boolean_variable')
              In this situation, both `if_true` and `if_false` are methods in the current class
              decorated with the `@step` decorator and `boolean_variable` is a variable name
              in the current class that evaluates to True or False. The `if_true` step will be
              executed if thecondition variable evaluates to True and the `if_false` step will
              be executed otherwise
            - Foreach branch:
              self.next(self.foreach_step, foreach='foreach_iterator')
              In this situation, `foreach_step` is a method in the current class decorated with the
              `@step` docorator and `foreach_iterator` is a variable name in the current class that
              evaluates to an iterator. A task will be launched for each value in the iterator and
              each task will execute the code specified by the step `foreach_step`.

        Raises
        ------
        InvalidNextException
            Raised if the format of the arguments does not match one of the ones given above.
        """
        step = self._current_step
        foreach = kwargs.pop('foreach', None)
        condition = kwargs.pop('condition', None)
        if kwargs:
            kw = next(iter(kwargs))
            msg = "Step *{step}* passes an unknown keyword argument "\
                  "'{invalid}' to self.next().".format(step=step, invalid=kw)
            raise InvalidNextException(msg)
        # check: next() is called only once
        if self._transition is not None:
            msg = "Multiple self.next() calls detected in step *{step}*. "\
                  "Call self.next() only once.".format(step=step)
            raise InvalidNextException(msg)
        # check: all destinations are methods of this object
        funcs = []
        for i, dst in enumerate(dsts):
            try:
                name = dst.__func__.__name__
            except Exception:
                # Exception (not bare except) so interrupts still propagate;
                # any failure to pull a method name means dst is not a step.
                msg = "In step *{step}* the {arg}. argument in self.next() is "\
                      "not a function. Make sure all arguments in self.next() "\
                      "are methods of the Flow class."\
                      .format(step=step, arg=i + 1)
                raise InvalidNextException(msg)
            if not hasattr(self, name):
                msg = "Step *{step}* specifies a self.next() transition to an "\
                      "unknown step, *{name}*.".format(step=step,
                                                       name=name)
                raise InvalidNextException(msg)
            funcs.append(name)
        # check: foreach and condition are mutually exclusive
        if not (foreach is None or condition is None):
            msg = "Step *{step}* has an invalid self.next() transition. "\
                  "Specify either 'foreach' or 'condition', not both."\
                  .format(step=step)
            raise InvalidNextException(msg)
        # check: foreach is valid
        if foreach:
            if not isinstance(foreach, basestring):
                msg = "Step *{step}* has an invalid self.next() transition. "\
                      "The argument to 'foreach' must be a string."\
                      .format(step=step)
                raise InvalidNextException(msg)
            if len(dsts) != 1:
                msg = "Step *{step}* has an invalid self.next() transition. "\
                      "Specify exactly one target for 'foreach'."\
                      .format(step=step)
                raise InvalidNextException(msg)
            try:
                foreach_iter = getattr(self, foreach)
            except Exception:
                msg = "Foreach variable *self.{var}* in step *{step}* "\
                      "does not exist. Check your variable."\
                      .format(step=step, var=foreach)
                raise InvalidNextException(msg)
            try:
                self._foreach_num_splits = sum(1 for _ in foreach_iter)
            except TypeError:
                msg = "Foreach variable *self.{var}* in step *{step}* "\
                      "is not iterable. Check your variable."\
                      .format(step=step, var=foreach)
                raise InvalidNextException(msg)
            if self._foreach_num_splits == 0:
                msg = "Foreach iterator over *{var}* in step *{step}* "\
                      "produced zero splits. Check your variable."\
                      .format(step=step, var=foreach)
                raise InvalidNextException(msg)
            self._foreach_var = foreach
        # check: condition is valid
        if condition:
            if not isinstance(condition, basestring):
                msg = "Step *{step}* has an invalid self.next() transition. "\
                      "The argument to 'condition' must be a string."\
                      .format(step=step)
                raise InvalidNextException(msg)
            if len(dsts) != 2:
                msg = "Step *{step}* has an invalid self.next() transition. "\
                      "Specify two targets for 'condition': The first target "\
                      "is used if the condition evaluates to true, the second "\
                      "otherwise.".format(step=step)
                raise InvalidNextException(msg)
        # check: non-keyword transitions are valid
        if foreach is None and condition is None:
            if len(dsts) < 1:
                msg = "Step *{step}* has an invalid self.next() transition. "\
                      "Specify at least one step function as an argument in "\
                      "self.next().".format(step=step)
                raise InvalidNextException(msg)
        self._transition = (funcs, foreach, condition)

    def __str__(self):
        step_name = getattr(self, '_current_step', None)
        if step_name:
            index = ','.join(str(idx) for idx, _, _ in self.foreach_stack())
            if index:
                inp = self.input
                if inp is None:
                    return '<flow %s step %s[%s]>' %\
                           (self.name, step_name, index)
                else:
                    inp = str(inp)
                    if len(inp) > 20:
                        inp = inp[:20] + '...'
                    return '<flow %s step %s[%s] (input: %s)>' %\
                           (self.name, step_name, index, inp)
            else:
                return '<flow %s step %s>' % (self.name, step_name)
        else:
            return '<flow %s>' % self.name

    def __getstate__(self):
        raise MetaflowException("Flows can't be serialized. Maybe you tried "
                                "to assign *self* or one of the *inputs* "
                                "to an attribute? Instead of serializing the "
                                "whole flow, you should choose specific "
                                "attributes, e.g. *input.some_var*, to be "
                                "stored.")
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/flowspec.py | flowspec.py |
import re
from .exception import MetaflowException
from .util import all_equal
class LintWarn(MetaflowException):
    # Raised by individual lint checks when the flow graph is invalid.
    headline="Validity checker found an issue"
class FlowLinter(object):
    """
    Registry of graph-validity checks.

    Checks register themselves with @linter.check and are tagged with
    requirement flags (e.g. @linter.ensure_static_graph). run_checks
    executes every check for which at least one tagged flag is enabled,
    either on the linter instance or via keyword arguments.
    """

    def __init__(self):
        self.require_static_graph = True
        self.require_fundamentals = True
        self.require_acyclicity = True
        self.require_non_nested_foreach = False
        self._checks = []

    def _decorate(self, setting, f):
        f.attrs.append(setting)
        return f

    def ensure_static_graph(self, f):
        return self._decorate('require_static_graph', f)

    def ensure_fundamentals(self, f):
        return self._decorate('require_fundamentals', f)

    def ensure_acyclicity(self, f):
        return self._decorate('require_acyclicity', f)

    def ensure_non_nested_foreach(self, f):
        return self._decorate('require_non_nested_foreach', f)

    def check(self, f):
        self._checks.append(f)
        f.attrs = []
        return f

    def run_checks(self, graph, **kwargs):
        for check_func in self._checks:
            enabled = (getattr(self, attr) or kwargs.get(attr)
                       for attr in check_func.attrs)
            if any(enabled):
                check_func(graph)
# Module-level linter instance; the check functions below register
# themselves on it via its decorator methods.
linter = FlowLinter()
@linter.ensure_fundamentals
@linter.check
def check_reserved_words(graph):
    """Step names must not collide with built-in FlowSpec attributes."""
    RESERVED = {'name',
                'next',
                'input',
                'index',
                'cmd'}
    msg = 'Step name *%s* is a reserved word. Choose another name for the '\
          'step.'
    offenders = (node for node in graph if node.name in RESERVED)
    for node in offenders:
        raise LintWarn(msg % node.name)
@linter.ensure_fundamentals
@linter.check
def check_basic_steps(graph):
    """Every flow needs both a 'start' and an 'end' step."""
    msg = "Add %s *%s* step in your flow."
    for prefix, required in (('a', 'start'), ('an', 'end')):
        if required not in graph:
            raise LintWarn(msg % (prefix, required))
@linter.ensure_static_graph
@linter.check
def check_that_end_is_end(graph):
    """The 'end' step may neither transition onwards nor act as a join."""
    end_node = graph['end']
    if end_node.has_tail_next or end_node.invalid_tail_next:
        raise LintWarn("The *end* step should not have a step.next() transition. "
                       "Just remove it.",
                       end_node.tail_next_lineno)
    if end_node.num_args > 1:
        raise LintWarn("The *end* step should not be a join step (it gets an extra "
                       "argument). Add a join step before it.",
                       end_node.tail_next_lineno)
@linter.ensure_fundamentals
@linter.check
def check_step_names(graph):
    """Step names use only [a-z0-9_] and must not start with '_'."""
    bad_chars = re.compile('[^a-z0-9_]')
    for node in graph:
        if bad_chars.search(node.name) or node.name.startswith('_'):
            raise LintWarn(
                "Step *{0.name}* has an invalid name. Only lowercase ascii "
                "characters, underscores, and digits are allowed.".format(node),
                node.func_lineno)
@linter.ensure_fundamentals
@linter.check
def check_num_args(graph):
    """Steps take only 'self'; join steps take 'self' and 'inputs'."""
    too_many = (
        "Step {0.name} has too many arguments. Normal steps take only "
        "'self' as an argument. Join steps take 'self' and 'inputs'.")
    split_and_join = (
        "Step *{0.name}* is both a join step (it takes an extra argument) "
        "and a split step (it transitions to multiple steps). This is not "
        "allowed. Add a new step so that split and join become separate steps.")
    no_self = "Step *{0.name}* is missing the 'self' argument."
    for node in graph:
        if node.num_args > 2:
            raise LintWarn(too_many.format(node), node.func_lineno)
        elif node.num_args == 2 and node.type != 'join':
            raise LintWarn(split_and_join.format(node), node.func_lineno)
        elif node.num_args == 0:
            raise LintWarn(no_self.format(node), node.func_lineno)
@linter.ensure_static_graph
@linter.check
def check_static_transitions(graph):
    """Every non-end step must end with a self.next() call."""
    for node in graph:
        if node.type != 'end' and not node.has_tail_next:
            raise LintWarn(
                "Step *{0.name}* is missing a self.next() transition to "
                "the next step. Add a self.next() as the last line in the "
                "function.".format(node),
                node.func_lineno)
@linter.ensure_static_graph
@linter.check
def check_valid_transitions(graph):
    """The self.next() expression must match a supported transition form."""
    for node in graph:
        is_invalid = (node.type != 'end' and
                      node.has_tail_next and
                      node.invalid_tail_next)
        if is_invalid:
            raise LintWarn(
                "Step *{0.name}* specifies an invalid self.next() transition. "
                "Make sure the self.next() expression matches with one of the "
                "supported transition types.".format(node),
                node.tail_next_lineno)
@linter.ensure_static_graph
@linter.check
def check_unknown_transitions(graph):
    """All self.next() targets must be steps that exist in the graph."""
    for node in graph:
        missing = [n for n in node.out_funcs if n not in graph]
        if missing:
            raise LintWarn(
                "Step *{0.name}* specifies a self.next() transition to "
                "an unknown step, *{step}*.".format(node, step=missing[0]),
                node.tail_next_lineno)
@linter.ensure_acyclicity
@linter.ensure_static_graph
@linter.check
def check_for_acyclicity(graph):
    # Depth-first traversal starting from every node; a step reappearing on
    # the current path means the self.next() transitions form a loop.
    msg = "There is a loop in your flow: *{0}*. Break the loop "\
          "by fixing self.next() transitions."
    def check_path(node, seen):
        for n in node.out_funcs:
            if n in seen:
                path = '->'.join(seen + [n])
                raise LintWarn(msg.format(path),
                               node.tail_next_lineno)
            else:
                check_path(graph[n], seen + [n])
    for start in graph:
        check_path(start, [])
@linter.ensure_static_graph
@linter.check
def check_for_orphans(graph):
    """Every step must be reachable from the 'start' step."""
    msg =\
        "Step *{0.name}* is unreachable from the start step. Add "\
        "self.next({0.name}) in another step or remove *{0.name}*."
    reachable = set(['start'])
    def visit(node):
        for n in node.out_funcs:
            if n not in reachable:
                reachable.add(n)
                visit(graph[n])
    visit(graph['start'])
    all_steps = frozenset(n.name for n in graph)
    orphans = all_steps - reachable
    if orphans:
        orphan = graph[list(orphans)[0]]
        raise LintWarn(msg.format(orphan), orphan.func_lineno)
@linter.ensure_static_graph
@linter.check
def check_split_join_balance(graph):
    # Walk the graph carrying a stack of open splits; every join must close
    # exactly the most recent split, and 'end' must be reached with an empty
    # stack.
    msg0 = "Step *end* reached before a split started at step(s) *{roots}* "\
           "were joined. Add a join step before *end*."
    msg1 = "Step *{0.name}* seems like a join step (it takes an extra input "\
           "argument) but an incorrect number of steps (*{paths}*) lead to "\
           "it. This join was expecting {num_roots} incoming paths, starting "\
           "from splitted step(s) *{roots}*."
    msg2 = "Step *{0.name}* seems like a join step (it takes an extra input "\
           "argument) but it is not preceded by a split. Ensure that there is "\
           "a matching split for every join."
    msg3 = "Step *{0.name}* joins steps from unrelated splits. Ensure that "\
           "there is a matching join for every split."
    def traverse(node, split_stack):
        if node.type == 'linear':
            new_stack = split_stack
        elif node.type in ('split-or', 'split-and', 'foreach'):
            # Opening a split: remember which steps it fanned out to.
            new_stack = split_stack + [('split', node.out_funcs)]
        elif node.type == 'end':
            if split_stack:
                # An open split reached 'end' without being joined.
                split_type, split_roots = split_stack.pop()
                roots = ', '.join(split_roots)
                raise LintWarn(msg0.format(roots=roots))
        elif node.type == 'join':
            if split_stack:
                split_type, split_roots = split_stack[-1]
                new_stack = split_stack[:-1]
                if len(node.in_funcs) != len(split_roots):
                    paths = ', '.join(node.in_funcs)
                    roots = ', '.join(split_roots)
                    raise LintWarn(msg1.format(node,
                                               paths=paths,
                                               num_roots=len(split_roots),
                                               roots=roots),
                                   node.func_lineno)
            else:
                raise LintWarn(msg2.format(node), node.func_lineno)
            # check that incoming steps come from the same lineage
            # (no cross joins)
            def parents(n):
                if graph[n].type == 'join':
                    return tuple(graph[n].split_parents[:-1])
                else:
                    return tuple(graph[n].split_parents)
            if not all_equal(map(parents, node.in_funcs)):
                raise LintWarn(msg3.format(node), node.func_lineno)
        # NOTE(review): for 'end' nodes new_stack is never assigned; this is
        # safe only because 'end' has no out_funcs — presumably guaranteed by
        # the graph builder.
        for n in node.out_funcs:
            traverse(graph[n], new_stack)
    traverse(graph['start'], [])
@linter.ensure_static_graph
@linter.check
def check_empty_foreaches(graph):
    """Flag foreach splits that transition directly to a join.

    A foreach must fan out over at least one intermediate step; a split
    followed immediately by a join is almost certainly a mistake.
    """
    # fixed typo in the user-facing message ("immeditately")
    msg = "Step *{0.name}* is a foreach split that has no children: "\
          "it is followed immediately by a join step, *{join}*. Add "\
          "at least one step between the split and the join."
    for node in graph:
        if node.type == 'foreach':
            joins = [n for n in node.out_funcs if graph[n].type == 'join']
            if joins:
                # include the source line for consistency with the
                # other lint checks
                raise LintWarn(msg.format(node, join=joins[0]),
                               node.func_lineno)
@linter.ensure_non_nested_foreach
@linter.check
def check_nested_foreach(graph):
    """Reject foreach splits that are nested inside another foreach."""
    msg = "Nested foreaches are not allowed: Step *{0.name}* is a foreach "\
          "split that is nested under another foreach split."
    for node in graph:
        if node.type == 'foreach':
            # split_parents lists every enclosing split; any foreach
            # among them means this foreach is nested
            if any(graph[p].type == 'foreach' for p in node.split_parents):
                # include the source line for consistency with the
                # other lint checks
                raise LintWarn(msg.format(node), node.func_lineno)
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/lint.py | lint.py |
"""
Local backend
Execute the flow with a native runtime
using local / remote processes
"""
from __future__ import print_function
import json
import os
import sys
import fcntl
import time
import select
import subprocess
from functools import partial
from . import get_namespace
from .metaflow_config import MAX_ATTEMPTS
from .exception import MetaflowException,\
MetaflowInternalError,\
METAFLOW_EXIT_DISALLOW_RETRY
from . import procpoll
from .datastore import DataException, MetaflowDatastoreSet
from .metadata import MetaDatum
from .debug import debug
from .util import to_unicode, compress_list
try:
# python2
import cStringIO
BytesIO = cStringIO.StringIO
except:
# python3
import io
BytesIO = io.BytesIO
# Maximum number of worker subprocesses running concurrently.
MAX_WORKERS=16
# Default cap on the number of children a single foreach may yield.
MAX_NUM_SPLITS=100
# Per-stream (stdout/stderr) log size cap in bytes before truncation.
MAX_LOG_SIZE=1024*1024
# NOTE(review): used as a millisecond timeout for poll() but also compared
# directly against time.time() deltas (seconds) in execute() — confirm
# which unit is intended.
PROGRESS_INTERVAL = 1000 #ms
# The following is a list of the (data) artifacts used by the runtime while
# executing a flow. These are prefetched during the resume operation by
# leveraging the MetaflowDatastoreSet.
PREFETCH_DATA_ARTIFACTS = ['_foreach_stack', '_task_ok', '_transition']
# TODO option: output dot graph periodically about execution
class NativeRuntime(object):
    """Local scheduler that executes a Metaflow DAG by launching one
    worker subprocess per task and polling the workers' pipes.

    Key state:
      - _run_queue: pending (step, task_kwargs) pairs, consumed LIFO
      - _workers:   pipe fd -> Worker for every live subprocess
      - _finished:  (step, foreach_stack) -> pathspec of successful tasks
      - _is_cloned: pathspec -> bool, whether the task was cloned on resume
    """

    def __init__(self,
                 flow,
                 graph,
                 datastore,
                 metadata,
                 environment,
                 package,
                 logger,
                 entrypoint,
                 event_logger,
                 monitor,
                 run_id=None,
                 clone_run_id=None,
                 clone_steps=None,
                 max_workers=MAX_WORKERS,
                 max_num_splits=MAX_NUM_SPLITS,
                 max_log_size=MAX_LOG_SIZE):
        # mint a fresh run id unless the caller supplies one (in which
        # case it must be registered with the metadata service)
        if run_id is None:
            self._run_id = metadata.new_run_id()
        else:
            self._run_id = run_id
            metadata.register_run_id(run_id)
        self._flow = flow
        self._graph = graph
        self._datastore = datastore
        self._metadata = metadata
        self._environment = environment
        self._logger = logger
        self._max_workers = max_workers
        self._num_active_workers = 0
        self._max_num_splits = max_num_splits
        self._max_log_size = max_log_size
        self._params_task = None
        self._entrypoint = entrypoint
        self.event_logger = event_logger
        self._monitor = monitor
        self._clone_run_id = clone_run_id
        # steps listed here are forced to re-run (never cloned) on resume
        self._clone_steps = {} if clone_steps is None else clone_steps
        self._origin_ds_set = None
        if clone_run_id:
            # resume logic
            # 0. If clone_run_id is specified, attempt to clone all the
            # successful tasks from the flow with `clone_run_id`. And run the
            # unsuccessful or not-run steps in the regular fashion.
            # 1. With _find_origin_task, for every task in the current run, we
            # find the equivalent task in `clone_run_id` using
            # pathspec_index=run/step:[index] and verify if this task can be
            # cloned.
            # 2. If yes, we fire off a clone-only task which copies the
            # metadata from the `clone_origin` to pathspec=run/step/task to
            # mimmick that the resumed run looks like an actual run.
            # 3. All steps that couldn't be cloned (either unsuccessful or not
            # run) are run as regular tasks.
            # Lastly, to improve the performance of the cloning process, we
            # leverage the MetaflowDatastoreSet abstraction to prefetch the
            # entire DAG of `clone_run_id` and relevant data artifacts
            # (see PREFETCH_DATA_ARTIFACTS) so that the entire runtime can
            # access the relevant data from cache (instead of going to the datastore
            # after the first prefetch).
            logger(
                'Gathering required information to resume run (this may take a bit of time)...')
            self._origin_ds_set = \
                MetaflowDatastoreSet(
                    datastore,
                    flow.name,
                    clone_run_id,
                    metadata=metadata,
                    event_logger=event_logger,
                    monitor=monitor,
                    prefetch_data_artifacts=PREFETCH_DATA_ARTIFACTS)
        self._run_queue = []
        self._poll = procpoll.make_poll()
        self._workers = {} # fd -> subprocess mapping
        self._finished = {}
        self._is_cloned = {}
        # give step decorators a chance to initialize for this run
        for step in flow:
            for deco in step.decorators:
                deco.runtime_init(flow,
                                  graph,
                                  package,
                                  self._run_id)

    def _new_task(self, step, input_paths=None, **kwargs):
        """Construct a Task for *step*.

        A task is eligible for cloning only if all of its input tasks
        were themselves cloned and the step is not in _clone_steps
        (steps explicitly forced to re-run).
        """
        if input_paths is None:
            may_clone = True
        else:
            may_clone = all(self._is_cloned[path] for path in input_paths)
        if step in self._clone_steps:
            may_clone = False
        if step == '_parameters':
            decos = []
        else:
            decos = getattr(self._flow, step).decorators
        return Task(self._datastore,
                    self._flow,
                    step,
                    self._run_id,
                    self._metadata,
                    self._environment,
                    self._entrypoint,
                    self.event_logger,
                    self._monitor,
                    input_paths=input_paths,
                    may_clone=may_clone,
                    clone_run_id=self._clone_run_id,
                    origin_ds_set=self._origin_ds_set,
                    decos=decos,
                    logger=self._logger,
                    **kwargs)

    @property
    def run_id(self):
        """The id of the run being executed."""
        return self._run_id

    def persist_parameters(self, task_id=None):
        """Persist flow parameters in a synthetic '_parameters' task
        that the 'start' step then consumes as its input."""
        task = self._new_task('_parameters', task_id=task_id)
        if not task.is_cloned:
            task.persist(self._flow)
        self._params_task = task.path
        self._is_cloned[task.path] = task.is_cloned

    def execute(self):
        """Run the flow to completion: the main scheduling loop.

        Loops until the queue is empty and no workers remain, each
        iteration (1) reaping finished workers, (2) queuing the steps
        they trigger, and (3) launching workers up to _max_workers.
        """
        self._logger('Workflow starting (run-id %s):' % self._run_id,
                     system_msg=True)
        if self._params_task:
            self._queue_push('start', {'input_paths': [self._params_task]})
        else:
            self._queue_push('start', {})
        progress_tstamp = time.time()
        try:
            # main scheduling loop
            exception = None
            while self._run_queue or\
                    self._num_active_workers > 0:
                # 1. are any of the current workers finished?
                finished_tasks = list(self._poll_workers())
                # 2. push new tasks triggered by the finished tasks to the queue
                self._queue_tasks(finished_tasks)
                # 3. if there are available worker slots, pop and start tasks
                #    from the queue.
                self._launch_workers()
                # NOTE(review): PROGRESS_INTERVAL is documented as ms but
                # compared here against a seconds delta — confirm intent.
                if time.time() - progress_tstamp > PROGRESS_INTERVAL:
                    progress_tstamp = time.time()
                    msg = "%d tasks are running: %s." %\
                          (self._num_active_workers, 'e.g. ...') # TODO
                    self._logger(msg, system_msg=True)
                    msg = "%d tasks are waiting in the queue." %\
                          len(self._run_queue)
                    self._logger(msg, system_msg=True)
                    msg = "%d steps are pending: %s." %\
                          (0, 'e.g. ...') # TODO
                    self._logger(msg, system_msg=True)
        except KeyboardInterrupt as ex:
            self._logger('Workflow interrupted.', system_msg=True, bad=True)
            self._killall()
            exception = ex
            raise
        except Exception as ex:
            self._logger('Workflow failed.', system_msg=True, bad=True)
            self._killall()
            exception = ex
            raise
        finally:
            # on finish clean tasks
            for step in self._flow:
                for deco in step.decorators:
                    deco.runtime_finished(exception)
        # assert that end was executed and it was successful
        if ('end', ()) in self._finished:
            self._logger('Done!', system_msg=True)
        else:
            raise MetaflowInternalError('The *end* step was not successful '
                                        'by the end of flow.')

    def _killall(self):
        """Kill all live workers and give them a moment to flush logs."""
        for worker in self._workers.values():
            worker.kill()
        # give killed workers a chance to flush their logs to datastore
        for _ in range(3):
            list(self._poll_workers())

    # Store the parameters needed for task creation, so that pushing on items
    # onto the run_queue is an inexpensive operation.
    def _queue_push(self, step, task_kwargs):
        self._run_queue.insert(0, (step, task_kwargs))

    def _queue_pop(self):
        # returns None when the queue is empty
        return self._run_queue.pop() if self._run_queue else None

    def _queue_task_join(self, task, next_steps):
        """Queue a join step, but only once all of its input tasks
        (foreach siblings or split branches) have finished."""
        # if the next step is a join, we need to check that
        # all input tasks for the join have finished before queuing it.
        # CHECK: this condition should be enforced by the linter but
        # let's assert that the assumption holds
        if len(next_steps) > 1:
            msg = 'Step *{step}* transitions to a join and another '\
                  'step. The join must be the only transition.'
            raise MetaflowInternalError(task, msg.format(step=task.step))
        else:
            next_step = next_steps[0]
        # matching_split is the split-parent of the finished task
        matching_split = self._graph[self._graph[next_step].split_parents[-1]]
        step_name, foreach_stack = task.finished_id
        if matching_split.type == 'foreach':
            # next step is a foreach join
            def siblings(foreach_stack):
                top = foreach_stack[-1]
                bottom = list(foreach_stack[:-1])
                for index in range(top.num_splits):
                    yield tuple(bottom + [top._replace(index=index)])
            # required tasks are all split-siblings of the finished task
            required_tasks = [self._finished.get((task.step, s))
                              for s in siblings(foreach_stack)]
            join_type = 'foreach'
        else:
            # next step is a split-and
            # required tasks are all branches joined by the next step
            required_tasks = [self._finished.get((step, foreach_stack))
                              for step in self._graph[next_step].in_funcs]
            join_type = 'linear'
        if all(required_tasks):
            # all tasks to be joined are ready. Schedule the next join step.
            self._queue_push(next_step,
                             {'input_paths': required_tasks,
                              'join_type': join_type})

    def _queue_task_foreach(self, task, next_steps):
        """Queue one child task per foreach split of the finished task,
        enforcing the --max-num-splits limit."""
        # CHECK: this condition should be enforced by the linter but
        # let's assert that the assumption holds
        if len(next_steps) > 1:
            msg = 'Step *{step}* makes a foreach split but it defines '\
                  'multiple transitions. Specify only one transition '\
                  'for foreach.'
            raise MetaflowInternalError(msg.format(step=task.step))
        else:
            next_step = next_steps[0]
        num_splits = task.results['_foreach_num_splits']
        if num_splits > self._max_num_splits:
            msg = 'Foreach in step *{step}* yielded {num} child steps '\
                  'which is more than the current maximum of {max} '\
                  'children. You can raise the maximum with the '\
                  '--max-num-splits option. '
            raise TaskFailed(task, msg.format(step=task.step,
                                              num=num_splits,
                                              max=self._max_num_splits))
        # schedule all splits
        for i in range(num_splits):
            self._queue_push(next_step,
                             {'split_index': str(i),
                              'input_paths': [task.path]})

    def _queue_tasks(self, finished_tasks):
        """Record finished tasks and queue their successor steps."""
        # finished tasks include only successful tasks
        for task in finished_tasks:
            self._finished[task.finished_id] = task.path
            self._is_cloned[task.path] = task.is_cloned
            # CHECK: ensure that runtime transitions match with
            # statically inferred transitions
            trans = task.results.get('_transition')
            if trans:
                next_steps = trans[0]
                foreach = trans[1]
            else:
                next_steps = []
                foreach = None
            expected = self._graph[task.step].out_funcs
            if next_steps != expected:
                msg = 'Based on static analysis of the code, step *{step}* '\
                      'was expected to transition to step(s) *{expected}*. '\
                      'However, when the code was executed, self.next() was '\
                      'called with *{actual}*. Make sure there is only one '\
                      'unconditional self.next() call in the end of your '\
                      'step. '
                raise MetaflowInternalError(msg.format(step=task.step,
                                                       expected=', '.join(
                                                           expected),
                                                       actual=', '.join(next_steps)))
            # Different transition types require different treatment
            if any(self._graph[f].type == 'join' for f in next_steps):
                # Next step is a join
                self._queue_task_join(task, next_steps)
            elif foreach:
                # Next step is a foreach child
                self._queue_task_foreach(task, next_steps)
            else:
                # Next steps are normal linear steps
                for step in next_steps:
                    self._queue_push(step, {'input_paths': [task.path]})

    def _poll_workers(self):
        """Poll live workers; yield tasks of workers that finished
        successfully, retry or raise for failed ones."""
        if self._workers:
            for event in self._poll.poll(PROGRESS_INTERVAL):
                worker = self._workers.get(event.fd)
                if worker:
                    if event.can_read:
                        worker.read_logline(event.fd)
                    if event.is_terminated:
                        returncode = worker.terminate()
                        # deregister both of the worker's pipe fds
                        for fd in worker.fds():
                            self._poll.remove(fd)
                            del self._workers[fd]
                        self._num_active_workers -= 1
                        task = worker.task
                        if returncode:
                            # worker did not finish successfully
                            if worker.killed or\
                               returncode == METAFLOW_EXIT_DISALLOW_RETRY:
                                self._logger("This failed task will not be "
                                             "retried.", system_msg=True)
                            else:
                                if task.retries < task.user_code_retries +\
                                                  task.error_retries:
                                    self._retry_worker(worker)
                                else:
                                    raise TaskFailed(task)
                        else:
                            # worker finished successfully
                            yield task

    def _launch_workers(self):
        """Fill free worker slots with tasks popped from the queue."""
        while self._run_queue and self._num_active_workers < self._max_workers:
            step, task_kwargs = self._queue_pop()
            # Initialize the task (which can be expensive using remote datastores)
            # before launching the worker so that cost is amortized over time, instead
            # of doing it during _queue_push.
            task = self._new_task(step, **task_kwargs)
            self._launch_worker(task)

    def _retry_worker(self, worker):
        """Relaunch a failed worker's task as a new attempt, enforcing
        the global MAX_ATTEMPTS ceiling."""
        worker.task.retries += 1
        if worker.task.retries >= MAX_ATTEMPTS:
            # any results with an attempt ID >= MAX_ATTEMPTS will be ignored
            # by datastore, so running a task with such a retry_could would
            # be pointless and dangerous
            raise MetaflowInternalError("Too many task attempts (%d)! "
                                        "MAX_ATTEMPTS exceeded."
                                        % worker.task.retries)
        worker.task.new_attempt()
        self._launch_worker(worker.task)

    def _launch_worker(self, task):
        """Start a Worker subprocess for *task* and register its pipes
        with the poller."""
        worker = Worker(task, self._max_log_size)
        for fd in worker.fds():
            self._workers[fd] = worker
            self._poll.add(fd)
        self._num_active_workers += 1
class Task(object):
    """One task (a single step invocation) managed by the runtime.

    A Task is either executed by a Worker subprocess or, during
    `resume`, cloned from the equivalent task of a previous run.

    Fixes vs. the previous revision:
      - `decos` no longer defaults to a shared mutable list.
      - `__str__` no longer references the non-existent `self._args`
        attribute (which raised AttributeError); it reports the task's
        pathspec instead.
    """
    # Maps a pathspec in this run to the pathspec of the corresponding
    # task in the run being cloned. Class-level on purpose: shared by
    # all tasks of a resume.
    clone_pathspec_mapping = {}

    def __init__(self,
                 datastore,
                 flow,
                 step,
                 run_id,
                 metadata,
                 environment,
                 entrypoint,
                 event_logger,
                 monitor,
                 input_paths=None,
                 split_index=None,
                 clone_run_id=None,
                 origin_ds_set=None,
                 may_clone=False,
                 join_type=None,
                 logger=None,
                 task_id=None,
                 decos=None):
        # avoid the mutable-default-argument pitfall: one list must not
        # be shared across all Task instances
        if decos is None:
            decos = []
        if task_id is None:
            task_id = str(metadata.new_task_id(run_id, step))
        else:
            # task_id is preset only by persist_parameters()
            metadata.register_task_id(run_id, step, task_id)
        self.step = step
        self.flow_name = flow.name
        self.run_id = run_id
        self.task_id = task_id
        self.input_paths = input_paths
        self.split_index = split_index
        self.decos = decos
        self.entrypoint = entrypoint
        self.environment = environment
        self.environment_type = self.environment.TYPE
        self.clone_run_id = clone_run_id
        self.clone_origin = None
        self.origin_ds_set = origin_ds_set
        self.metadata = metadata
        self.event_logger = event_logger
        self.monitor = monitor
        self._logger = logger
        # pathspec: run/step/task
        self._path = '%s/%s/%s' % (self.run_id, self.step, self.task_id)
        self.retries = 0
        self.user_code_retries = 0
        self.error_retries = 0
        self.tags = metadata.sticky_tags
        self.event_logger_type = self.event_logger.logger_type
        self.monitor_type = monitor.monitor_type
        self.metadata_type = metadata.TYPE
        self.datastore_type = datastore.TYPE
        self._datastore = datastore
        self.datastore_sysroot = datastore.datastore_root
        self._results_ds = None
        if clone_run_id and may_clone:
            self._is_cloned = self._attempt_clone(clone_run_id, join_type)
        else:
            self._is_cloned = False
        # Open the output datastore only if the task is not being cloned.
        if not self._is_cloned:
            self.new_attempt()
            for deco in decos:
                deco.runtime_task_created(self._ds,
                                          task_id,
                                          split_index,
                                          input_paths,
                                          self._is_cloned)
                # determine the number of retries of this task
                user_code_retries, error_retries = deco.step_task_retry_count()
                self.user_code_retries = max(self.user_code_retries,
                                             user_code_retries)
                self.error_retries = max(self.error_retries, error_retries)

    def new_attempt(self):
        """Open a fresh write-mode datastore for the current attempt."""
        self._ds = self._datastore(self.flow_name,
                                   run_id=self.run_id,
                                   step_name=self.step,
                                   task_id=self.task_id,
                                   mode='w',
                                   metadata=self.metadata,
                                   attempt=self.retries,
                                   event_logger=self.event_logger,
                                   monitor=self.monitor)

    def log(self, msg, system_msg=False, pid=None):
        """Log *msg* prefixed with this task's pathspec (and pid)."""
        if pid:
            prefix = '[%s (pid %s)] ' % (self._path, pid)
        else:
            prefix = '[%s] ' % self._path
        self._logger(msg, head=prefix, system_msg=system_msg)
        sys.stdout.flush()

    def _find_origin_task(self, clone_run_id, join_type):
        """Find the task of run *clone_run_id* equivalent to this one,
        using a pathspec index of the form run/step[foreach-index].

        Returns None when no equivalent task exists.
        """
        if self.step == '_parameters':
            pathspec = '%s/_parameters[]' % clone_run_id
            origin = self.origin_ds_set.get_with_pathspec_index(pathspec)
            if origin is None:
                # This is just for usability: We could rerun the whole flow
                # if an unknown clone_run_id is provided but probably this is
                # not what the user intended, so raise a warning
                raise MetaflowException("Resume could not find run id *%s*" %
                                        clone_run_id)
            else:
                return origin
        else:
            # all inputs must have the same foreach stack, so we can safely
            # pick the first one
            parent_pathspec = self.input_paths[0]
            origin_parent_pathspec = \
                self.clone_pathspec_mapping[parent_pathspec]
            parent = self.origin_ds_set.get_with_pathspec(origin_parent_pathspec)
            # Parent should be non-None since only clone the child if the parent
            # was successfully cloned.
            foreach_stack = parent['_foreach_stack']
            if join_type == 'foreach':
                # foreach-join pops the topmost index
                index = ','.join(str(s.index) for s in foreach_stack[:-1])
            elif self.split_index:
                # foreach-split pushes a new index
                index = ','.join([str(s.index) for s in foreach_stack] +
                                 [str(self.split_index)])
            else:
                # all other transitions keep the parent's foreach stack intact
                index = ','.join(str(s.index) for s in foreach_stack)
            pathspec = '%s/%s[%s]' % (clone_run_id, self.step, index)
            return self.origin_ds_set.get_with_pathspec_index(pathspec)

    def _attempt_clone(self, clone_run_id, join_type):
        """Try to clone the equivalent successful task of a previous
        run; return True on success, False to run the task normally."""
        origin = self._find_origin_task(clone_run_id, join_type)
        if origin and origin['_task_ok']:
            # Store the mapping from current_pathspec -> origin_pathspec which
            # will be useful for looking up origin_ds_set in find_origin_task.
            self.clone_pathspec_mapping[self._path] = origin.pathspec
            if self.step == '_parameters':
                # Clone in place without relying on run_queue.
                self.new_attempt()
                self._ds.clone(origin)
                self._ds.done()
            else:
                self.log("Cloning results of a previously run task %s"
                         % origin.pathspec, system_msg=True)
                # Store the origin pathspec in clone_origin so this can be run
                # as a task by the runtime.
                self.clone_origin = origin.pathspec
                # Save a call to creating the results_ds since its same as origin.
                self._results_ds = origin
            return True
        else:
            return False

    @property
    def path(self):
        """The task's pathspec: run/step/task."""
        return self._path

    @property
    def results(self):
        """Read-mode datastore of this task's results (lazily opened)."""
        if self._results_ds:
            return self._results_ds
        else:
            self._results_ds = self._datastore(self.flow_name,
                                               run_id=self.run_id,
                                               step_name=self.step,
                                               task_id=self.task_id,
                                               mode='r',
                                               metadata=self.metadata,
                                               event_logger=self.event_logger,
                                               monitor=self.monitor)
            return self._results_ds

    @property
    def finished_id(self):
        """(step, foreach_stack) key used by the runtime's _finished map."""
        # note: id is not available before the task has finished
        return (self.step, tuple(self.results['_foreach_stack']))

    @property
    def is_cloned(self):
        """Whether this task was cloned from a previous run."""
        return self._is_cloned

    def persist(self, flow):
        """Persist *flow*'s artifacts (used only for the synthetic
        '_parameters' task before the start step)."""
        # this is used to persist parameters before the start step
        flow._task_ok = flow._success = True
        flow._foreach_stack = []
        self._ds.persist(flow)
        self._ds.done()

    def save_logs(self, logtype, logs):
        """Save a log stream to the datastore and register its location
        with the metadata service; return the location."""
        location = self._ds.save_log(logtype, logs)
        datum = [MetaDatum(field='log_location_%s' % logtype,
                           value=json.dumps({
                               'ds_type': self._ds.TYPE,
                               'location': location,
                               'attempt': self.retries}),
                           type='log_path')]
        self.metadata.register_metadata(self.run_id,
                                        self.step,
                                        self.task_id,
                                        datum)
        return location

    def save_metadata(self, name, metadata):
        """Save a named metadata blob in the task's datastore."""
        self._ds.save_metadata(name, metadata)

    def __str__(self):
        # bug fix: the previous implementation joined a non-existent
        # self._args attribute, raising AttributeError
        return self._path
class TaskFailed(MetaflowException):
    """Raised when a task fails and cannot (or may not) be retried."""
    headline = "Step failure"

    def __init__(self, task, msg=''):
        base = "Step *%s* (task-id %s) failed" % (task.step, task.task_id)
        # append the detail message when given, otherwise just a period
        suffix = ': %s' % msg if msg else '.'
        super(TaskFailed, self).__init__(base + suffix)
class TruncatedBuffer(object):
    """In-memory byte buffer that stops accepting user output once a
    size limit is reached.

    System messages bypass the limit so that orchestrator notices (e.g.
    kill markers) are always recorded, even after truncation.
    """

    def __init__(self, name, maxsize):
        self.name = name
        self._maxsize = maxsize
        self._buffer = BytesIO()
        self._size = 0
        self._eof = False

    def write(self, bytedata, system_msg=False):
        """Append *bytedata*, honoring the size cap for non-system output."""
        if system_msg:
            # system messages ignore the size limit entirely
            self._buffer.write(bytedata)
            return
        if self._eof:
            # limit was reached earlier; drop further user output
            return
        if self._size + len(bytedata) < self._maxsize:
            self._buffer.write(bytedata)
            self._size += len(bytedata)
        else:
            # record a single truncation marker and stop accepting data
            self._buffer.write(b'[TRUNCATED - MAXIMUM LOG FILE SIZE REACHED]\n')
            self._eof = True

    def get_bytes(self):
        """Return everything written so far as bytes."""
        return self._buffer.getvalue()
class CLIArgs(object):
    """
    Container to allow decorators modify the command line parameters
    for step execution in StepDecorator.runtime_step_cli().

    The emitted command line is:
        <entrypoint> <top_level_options> <commands> <command_args>
        <command_options>
    NOTE: the order of options follows dict insertion order, so
    decorators that mutate these dicts affect the final ordering.
    """

    def __init__(self, task):
        self.task = task
        self.entrypoint = list(task.entrypoint)
        # options passed before the 'step' subcommand
        self.top_level_options = {
            'quiet': True,
            'coverage': 'coverage' in sys.modules,
            'metadata': self.task.metadata_type,
            'environment': self.task.environment_type,
            'datastore': self.task.datastore_type,
            'event-logger': self.task.event_logger_type,
            'monitor': self.task.monitor_type,
            'datastore-root': self.task.datastore_sysroot,
            # non-static decorators are re-attached via repeated --with
            'with': [deco.make_decorator_spec() for deco in self.task.decos
                     if not deco.statically_defined]
        }
        self.commands = ['step']
        self.command_args = [self.task.step]
        # options passed after the step name
        self.command_options = {
            'run-id': task.run_id,
            'task-id': task.task_id,
            'input-paths': compress_list(task.input_paths),
            'split-index': task.split_index,
            'retry-count': task.retries,
            'max-user-code-retries': task.user_code_retries,
            'tag': task.tags,
            'namespace': get_namespace() or ''
        }
        # extra environment variables for the subprocess
        self.env = {}

    def get_args(self):
        """Render the full argv list for the worker subprocess."""
        def options(mapping):
            # flags with falsy values are dropped; boolean True becomes a
            # bare flag; list values repeat the flag once per item
            for k, v in mapping.items():
                values = v if isinstance(v, list) else [v]
                for value in values:
                    if value:
                        yield '--%s' % k
                        if not isinstance(value, bool):
                            yield to_unicode(value)

        args = list(self.entrypoint)
        args.extend(options(self.top_level_options))
        args.extend(self.commands)
        args.extend(self.command_args)
        args.extend(options(self.command_options))
        return args

    def get_env(self):
        """Extra environment variables to set for the subprocess."""
        return self.env

    def __str__(self):
        return ' '.join(self.get_args())
class Worker(object):
    """Wraps one task subprocess: launches it, streams its stdout/stderr
    into size-capped buffers, and persists logs/metadata on exit."""

    def __init__(self, task, max_logs_size):
        self.task = task
        self._proc = self._launch()
        if task.retries > task.user_code_retries:
            self.task.log('Task fallback is starting to handle the failure.',
                          system_msg=True,
                          pid=self._proc.pid)
        elif not task.is_cloned:
            suffix = ' (retry).' if task.retries else '.'
            self.task.log('Task is starting' + suffix,
                          system_msg=True,
                          pid=self._proc.pid)
        self._stdout = TruncatedBuffer('stdout', max_logs_size)
        self._stderr = TruncatedBuffer('stderr', max_logs_size)
        # fd -> (pipe fileobj, destination buffer)
        self._logs = {self._proc.stderr.fileno(): (self._proc.stderr,
                                                   self._stderr),
                      self._proc.stdout.fileno(): (self._proc.stdout,
                                                   self._stdout)}
        self._encoding = sys.stdout.encoding or 'UTF-8'
        self.killed = False

    def _launch(self):
        """Build the CLI (letting decorators modify it) and spawn the
        task subprocess with piped stdout/stderr."""
        args = CLIArgs(self.task)
        env = dict(os.environ)
        if self.task.clone_run_id:
            args.command_options['clone-run-id'] = self.task.clone_run_id
        if self.task.is_cloned and self.task.clone_origin:
            args.command_options['clone-only'] = self.task.clone_origin
            # disabling atlas sidecar for cloned tasks due to perf reasons
            args.top_level_options['monitor'] = 'nullSidecarMonitor'
        else:
            # decorators may modify the CLIArgs object in-place
            for deco in self.task.decos:
                deco.runtime_step_cli(args,
                                      self.task.retries,
                                      self.task.user_code_retries)
        env.update(args.get_env())
        # the env vars are needed by the test framework, nothing else
        env['_METAFLOW_ATTEMPT'] = str(self.task.retries)
        if self.task.clone_run_id:
            env['_METAFLOW_RESUMED_RUN'] = '1'
            env['_METAFLOW_RESUME_ORIGIN_RUN_ID'] = str(self.task.clone_run_id)
        # NOTE bufsize=1 below enables line buffering which is required
        # by read_logline() below that relies on readline() not blocking
        # NOTE(review): bufsize=1 is only honored in text mode on Python 3;
        # with binary pipes it may be treated as unbuffered — confirm.
        # print('running', args)
        cmdline = args.get_args()
        debug.subcommand_exec(cmdline)
        return subprocess.Popen(cmdline,
                                env=env,
                                bufsize=1,
                                stdin=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdout=subprocess.PIPE)

    def write(self, msg, buf):
        """Append raw *msg* to *buf* and echo it to the runtime logger."""
        buf.write(msg)
        text = msg.strip().decode(self._encoding, errors='replace')
        self.task.log(text, pid=self._proc.pid)

    def read_logline(self, fd):
        """Read one line from the pipe registered under *fd*.

        Returns True if a line was read, False at EOF/no data.
        """
        fileobj, buf = self._logs[fd]
        # readline() below should never block thanks to polling and
        # line buffering. If it does, things will deadlock
        line = fileobj.readline()
        if line:
            self.write(line, buf)
            return True
        else:
            return False

    def fds(self):
        """The two pipe fds (stderr, stdout) the poller should watch."""
        return (self._proc.stderr.fileno(),
                self._proc.stdout.fileno())

    def kill(self):
        """Kill the subprocess (idempotent), marking logs accordingly."""
        if not self.killed:
            for fileobj, buf in self._logs.values():
                buf.write(b'[KILLED BY ORCHESTRATOR]\n', system_msg=True)
            try:
                # wait for the process to clean up after itself
                select.poll().poll(1000)
                self._proc.kill()
            except:
                pass
            self.killed = True

    def terminate(self):
        """Reap the finished subprocess: drain remaining log output,
        persist logs and runtime metadata, and return the exit code."""
        # this shouldn't block, since terminate() is called only
        # after the poller has decided that the worker is dead
        returncode = self._proc.wait()
        # consume all remaining loglines
        # we set the file descriptor to be non-blocking, since
        # the pipe may stay active due to subprocesses launched by
        # the worker, e.g. sidecars, so we can't rely on EOF. We try to
        # read just what's available in the pipe buffer
        for fileobj, buf in self._logs.values():
            fileno = fileobj.fileno()
            fcntl.fcntl(fileno, fcntl.F_SETFL, os.O_NONBLOCK)
            try:
                while self.read_logline(fileno):
                    pass
            except:
                # ignore "resource temporarily unavailable" etc. errors
                # caused due to non-blocking. Draining is done on a
                # best-effort basis.
                pass
        # Return early if the task is cloned since we don't want to
        # perform any log collection.
        if not self.task.is_cloned:
            self.task.save_logs('stdout', self._stdout.get_bytes())
            self.task.save_logs('stderr', self._stderr.get_bytes())
            self.task.save_metadata('runtime', {'return_code': returncode,
                                                'killed': self.killed,
                                                'success': returncode == 0})
            if returncode:
                if not self.killed:
                    self.task.log('Task failed.',
                                  system_msg=True,
                                  pid=self._proc.pid)
            else:
                num = self.task.results['_foreach_num_splits']
                if num:
                    self.task.log('Foreach yields %d child steps.' % num,
                                  system_msg=True,
                                  pid=self._proc.pid)
                self.task.log('Task finished successfully.',
                              system_msg=True,
                              pid=self._proc.pid)
        return returncode

    def __str__(self):
        return 'Worker[%d]: %s' % (self._proc.pid, self.task.path)
| 6D657461666C6F77 | /6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/runtime.py | runtime.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.