Dataset columns (name, dtype, value statistics):

| Column | Dtype | Values / lengths |
| --- | --- | --- |
| `repo` | stringclasses | 32 values |
| `instance_id` | stringlengths | 13–37 |
| `base_commit` | stringlengths | 40–40 |
| `patch` | stringlengths | 1–1.89M |
| `test_patch` | stringclasses | 1 value |
| `problem_statement` | stringlengths | 304–69k |
| `hints_text` | stringlengths | 0–246k |
| `created_at` | stringlengths | 20–20 |
| `version` | stringclasses | 1 value |
| `FAIL_TO_PASS` | stringclasses | 1 value |
| `PASS_TO_PASS` | stringclasses | 1 value |
| `environment_setup_commit` | stringclasses | 1 value |
| `traceback` | stringlengths | 64–23.4k |
| `__index_level_0__` | int64 | 29–19k |
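Rows with this schema can be consumed like any other Hugging Face dataset. A minimal sketch, assuming the `datasets` library is installed; the dataset identifier and split name below are placeholders, not the real published ones:

```python
from datasets import load_dataset

# Placeholder identifier -- substitute the actual dataset path and split.
ds = load_dataset("org-name/swe-style-issues", split="train")

for row in ds.select(range(3)):
    # Each row pairs a GitHub issue with the gold patch that resolved it.
    print(row["repo"], row["instance_id"], row["created_at"])
    print(row["problem_statement"][:200])
    print(row["traceback"][:200])
```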
DataDog/integrations-core
DataDog__integrations-core-446
0b9be7366a08b2fa1b83c036d823d8848762770f
diff --git a/postgres/check.py b/postgres/check.py --- a/postgres/check.py +++ b/postgres/check.py @@ -651,14 +651,17 @@ def _get_custom_metrics(self, custom_metrics, key): self.log.debug("Metric: {0}".format(m)) - for ref, (_, mtype) in m['metrics'].iteritems(): - cap_mtype = mtype.upper() - if cap_mtype not in ('RATE', 'GAUGE', 'MONOTONIC'): - raise CheckException("Collector method {0} is not known." - " Known methods are RATE, GAUGE, MONOTONIC".format(cap_mtype)) - - m['metrics'][ref][1] = getattr(PostgreSql, cap_mtype) - self.log.debug("Method: %s" % (str(mtype))) + try: + for ref, (_, mtype) in m['metrics'].iteritems(): + cap_mtype = mtype.upper() + if cap_mtype not in ('RATE', 'GAUGE', 'MONOTONIC'): + raise CheckException("Collector method {0} is not known." + " Known methods are RATE, GAUGE, MONOTONIC".format(cap_mtype)) + + m['metrics'][ref][1] = getattr(PostgreSql, cap_mtype) + self.log.debug("Method: %s" % (str(mtype))) + except Exception as e: + raise CheckException("Error processing custom metric '{}': {}".format(m, e)) self.custom_metrics[key] = custom_metrics return custom_metrics
[postgres] Improve config reading errors I had this `postgres.yaml`: ``` init_config: instances: - host: pepepe ... custom_metrics: - query: SELECT %s FROM pg_locks WHERE granted = false; metrics: count(distinct pid): [postgresql.connections_locked] descriptors: [] relation: false ``` with a few other hosts and custom metrics. When deploying this I got the following error: ``` 2017-02-13 15:33:14 UTC | ERROR | dd.collector | checks.postgres(__init__.py:762) | Check 'postgres' instance #0 failed Traceback (most recent call last): File "/opt/datadog-agent/agent/checks/__init__.py", line 745, in run self.check(copy.deepcopy(instance)) File "/opt/datadog-agent/agent/checks.d/postgres.py", line 606, in check custom_metrics = self._get_custom_metrics(instance.get('custom_metrics', []), key) File "/opt/datadog-agent/agent/checks.d/postgres.py", line 576, in _get_custom_metrics for ref, (_, mtype) in m['metrics'].iteritems(): ValueError: need more than 1 value to unpack ``` This was caused by a missing metric type in the yaml above i.e. it should have been `[postgresql.connections_locked, GAUGE]`. Because the error message is unclear and also doesn't point to the offending metric (remember I have other hosts and custom metrics), it took me a couple of hours to figure out the cause of this error. Please consider improving the error messages around config reading.
Thanks a lot for this report @mausch! We can't validate the config in a consistent manner, which makes something like this tricky to make the error better. We will work on making this a lot better in the future. However, what we can do in the very near future is make the documentation both online and in the config yaml itself a lot better. The documentation for the postgres check does not make it clear how to use the custom metrics very well, so better documentation will definitely help to assuage this issue! Thanks again for your report, we really appreciate this and I will add this to our issue board. > We can't validate the config in a consistent manner Not sure what this means exactly, but generally speaking a good error message should give the user enough context so that they can readily fix it. Better docs are great, but ultimately people will always make mistakes when defining complex config so you need good error messages. In this particular case, it could be as easy as wrapping the iteration in `_get_custom_metrics` with a `try..except` and in the exception handler wrap the exception with another one that displays the metric being processed (e.g. `raise CheckException("Error processing custom metric: " + str(m)) from e`) More generally, avoiding partial functions (like tuple unpacking in Python) makes it much easier to validate input and report errors correctly. Adding to our queue, this would make the life of support engineers much easier, thanks for reporting and for the suggestions.
2017-05-29T13:10:25Z
[]
[]
Traceback (most recent call last):
  File "/opt/datadog-agent/agent/checks/__init__.py", line 745, in run
    self.check(copy.deepcopy(instance))
  File "/opt/datadog-agent/agent/checks.d/postgres.py", line 606, in check
    custom_metrics = self._get_custom_metrics(instance.get('custom_metrics', []), key)
  File "/opt/datadog-agent/agent/checks.d/postgres.py", line 576, in _get_custom_metrics
    for ref, (_, mtype) in m['metrics'].iteritems():
ValueError: need more than 1 value to unpack
29
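The postgres fix above boils down to wrapping the per-metric parsing loop so that any failure is re-raised with the offending config entry attached. A minimal, standalone Python 3 sketch of that pattern (illustrative names only, not the actual check module):

```python
class CheckException(Exception):
    pass

ALLOWED_METHODS = ('RATE', 'GAUGE', 'MONOTONIC')

def parse_custom_metrics(custom_metrics):
    for m in custom_metrics:
        try:
            for ref, (_, mtype) in m['metrics'].items():
                if mtype.upper() not in ALLOWED_METHODS:
                    raise CheckException(
                        "Collector method {0} is not known. "
                        "Known methods are {1}".format(mtype.upper(), ALLOWED_METHODS)
                    )
        except Exception as e:
            # Re-raise with the whole metric definition so the user can spot the bad entry.
            raise CheckException("Error processing custom metric '{}': {}".format(m, e))
    return custom_metrics

# A metric missing its type (as in the yaml above) now fails with a message that
# names the offending definition instead of a bare unpacking ValueError:
# parse_custom_metrics([{'metrics': {'count(distinct pid)': ['postgresql.connections_locked']}}])
```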
DataDog/integrations-core
DataDog__integrations-core-5659
3b850d826a2f245e9dcc8a1d87d5e2343123882e
diff --git a/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py b/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py --- a/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py @@ -114,14 +114,15 @@ def _get_tag_query_tag(self, sampler, wmi_obj, tag_query): target_class, target_property, filters = self._format_tag_query(sampler, wmi_obj, tag_query) # Create a specific sampler - tag_query_sampler = WMISampler(self.log, target_class, [target_property], filters=filters, **sampler.connection) + with WMISampler( + self.log, target_class, [target_property], filters=filters, **sampler.connection + ) as tag_query_sampler: + tag_query_sampler.sample() - tag_query_sampler.sample() + # Extract tag + self._raise_on_invalid_tag_query_result(tag_query_sampler, wmi_obj, tag_query) - # Extract tag - self._raise_on_invalid_tag_query_result(tag_query_sampler, wmi_obj, tag_query) - - link_value = str(tag_query_sampler[0][target_property]).lower() + link_value = str(tag_query_sampler[0][target_property]).lower() tag = "{tag_name}:{tag_value}".format(tag_name=target_property.lower(), tag_value="_".join(link_value.split())) @@ -235,14 +236,17 @@ def _get_instance_key(self, host, namespace, wmi_class, other=None): return "{host}:{namespace}:{wmi_class}".format(host=host, namespace=namespace, wmi_class=wmi_class) - def _get_wmi_sampler(self, instance_key, wmi_class, properties, tag_by="", **kwargs): + def _get_running_wmi_sampler(self, instance_key, wmi_class, properties, tag_by="", **kwargs): """ - Create and cache a WMISampler for the given (class, properties) + Return a running WMISampler for the given (class, properties). + + If no matching WMISampler is running yet, start one and cache it. """ properties = list(properties) + [tag_by] if tag_by else list(properties) if instance_key not in self.wmi_samplers: wmi_sampler = WMISampler(self.log, wmi_class, properties, **kwargs) + wmi_sampler.start() self.wmi_samplers[instance_key] = wmi_sampler return self.wmi_samplers[instance_key] diff --git a/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py b/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py --- a/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py @@ -105,6 +105,7 @@ def __init__( # Sampling state self._sampling = False + self._stopping = False self.logger = logger @@ -146,12 +147,35 @@ def __init__( self._runSampleEvent = Event() self._sampleCompleteEvent = Event() - thread = Thread(target=self._query_sample_loop, name=class_name) - thread.daemon = True + def start(self): + """ + Start internal thread for sampling + """ + thread = Thread(target=self._query_sample_loop, name=self.class_name) + thread.daemon = True # Python 2 does not support daemon as Thread constructor parameter thread.start() + def stop(self): + """ + Dispose of the internal thread + """ + self._stopping = True + self._runSampleEvent.set() + self._sampleCompleteEvent.wait() + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + self.stop() + def _query_sample_loop(self): try: + # Initialize COM for the current (dedicated) thread + # WARNING: any python COM object (locator, connection, etc) created in a thread + # shouldn't be used in other threads (can lead to memory/handle leaks if done + # without a deep knowledge of COM's threading model). 
pythoncom.CoInitialize() except Exception as e: self.logger.info("exception in CoInitialize: %s", e) @@ -159,6 +183,11 @@ def _query_sample_loop(self): while True: self._runSampleEvent.wait() + if self._stopping: + self.logger.debug("_query_sample_loop stopping") + self._sampleCompleteEvent.set() + return + self._runSampleEvent.clear() if self.is_raw_perf_class and not self._previous_sample: self._current_sample = self._query() @@ -335,11 +364,6 @@ def get_connection(self): self.username, ) - # Initialize COM for the current thread - # WARNING: any python COM object (locator, connection, etc) created in a thread - # shouldn't be used in other threads (can lead to memory/handle leaks if done - # without a deep knowledge of COM's threading model). Because of this and given - # that we run each query in its own thread, we don't cache connections additional_args = [] if self.provider != ProviderArchitecture.DEFAULT: diff --git a/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py b/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py --- a/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py +++ b/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py @@ -115,7 +115,7 @@ def check(self, instance): filters.append(query) - wmi_sampler = self._get_wmi_sampler( + wmi_sampler = self._get_running_wmi_sampler( instance_key, self.EVENT_CLASS, event_properties, diff --git a/wmi_check/datadog_checks/wmi_check/wmi_check.py b/wmi_check/datadog_checks/wmi_check/wmi_check.py --- a/wmi_check/datadog_checks/wmi_check/wmi_check.py +++ b/wmi_check/datadog_checks/wmi_check/wmi_check.py @@ -52,7 +52,7 @@ def check(self, instance): metric_name_and_type_by_property, properties = self._get_wmi_properties(instance_key, metrics, tag_queries) - wmi_sampler = self._get_wmi_sampler( + wmi_sampler = self._get_running_wmi_sampler( instance_key, wmi_class, properties,
WMI integration throws Exception: SWbemLocator Not enough storage is available to process this command ```text =============== Agent (v7.16.0) =============== Status date: 2020-02-05 15:56:45.740020 GMT Agent start: 2020-02-05 15:03:08.601503 GMT Pid: 25188 Go Version: go1.12.9 Python Version: 3.7.4 Build arch: amd64 Host Info ========= bootTime: 2020-01-30 09:06:55.000000 GMT os: windows platform: Windows Server 2016 Datacenter platformFamily: Windows Server 2016 Datacenter platformVersion: 10.0 Build 14393 procs: 255 uptime: 149h56m12s wmi_check (1.6.0) ``` **Steps to reproduce the issue:** The WMI Check integration is configured to capture metrics for multiple instances of a specific process and tag them using the command line, as below ```yaml - class: Win32_PerfFormattedData_PerfProc_Process metrics: - - ThreadCount - proc.threads.count - gauge - - VirtualBytes - proc.mem.virtual - gauge - - PrivateBytes - proc.mem.private - gauge - - WorkingSet - proc.mem.workingset - gauge - - PageFaultsPerSec - proc.mem.page_faults_per_sec - gauge - - PercentProcessorTime - proc.cpu_pct - gauge - - IOReadBytesPerSec - proc.io.bytes_read - gauge - - IOWriteBytesPerSec - proc.io.bytes_written - gauge filters: - Name: Calastone.Core.MessageAdapter.Console% tag_by: Name tag_queries: - [IDProcess, Win32_Process, Handle, CommandLine] ``` There are 17 instances of the process running. **Describe the results you received:** - After a period of time (can be 40+ minutes) the following error starts to be logged ``` 2020-02-04 16:31:29 GMT | CORE | WARN | (pkg/collector/python/datadog_agent.go:118 in LogMessage) | wmi_check:a7174f61bd7a5360 | (sampler.py:469) | Failed to execute WMI query (Select CommandLine from Win32_Process WHERE ( Handle = '8408' )) Traceback (most recent call last): File "C:\Program Files\Datadog\Datadog Agent\embedded3\lib\site-packages\datadog_checks\base\checks\win\wmi\sampler.py", line 464, in _query raw_results = self.get_connection().ExecQuery(wql, "WQL", query_flags) File "C:\Program Files\Datadog\Datadog Agent\embedded3\lib\site-packages\datadog_checks\base\checks\win\wmi\sampler.py", line 351, in get_connection connection = locator.ConnectServer(self.host, self.namespace, self.username, self.password, *additional_args) File "<COMObject WbemScripting.SWbemLocator>", line 5, in ConnectServer File "C:\Program Files\Datadog\Datadog Agent\embedded3\lib\site-packages\win32com\client\dynamic.py", line 287, in _ApplyTypes_ result = self._oleobj_.InvokeTypes(*(dispid, LCID, wFlags, retType, argTypes) + args) pywintypes.com_error: (-2147352567, 'Exception occurred.', (0, 'SWbemLocator', 'Not enough storage is available to process this command. ', None, 0, -2147024888), None) 2020-02-04 16:31:29 GMT | CORE | WARN | (pkg/collector/python/datadog_agent.go:118 in LogMessage) | wmi_check:a7174f61bd7a5360 | (__init__.py:88) | Failed to extract a tag from `tag_queries` parameter: no result was returned. 
wmi_object={'threadcount': 27.0, 'virtualbytes': 823386112.0, 'privatebytes': 304635904.0, 'workingset': 367628288.0, 'pagefaultspersec': 0.0, 'percentprocessortime': 0.0, 'ioreadbytespersec': 0.0, 'iowritebytespersec': 0.0, 'idprocess': 8408.0, 'name': 'Calastone.Core.MessageAdapter.Console#3'} - query=['IDProcess', 'Win32_Process', 'Handle', 'CommandLine'] 2020-02-04 16:31:29 GMT | CORE | WARN | (pkg/collector/python/datadog_agent.go:118 in LogMessage) | wmi_check:a7174f61bd7a5360 | (sampler.py:469) | Failed to execute WMI query (Select CommandLine from Win32_Process WHERE ( Handle = '14836' )) ``` - The number of threads used by the agent process is observed to be rocketing (> 1700) - The server becomes unresponsive **Diagnosis:** This issue didn't occur on the previous version of the agent we were using (6.7.0). Looking at the source code suggests the problem was introduced as part of #3987 https://github.com/DataDog/integrations-core/blob/010ed622d62c9dd7de28d76f1191a4be5960a965/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py#L117 creates a WMISampler for EVERY tag query that needs to be run. With the new logic that creates a thread for each query that is never released! **Solution:** The follow hack fixes the problem. I'll put it into a PR. Change `sampler.py` ```python def _query_sample_loop(self): ... while True: self._runSampleEvent.wait() if self._stopping: return def dispose(self): """ Dispose of the internal thread """ self._stopping = True self._runSampleEvent.set() ``` Change `__init__.py` ```python def _get_tag_query_tag(self, sampler, wmi_obj, tag_query): ... tag = "{tag_name}:{tag_value}".format(tag_name=target_property.lower(), tag_value="_".join(link_value.split())) tag_query_sampler.dispose() ``` There also looks to be scope to cache these WMISampler classes like the main metric samplers. Also the connection created in `get_connection` could be created in the sampler thread method since it is now bound to that thread
2020-02-06T12:16:14Z
[]
[]
Traceback (most recent call last):
  File "C:\Program Files\Datadog\Datadog Agent\embedded3\lib\site-packages\datadog_checks\base\checks\win\wmi\sampler.py", line 464, in _query
    raw_results = self.get_connection().ExecQuery(wql, "WQL", query_flags)
  File "C:\Program Files\Datadog\Datadog Agent\embedded3\lib\site-packages\datadog_checks\base\checks\win\wmi\sampler.py", line 351, in get_connection
    connection = locator.ConnectServer(self.host, self.namespace, self.username, self.password, *additional_args)
  File "<COMObject WbemScripting.SWbemLocator>", line 5, in ConnectServer
  File "C:\Program Files\Datadog\Datadog Agent\embedded3\lib\site-packages\win32com\client\dynamic.py", line 287, in _ApplyTypes_
    result = self._oleobj_.InvokeTypes(*(dispid, LCID, wFlags, retType, argTypes) + args)
pywintypes.com_error: (-2147352567, 'Exception occurred.', (0, 'SWbemLocator', 'Not enough storage is available to process this command. ', None, 0, -2147024888), None)
36
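The root cause above is a sampler thread started per tag query and never released; the patch bounds the thread's lifetime with a context manager. A stripped-down sketch of that pattern, independent of the real WMI classes:

```python
import threading

class Sampler:
    """Runs sampling work on a dedicated thread; usable as a context manager."""

    def __init__(self):
        self._run = threading.Event()
        self._done = threading.Event()
        self._stopping = False
        self._thread = None

    def start(self):
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()

    def stop(self):
        self._stopping = True
        self._run.set()
        self._thread.join()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc, tb):
        self.stop()

    def sample(self):
        self._done.clear()
        self._run.set()
        self._done.wait()

    def _loop(self):
        while True:
            self._run.wait()
            self._run.clear()
            if self._stopping:
                return
            # ... the real class would run its WMI query here ...
            self._done.set()

# The worker thread is created and torn down with the block, so a sampler
# created per tag query no longer leaks a thread:
with Sampler() as s:
    s.sample()
```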
DataDog/integrations-core
DataDog__integrations-core-9857
8006a053c108af2cf1988efe23db8f58df8262dc
diff --git a/mongo/datadog_checks/mongo/collectors/custom_queries.py b/mongo/datadog_checks/mongo/collectors/custom_queries.py --- a/mongo/datadog_checks/mongo/collectors/custom_queries.py +++ b/mongo/datadog_checks/mongo/collectors/custom_queries.py @@ -56,8 +56,10 @@ def _collect_custom_metrics_for_query(self, api, raw_query): mongo_query = deepcopy(raw_query.get('query')) if not mongo_query: # no cov raise ValueError("Custom query field `query` is required") + # The mongo command to run (find, aggregate, count...) mongo_command = self._extract_command_from_mongo_query(mongo_query) - collection_name = mongo_query[mongo_command] + # The value of the command, it is usually the collection name on which to run the query. + mongo_command_value = mongo_query[mongo_command] del mongo_query[mongo_command] if mongo_command not in ALLOWED_CUSTOM_QUERIES_COMMANDS: raise ValueError("Custom query command must be of type {}".format(ALLOWED_CUSTOM_QUERIES_COMMANDS)) @@ -90,20 +92,26 @@ def _collect_custom_metrics_for_query(self, api, raw_query): if field_type not in ALLOWED_CUSTOM_METRICS_TYPES + ['tag']: raise ValueError('Field `type` must be one of {}'.format(ALLOWED_CUSTOM_METRICS_TYPES + ['tag'])) - tags = list(tags) - tags.extend(raw_query.get('tags', [])) - tags.append('collection:{}'.format(collection_name)) - try: # This is where it is necessary to extract the command and its argument from the query to pass it as the # first two params. - result = db.command(mongo_command, collection_name, **mongo_query) + result = db.command(mongo_command, mongo_command_value, **mongo_query) if result['ok'] == 0: raise pymongo.errors.PyMongoError(result['errmsg']) except pymongo.errors.PyMongoError: self.log.error("Failed to run custom query for metric %s", metric_prefix) raise + # `1` is Mongo default value for commands that are collection agnostics. + if str(mongo_command_value) == '1': + # https://github.com/mongodb/mongo-python-driver/blob/01e34cebdb9aac96c72ddb649e9b0040a0dfd3a0/pymongo/aggregation.py#L208 + collection_name = '{}.{}'.format(db_name, mongo_command) + else: + collection_name = mongo_command_value + + tags.append('collection:{}'.format(collection_name)) + tags.extend(raw_query.get('tags', [])) + if mongo_command == 'count': # A count query simply returns a number, no need to iterate through it. submit_method(metric_prefix, result['n'], tags)
MongoDB: Collection-agnostic aggregations like $currentOp doesn't work Agent 7.29.1, running on Ubuntu Linux 18.04. **Steps to reproduce the issue:** Add the following configuration to `/etc/datadog-agent/conf.d/mongo.d/conf.yaml` and restart the agent: ``` custom_queries: - metric_prefix: mongodb.custom.queries_slower_than_60sec run_on_secondary: true query: { "aggregate": 1, "maxTimeMS": 1000, "pipeline": [ { "$currentOp": { "allUsers": true }}, { "$match": { "active": true, "secs_running": {"$gt": 60}}} ], "cursor": {}} fields: - field_name: secs_running name: secs_running type: gauge - field_name: appName name: app_name type: tag - field_name: ns name: mongo_op_namespace type: tag ``` **Describe the results you received:** When Datadog attempts to run this command, it produces an error (found via `journalctl`): ``` Traceback (most recent call last): 2021-07-22 06:44:38 UTC | CORE | WARN | (pkg/collector/python/datadog_agent.go:122 in LogMessage) | mongo:375a6f2e54dabf11 | (custom_queries.py:153) | Errors while collecting custom metrics with prefix mongodb.custom.queries_slower_than_60sec TypeError: name must be an instance of str raise TypeError("name must be an instance " File "/opt/datadog-agent/embedded/lib/python3.8/site-packages/pymongo/collection.py", line 160, in __init__ pymongo.collection.Collection(db, collection_name), result['cursor'], None File "/opt/datadog-agent/embedded/lib/python3.8/site-packages/datadog_checks/mongo/collectors/custom_queries.py", line 113, in _collect_custom_metrics_for_query self._collect_custom_metrics_for_query(api, raw_query) File "/opt/datadog-agent/embedded/lib/python3.8/site-packages/datadog_checks/mongo/collectors/custom_queries.py", line 150, in collect ``` **Describe the results you expected:** I would like to be able to send information about slow queries to Datadog. **Additional information you deem important (e.g. issue happens only occasionally):** It seems like the problem here is that when using this syntax to run an admin aggregation like `$currentOp`, you have to specify `"aggregate": 1` in the query to indicate that there is no associated collection. However, the API that Datadog is calling in pymongo expects the collection name to always be a string. Unfortunately, `"aggregate": "1"` is not equivalent and will fail. More details on the syntax: https://docs.mongodb.com/manual/reference/command/aggregate/
Hey @atodd-circleci Acknowledging the limitation, I'm able to reproduce. I'm thinking we should be able to work around that by putting `$cmd.aggregate` instead of "1" as the collection name here: https://github.com/DataDog/integrations-core/blob/master/mongo/datadog_checks/mongo/collectors/custom_queries.py#L113 but I'd have to confirm that @FlorianVeaux Thanks for taking a look so quickly. I manually edited `custom_queries.py` on my installation to replace `collection_name` with the literal `$cmd.aggregate`. It seems to have worked. When I start the agent, I see this in the log: ``` Exception: Custom query returned an empty result set. raise Exception('Custom query returned an empty result set.') File "/opt/datadog-agent/embedded/lib/python3.8/site-packages/datadog_checks/mongo/collectors/custom_queries.py", line 145, in _collect_custom_metrics_for_query self._collect_custom_metrics_for_query(api, raw_query) File "/opt/datadog-agent/embedded/lib/python3.8/site-packages/datadog_checks/mongo/collectors/custom_queries.py", line 150, in collect Traceback (most recent call last): 2021-07-27 05:20:05 UTC | CORE | WARN | (pkg/collector/python/datadog_agent.go:122 in LogMessage) | mongo:<redacted> | (custom_queries.py:153) | Errors while collecting custom metrics with prefix mongodb.custom.queries_slower_than_60sec ``` I'm not expecting any results, so this is good. I can't really go around manually editing our installations this way, though, so I'm looking forward to a more permanent fix. (I am a little concerned about having all of these exceptions in the system log, as well. I'll have to look at using [$count](https://docs.mongodb.com/manual/reference/operator/aggregation/count/) to always output a count instead of what I'm doing now).
2021-08-05T15:17:59Z
[]
[]
Traceback (most recent call last):
2021-07-22 06:44:38 UTC | CORE | WARN | (pkg/collector/python/datadog_agent.go:122 in LogMessage) | mongo:375a6f2e54dabf11 | (custom_queries.py:153) | Errors while collecting custom metrics with prefix mongodb.custom.queries_slower_than_60sec
TypeError: name must be an instance of str
58
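The core of the patch above is to stop treating the command value as a collection name when it is the collection-agnostic `1`. A hedged, simplified sketch of that branching (the real collector also runs the command and submits metrics):

```python
def collection_tag(db_name, mongo_command, mongo_command_value):
    """Pick the `collection:` tag for a custom query result."""
    # MongoDB uses 1 as the command value for collection-agnostic commands,
    # e.g. {"aggregate": 1, "pipeline": [{"$currentOp": {...}}], "cursor": {}}.
    if str(mongo_command_value) == '1':
        return 'collection:{}.{}'.format(db_name, mongo_command)
    # Otherwise the command value is the collection the query runs against.
    return 'collection:{}'.format(mongo_command_value)

print(collection_tag('admin', 'aggregate', 1))    # collection:admin.aggregate
print(collection_tag('app', 'count', 'orders'))   # collection:orders
```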
Lightning-AI/lightning
Lightning-AI__lightning-1360
ebd9fc9530242e1c9b5f3093dc62ceb4185735b0
diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py --- a/pytorch_lightning/loggers/wandb.py +++ b/pytorch_lightning/loggers/wandb.py @@ -65,10 +65,11 @@ def __init__(self, name: Optional[str] = None, save_dir: Optional[str] = None, def __getstate__(self): state = self.__dict__.copy() + # args needed to reload correct experiment + state['_id'] = self._experiment.id if self._experiment is not None else None + # cannot be pickled state['_experiment'] = None - # args needed to reload correct experiment - state['_id'] = self.experiment.id return state @property @@ -87,7 +88,7 @@ def experiment(self) -> Run: os.environ['WANDB_MODE'] = 'dryrun' self._experiment = wandb.init( name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous, - id=self._id, resume='allow', tags=self._tags, entity=self._entity) + reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity) # save checkpoints in wandb dir to upload on W&B servers if self._log_model: self.save_dir = self._experiment.dir @@ -109,8 +110,11 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> @property def name(self) -> str: - return self.experiment.project_name() + # don't create an experiment if we don't have one + name = self._experiment.project_name() if self._experiment else None + return name @property def version(self) -> str: - return self.experiment.id + # don't create an experiment if we don't have one + return self._experiment.id if self._experiment else None
WandbLogger cannot be used with 'ddp' <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug wandb modifies `init` such that a child process calling init returns None if the master process has called init. This seems to cause a bug with ddp, and results in rank zero having experiment = None, which crashes the program. ### To Reproduce Can be reproduced with the basic MNIST gpu template, simply add a WandbLogger and pass 'ddp' as the distributed backend. ``` -- Process 0 terminated with the following error: Traceback (most recent call last): File "/home/rmrao/anaconda3/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap fn(i, *args) File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 331, in ddp_train self.run_pretrain_routine(model) File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 757, in run_pretrain_routine self.logger.log_hyperparams(ref_model.hparams) File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/logging/base.py", line 14, in wrapped_fn fn(self, *args, **kwargs) File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/logging/wandb.py", line 79, in log_hyperparams self.experiment.config.update(params) AttributeError: 'NoneType' object has no attribute 'config' ``` This occurs with the latest wandb version and with pytorch-lightning 0.6.
Hi! thanks for your contribution!, great first issue! Some hacky solutions: calling `reinit=True` for wandb, adding or this terrible hack: ```python def init_ddp_connection(self, *args, **kwargs): super().init_ddp_connection(*args, **kwargs) if torch.distributed.get_rank() == 0: import wandb wandb.run = None ``` These both seem to only kind-of work and result in multiple independent calls to wandb.init. I think the ideal solution is that the experiment is only ever initialized on rank zero. *However* doing this means that wandb *cannot* be initialized in the master thread at all. Better than this probably requires some changes to the wandb API. Following up slightly - my hacky solution doesn't really work. It's easy enough though to get the rank zero only solution to work. If this seems like a reasonable solution, let me know and I'll submit a PR. well, have observed some issues with `wandb` earlier #906 could you check it? Hmm, I think this is a slightly different issue (I'm running on Ubuntu so I don't think that's the issue). Pickling also works correctly. This particular problem I think stems from this part of the `wandb.init(...)` code: ```python def init(...): ... # If a thread calls wandb.init() it will get the same Run object as # the parent. If a child process with distinct memory space calls # wandb.init(), it won't get an error, but it will get a result of # None. # This check ensures that a child process can safely call wandb.init() # after a parent has (only the parent will create the Run object). # This doesn't protect against the case where the parent doesn't call # wandb.init but two children do. if run or os.getenv(env.INITED): return run ``` Child processes end up getting `None` for the wandb run object, which causes logging to fail. There are probably two reasonable and complementary solutions: 1. The main thread should avoid creating a wandb experiment unless absolutely necessary. Right now, [this](https://github.com/PyTorchLightning/pytorch-lightning/blob/e586ed47674fd78b158322bb7b14d00aeb912abd/pytorch_lightning/loggers/wandb.py#L63-L69) is the only part of the logging code that the parent thread calls (I assume it's called when pickling): ```python def __getstate__(self): state = self.__dict__.copy() # cannot be pickled state['_experiment'] = None # args needed to reload correct experiment state['_id'] = self.experiment.id return state ``` If this is changed to: ```python def __getstate__(self): state = self.__dict__.copy() # args needed to reload correct experiment if self._experiment is not None: state['_id'] = self._experiment.id else: state['_id'] = None # cannot be pickled state['_experiment'] = None return state ``` That will ensure that unless the user explicitly logs something or creates the wandb experiment first, then the main thread will not try to create an experiment. Since subsequent logging / saving code is wrapped by the `@rank_zero_only` decorator, this will generally solve the issue in the base case. It's also possible that [these properties](https://github.com/PyTorchLightning/pytorch-lightning/blob/e586ed47674fd78b158322bb7b14d00aeb912abd/pytorch_lightning/loggers/wandb.py#L112-L118) are also called by master. Ideally they would be wrapped to not create the experiment unless it had been already created (i.e. experiment should only be created by a function that is wrapped with the `@rank_zero_only` decorator). 2. If the main thread *has* created an experiment, rank zero should be passed the re-init argument. 
`wandb` does allow you to reinitialize the experiment. I tried to play around with this a little bit and got some errors, but in theory adding this: ```python wandb.init(..., reinit=dist.is_available() and dist.is_initialized() and dist.get_rank() == 0) ``` should force a re-initialization when wandb is already initialzed for rank zero.
2020-04-03T13:32:07Z
[]
[]
Traceback (most recent call last):
  File "/home/rmrao/anaconda3/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap
    fn(i, *args)
  File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 331, in ddp_train
    self.run_pretrain_routine(model)
  File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 757, in run_pretrain_routine
    self.logger.log_hyperparams(ref_model.hparams)
  File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/logging/base.py", line 14, in wrapped_fn
    fn(self, *args, **kwargs)
  File "/home/rmrao/anaconda3/lib/python3.6/site-packages/pytorch_lightning/logging/wandb.py", line 79, in log_hyperparams
    self.experiment.config.update(params)
AttributeError: 'NoneType' object has no attribute 'config'
104
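The essence of the fix above is that pickling the logger (which DDP does for every child process) must not create a W&B run as a side effect, and neither should read-only properties. A simplified sketch of that guard, not the full logger:

```python
class WandbLoggerSketch:
    def __init__(self):
        self._experiment = None  # created lazily, only where logging actually happens
        self._id = None

    def __getstate__(self):
        state = self.__dict__.copy()
        # Keep the run id only if a run already exists; never create one here.
        state['_id'] = self._experiment.id if self._experiment is not None else None
        # The run object itself cannot be pickled.
        state['_experiment'] = None
        return state

    @property
    def version(self):
        # Same rule for properties: don't spawn a run just to answer a query.
        return self._experiment.id if self._experiment is not None else None
```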
Lightning-AI/lightning
Lightning-AI__lightning-1377
b8ff9bc1d242a18f5e7147f34d63f43fcdd0e50a
diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py --- a/pytorch_lightning/loggers/tensorboard.py +++ b/pytorch_lightning/loggers/tensorboard.py @@ -9,6 +9,7 @@ from torch.utils.tensorboard import SummaryWriter from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only +from pytorch_lightning import _logger as log class TensorBoardLogger(LightningLoggerBase): @@ -163,6 +164,11 @@ def version(self) -> int: def _get_next_version(self): root_dir = os.path.join(self.save_dir, self.name) + + if not os.path.isdir(root_dir): + log.warning('Missing logger folder: %s', root_dir) + return 0 + existing_versions = [] for d in os.listdir(root_dir): if os.path.isdir(os.path.join(root_dir, d)) and d.startswith("version_"):
Tensorboard logger error: lightning_logs directory not exists in multi-node DDP on nodes with rank != 0 ## 🐛 Bug In multi-node DDP train mode on all nodes except rank 0 errors appears at the start of the training caused by accessing lightning_logs directory in tensorboard logger which is not exist at the moment. ### To Reproduce Steps to reproduce the behavior: 1. setup multi-node cluster (without SLURM) 2. set environment variables on each node: ``` export MASTER_ADDR=<rank 0 node IP> export MASTER_PORT=23456 export RANK=<node id> export SLURM_NODEID=<node id> export WORLD_SIZE=<world-size> ``` 3. install dependencies: ``` pip install torch torchvision hydra-core pytorch-lightning ``` 4. copy app.y and conf.yaml to each node 5. run script on each node ``` python app.py ``` 6. see the error: ``` Exception: -- Process 0 terminated with the following error: Traceback (most recent call last): File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap fn(i, *args) File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 342, in ddp_train self.run_pretrain_routine(model) File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 777, in run_pretrain_routine self.configure_checkpoint_callback() File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/trainer/callback_config.py", line 45, in configure_checkpoint_callback f'version_{self.logger.version}', File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/loggers/tensorboard.py", line 161, in version self._version = self._get_next_version() File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/loggers/tensorboard.py", line 167, in _get_next_version for d in os.listdir(root_dir): FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/pytorch-lightning-intro-guide/outputs/2020-04-04/15-53-26/lightning_logs' ``` #### Code sample app.py: ``` import pathlib import hydra import pytorch_lightning as pl import torch from omegaconf import OmegaConf from torch.nn import functional as F from torch.optim import Adam from torch.utils.data import DataLoader, random_split from torchvision import datasets, transforms class LitMNIST(pl.LightningModule): def __init__(self): super().__init__() self.layer_1 = torch.nn.Linear(28 * 28, 128) self.layer_2 = torch.nn.Linear(128, 256) self.layer_3 = torch.nn.Linear(256, 10) self.train_dataset = None self.val_dataset = None self.test_dataset = None def forward(self, x): batch_size, channels, width, height = x.size() x = x.view(batch_size, -1) x = self.layer_1(x) x = F.relu(x) x = self.layer_2(x) x = F.relu(x) x = self.layer_3(x) x = F.log_softmax(x, dim=1) return x def prepare_data(self): # transform transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) # download data_dir = pathlib.Path.home() / 'data' mnist_train = datasets.MNIST(data_dir, train=True, download=True, transform=transform) mnist_test = datasets.MNIST(data_dir, train=False, download=True, transform=transform) # train/val split mnist_train, mnist_val = random_split(mnist_train, [55000, 5000]) # assign to use in dataloaders self.train_dataset = mnist_train self.val_dataset = mnist_val self.test_dataset = mnist_test def train_dataloader(self): return DataLoader(self.train_dataset, batch_size=64) def 
val_dataloader(self): return DataLoader(self.val_dataset, batch_size=64) def test_dataloader(self): return DataLoader(self.test_dataset, batch_size=64) def configure_optimizers(self): return Adam(self.parameters(), lr=1e-3) def training_step(self, batch, batch_idx): x, y = batch logits = self(x) loss = F.nll_loss(logits, y) # add logging logs = {'loss': loss} return {'loss': loss, 'log': logs} def validation_step(self, batch, batch_idx): x, y = batch logits = self(x) loss = F.nll_loss(logits, y) return {'val_loss': loss} def validation_epoch_end(self, outputs): avg_loss = torch.stack( # pylint: disable=no-member [x['val_loss'] for x in outputs]).mean() tensorboard_logs = {'val_loss': avg_loss} return {'avg_val_loss': avg_loss, 'log': tensorboard_logs} def test_step(self, batch, batch_idx): x, y = batch logits = self(x) loss = F.nll_loss(logits, y) return {'val_loss': loss} def test_epoch_end(self, outputs): avg_loss = torch.stack( # pylint: disable=no-member [x['val_loss'] for x in outputs]).mean() tensorboard_logs = {'val_loss': avg_loss} return {'avg_val_loss': avg_loss, 'log': tensorboard_logs} def init_ddp_connection(self, proc_rank: int, world_size: int) -> None: torch.distributed.init_process_group( 'nccl', rank=proc_rank, world_size=world_size) @hydra.main(config_path='conf.yaml') def main(conf: OmegaConf): model = LitMNIST() trainer = pl.Trainer(gpus=conf.gpus, num_nodes=conf.num_nodes, distributed_backend=conf.distributed_backend, max_epochs=3) trainer.fit(model) if __name__ == '__main__': main() # pylint: disable=no-value-for-parameter ``` conf.yaml: ``` gpus: 1 num_nodes: 2 distributed_backend: ddp ``` ### Expected behavior Train should go without error ### Environment ``` cuda: GPU: Tesla K80 Tesla K80 Tesla K80 Tesla K80 Tesla K80 Tesla K80 Tesla K80 Tesla K80 available: True version: 10.1 packages: numpy: 1.18.1 pyTorch_debug: False pyTorch_version: 1.4.0 pytorch-lightning: 0.7.1 tensorboard: 2.2.0 tqdm: 4.45.0 system: OS: Linux architecture: 64bit processor: x86_64 python: 3.6.10 version: #113-Ubuntu SMP Wed Jan 29 14:54:54 UTC 2020 ``` ### Additional context <!-- Add any other context about the problem here. -->
2020-04-04T16:35:26Z
[]
[]
Traceback (most recent call last):
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap
    fn(i, *args)
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 342, in ddp_train
    self.run_pretrain_routine(model)
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 777, in run_pretrain_routine
    self.configure_checkpoint_callback()
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/trainer/callback_config.py", line 45, in configure_checkpoint_callback
    f'version_{self.logger.version}',
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/loggers/tensorboard.py", line 161, in version
    self._version = self._get_next_version()
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.6/site-packages/pytorch_lightning/loggers/tensorboard.py", line 167, in _get_next_version
    for d in os.listdir(root_dir):
FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/pytorch-lightning-intro-guide/outputs/2020-04-04/15-53-26/lightning_logs'
105
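The patch above simply treats a missing log directory as "no versions yet" instead of trying to list it. A minimal standalone version of that logic:

```python
import logging
import os

log = logging.getLogger(__name__)

def get_next_version(save_dir, name):
    root_dir = os.path.join(save_dir, name)

    if not os.path.isdir(root_dir):
        # On non-zero ranks the directory may not have been created yet.
        log.warning('Missing logger folder: %s', root_dir)
        return 0

    existing_versions = [
        int(d.split('_')[1])
        for d in os.listdir(root_dir)
        if os.path.isdir(os.path.join(root_dir, d)) and d.startswith('version_')
    ]
    return max(existing_versions) + 1 if existing_versions else 0
```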
Lightning-AI/lightning
Lightning-AI__lightning-1385
4ed3027309fe1882554e9b7ffe33f1aa92c88106
diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -363,15 +363,19 @@ def load_spawn_weights(self, original_model): :param model: :return: """ - # load weights saved in ddp - path = os.path.join(self.default_save_path, '__temp_weight_ddp_end.ckpt') - loaded_model = original_model.__class__.load_from_checkpoint(path) - # copy loaded weights to old model - original_model.load_state_dict(loaded_model.state_dict()) + loaded_model = original_model - # remove ddp weights - os.remove(path) + if self.proc_rank == 0: + # load weights saved in ddp + path = os.path.join(self.default_save_path, '__temp_weight_ddp_end.ckpt') + loaded_model = original_model.__class__.load_from_checkpoint(path) + + # copy loaded weights to old model + original_model.load_state_dict(loaded_model.state_dict()) + + # remove ddp weights + os.remove(path) return loaded_model
Trainer DDP should invoke load_spawn_weights() only in proc_rank == 0 ## 🐛 Bug Trainer DDP load_spawn_weights should happen only in proc_rank == 0 since only in this process (node) `save_spawn_weights` actually saves checkpoint ### To Reproduce Steps to reproduce the behavior: 1. setup two-node cluster. 1. set SLURM_NODEID on each node: '0' on node 0 and '1' on node 1. 2. run the script `python app.py` on each node. 3. see stdout on the node 1: ``` Traceback (most recent call last): File "app.py", line 166, in <module> main_() # pylint: disable=no-value-for-parameter File "app.py", line 162, in main_ trainer.fit(model) File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 593, in fit self.load_spawn_weights(model) File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 368, in load_spawn_weights loaded_model = original_model.__class__.load_from_checkpoint(path) File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/pytorch_lightning/core/lightning.py", line 1353, in load_from_checkpoint checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage) File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/torch/serialization.py", line 525, in load with _open_file_like(f, 'rb') as opened_file: File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/torch/serialization.py", line 212, in _open_file_like return _open_file(name_or_buffer, mode) File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/torch/serialization.py", line 193, in __init__ super(_open_file, self).__init__(open(name, mode)) FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/pytorch-lightning-intro-guide/__temp_weight_ddp_end.ckpt' ``` #### Code sample app.py: ``` import pathlib import pytorch_lightning as pl import torch from torch.nn import functional as F from torch.optim import Adam from torch.utils.data import DataLoader, random_split from torchvision import datasets, transforms class LitMNIST(pl.LightningModule): def __init__(self): super().__init__() self.layer_1 = torch.nn.Linear(28 * 28, 128) self.layer_2 = torch.nn.Linear(128, 256) self.layer_3 = torch.nn.Linear(256, 10) self.train_dataset = None self.val_dataset = None self.test_dataset = None def forward(self, x): batch_size, channels, width, height = x.size() x = x.view(batch_size, -1) x = self.layer_1(x) x = F.relu(x) x = self.layer_2(x) x = F.relu(x) x = self.layer_3(x) x = F.log_softmax(x, dim=1) return x def prepare_data(self): # transform transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) # download data_dir = pathlib.Path.home() / 'data' mnist_train = datasets.MNIST(data_dir, train=True, download=True, transform=transform) mnist_test = datasets.MNIST(data_dir, train=False, download=True, transform=transform) # train/val split mnist_train, mnist_val = random_split(mnist_train, [55000, 5000]) # assign to use in dataloaders self.train_dataset = mnist_train self.val_dataset = mnist_val self.test_dataset = mnist_test def train_dataloader(self): return DataLoader(self.train_dataset, batch_size=64) def val_dataloader(self): return DataLoader(self.val_dataset, batch_size=64) def test_dataloader(self): return DataLoader(self.test_dataset, batch_size=64) def configure_optimizers(self): return Adam(self.parameters(), lr=1e-3) def training_step(self, batch, batch_idx): x, y = 
batch logits = self(x) loss = F.nll_loss(logits, y) # add logging logs = {'loss': loss} return {'loss': loss, 'log': logs} def validation_step(self, batch, batch_idx): x, y = batch logits = self(x) loss = F.nll_loss(logits, y) return {'val_loss': loss} def test_step(self, batch, batch_idx): x, y = batch logits = self(x) loss = F.nll_loss(logits, y) return {'val_loss': loss} def test_epoch_end(self, outputs): avg_loss = torch.stack( # pylint: disable=no-member [x['val_loss'] for x in outputs]).mean() tensorboard_logs = {'val_loss': avg_loss} return {'avg_val_loss': avg_loss, 'log': tensorboard_logs} def init_ddp_connection(self, proc_rank: int, world_size: int) -> None: torch.distributed.init_process_group( 'nccl', rank=proc_rank, world_size=world_size) def main(): model = LitMNIST() gpus = 1 num_nodes = 2 trainer = pl.Trainer(gpus=gpus, num_nodes=num_nodes, distributed_backend='ddp', max_epochs=3) trainer.fit(model) if __name__ == '__main__': main() ``` ### Expected behavior All workers on all nodes should finish without errors. ### Environment On each node: ``` cuda: GPU: Tesla K80 Tesla K80 Tesla K80 Tesla K80 Tesla K80 Tesla K80 Tesla K80 Tesla K80 available: True version: 10.1 packages: numpy: 1.16.6 pyTorch_debug: False pyTorch_version: 1.4.0 pytorch-lightning: 0.7.1 tensorboard: 2.2.0 tqdm: 4.44.1 system: OS: Linux architecture: 64bit processor: x86_64 python: 3.7.7 version: #113-Ubuntu SMP Wed Jan 29 14:54:54 UTC 2020 ``` ### Additional context <!-- Add any other context about the problem here. -->
2020-04-05T23:51:47Z
[]
[]
Traceback (most recent call last):
  File "app.py", line 166, in <module>
    main_()  # pylint: disable=no-value-for-parameter
  File "app.py", line 162, in main_
    trainer.fit(model)
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 593, in fit
    self.load_spawn_weights(model)
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 368, in load_spawn_weights
    loaded_model = original_model.__class__.load_from_checkpoint(path)
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/pytorch_lightning/core/lightning.py", line 1353, in load_from_checkpoint
    checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/torch/serialization.py", line 525, in load
    with _open_file_like(f, 'rb') as opened_file:
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/torch/serialization.py", line 212, in _open_file_like
    return _open_file(name_or_buffer, mode)
  File "/home/ubuntu/anaconda3/envs/nightly_pt/lib/python3.7/site-packages/torch/serialization.py", line 193, in __init__
    super(_open_file, self).__init__(open(name, mode))
FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/pytorch-lightning-intro-guide/__temp_weight_ddp_end.ckpt'
107
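Condensed, the fix above guards the checkpoint round-trip so that only the process that wrote `__temp_weight_ddp_end.ckpt` (rank 0) tries to read and delete it, while other ranks keep the model they already have. A standalone rendering of that guard:

```python
import os

def load_spawn_weights(trainer, original_model):
    loaded_model = original_model

    if trainer.proc_rank == 0:
        # Only rank 0 saved the temporary checkpoint, so only rank 0 reloads it.
        path = os.path.join(trainer.default_save_path, '__temp_weight_ddp_end.ckpt')
        loaded_model = original_model.__class__.load_from_checkpoint(path)

        # Copy loaded weights into the existing model, then clean up.
        original_model.load_state_dict(loaded_model.state_dict())
        os.remove(path)

    return loaded_model
```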
Lightning-AI/lightning
Lightning-AI__lightning-1423
3f1e4b953f84ecdac7dada0c6b57d908efc9c3d3
diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py --- a/pytorch_lightning/trainer/distrib_parts.py +++ b/pytorch_lightning/trainer/distrib_parts.py @@ -566,7 +566,7 @@ def check_gpus_data_type(gpus): :return: return unmodified gpus variable """ - if gpus is not None and type(gpus) not in (int, str, list): + if gpus is not None and (not isinstance(gpus, (int, str, list)) or isinstance(gpus, bool)): raise MisconfigurationException("GPUs must be int, string or list of ints or None.")
Use isinstance() instead of type() in trainer.distrib_parts.check_gpus_data_type <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug When instantiating a `Trainer` object, it makes sense to be able to pass a subclass of `list`. Ideally, this would be something even more general like `collections.abc.Sequence`, but I'm not too familiar with Lightning's codebase and that change would have a greater likelihood of breaking things. ### To Reproduce Instantiate a `Trainer` with the `gpus` parameter being a subclass of `list`. #### Code sample ```python >>> from pytorch_lightning import Trainer >>> class MyList(list): ... pass ... >>> gpus = MyList([0]) >>> t = Trainer(gpus=gpus) ``` This produces ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/anaconda/miniconda3/envs/ai/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 366, in __init__ self.data_parallel_device_ids = parse_gpu_ids(self.gpus) File "/opt/anaconda/miniconda3/envs/ai/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 613, in parse_gpu_ids check_gpus_data_type(gpus) File "/opt/anaconda/miniconda3/envs/ai/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 561, in check_gpus_data_type raise MisconfigurationException("GPUs must be int, string or list of ints or None.") pytorch_lightning.utilities.debugging.MisconfigurationException: GPUs must be int, string or list of ints or None. ``` ### Expected behavior `Trainer` is instantiated normally as it would had a list been passed. ### Environment - PyTorch Version: 1.4.0 - PyTorch Lightning Version: 0.7.1 - OS: Ubuntu 19.10 - How you installed PyTorch: `pip` - Python version: 3.7 ### Potential Fix In `pytorch_lightning/trainer/distrib_parts.py` check types using `isinstance()` instead of `type()`: ```python def check_gpus_data_type(gpus): # if gpus is not None and type(gpus) not in (int, str, list): if gpus is not None and not isinstance(gpus, (int, str, list)): raise MisconfigurationException("GPUs must be int, string or list of ints or None.") ``` I'll put in a PR if this change sounds good
Hi! thanks for your contribution!, great first issue! I do like this shift from `type` to an `isinstance` which extend accepted types also to child... as always a good PR is always welcome cc: @PyTorchLightning/core-contributors @jeremyjordan
2020-04-09T09:44:35Z
[]
[]
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/opt/anaconda/miniconda3/envs/ai/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 366, in __init__
    self.data_parallel_device_ids = parse_gpu_ids(self.gpus)
  File "/opt/anaconda/miniconda3/envs/ai/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 613, in parse_gpu_ids
    check_gpus_data_type(gpus)
  File "/opt/anaconda/miniconda3/envs/ai/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 561, in check_gpus_data_type
    raise MisconfigurationException("GPUs must be int, string or list of ints or None.")
pytorch_lightning.utilities.debugging.MisconfigurationException: GPUs must be int, string or list of ints or None.
111
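The merged check switches to `isinstance` so subclasses are accepted, while explicitly rejecting `bool` (a subclass of `int` in Python). Illustrated standalone, with a plain `TypeError` standing in for `MisconfigurationException`:

```python
def check_gpus_data_type(gpus):
    # isinstance accepts subclasses such as a custom list type; bool must be
    # excluded explicitly because isinstance(True, int) is True.
    if gpus is not None and (not isinstance(gpus, (int, str, list)) or isinstance(gpus, bool)):
        raise TypeError("GPUs must be int, string or list of ints or None.")

class MyList(list):
    pass

check_gpus_data_type(MyList([0]))  # accepted after the fix
check_gpus_data_type("0,1")        # accepted
# check_gpus_data_type(True)       # would raise: bools are rejected
```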
Lightning-AI/lightning
Lightning-AI__lightning-1513
9b31272cf0f3079a244944096b4a81eec20fe555
diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py --- a/pytorch_lightning/trainer/data_loading.py +++ b/pytorch_lightning/trainer/data_loading.py @@ -61,6 +61,7 @@ class TrainerDataLoadingMixin(ABC): train_percent_check: float val_percent_check: float test_percent_check: float + replace_sampler_ddp: bool @abstractmethod def is_overriden(self, *args): @@ -88,10 +89,8 @@ def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader: # don't do anything if it's not a dataloader if not isinstance(dataloader, DataLoader): return dataloader - - need_dist_sampler = self.use_ddp or self.use_ddp2 or self.use_tpu - - if need_dist_sampler: + need_dist_sampler = (self.use_ddp or self.use_ddp2 or self.use_tpu) + if self.replace_sampler_ddp and need_dist_sampler: skip_keys = ['sampler', 'batch_sampler', 'dataset_kind'] diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -127,6 +127,7 @@ def __init__( benchmark: bool = False, reload_dataloaders_every_epoch: bool = False, auto_lr_find: Union[bool, str] = False, + replace_sampler_ddp: bool = True, default_save_path=None, # backward compatible, todo: remove in v0.8.0 gradient_clip=None, # backward compatible, todo: remove in v0.8.0 nb_gpu_nodes=None, # backward compatible, todo: remove in v0.8.0 @@ -282,6 +283,9 @@ def __init__( rate in self.hparams.lr | self.hparams.learning_rate in the lightning module. To use a different key, set a string instead of True with the key name. + replace_sampler_ddp: Explicitly enables or disables sampler replacement. + If not specified this will toggled automatically ddp is used + benchmark: If true enables cudnn.benchmark. terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the @@ -362,6 +366,7 @@ def __init__( self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch self.auto_lr_find = auto_lr_find + self.replace_sampler_ddp = replace_sampler_ddp self.truncated_bptt_steps = truncated_bptt_steps self.resume_from_checkpoint = resume_from_checkpoint
0.7.3 breaks reusable dataloaders in DDP ## 🐛 Bug 0.7.3 breaks reusable dataloaders in DDP ``` Traceback (most recent call last): File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap fn(i, *args) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 345, in ddp_train self.run_pretrain_routine(model) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 864, in run_pretrain_routine self.train() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 296, in train self.reset_train_dataloader(model) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/data_loading.py", line 128, in reset_train_dataloader self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/data_loading.py", line 112, in auto_add_sampler dataloader = type(dataloader)(**dl_args) File "../main/dataset.py", line 15, in __init__ super().__init__(*args, **kwargs) TypeError: __init__() got an unexpected keyword argument 'iterator' ``` #### Code sample ``` class _RepeatSampler(object): def __init__(self, sampler): self.sampler = sampler def __iter__(self): while True: yield from iter(self.sampler) class FastDataLoader(torch.utils.data.dataloader.DataLoader): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) self.iterator = super().__iter__() def __len__(self): return len(self.batch_sampler.sampler) def __iter__(self): for i in range(len(self)): yield next(self.iterator) ``` replace Dataloader with FastDataLoader in lightning (this snippet is from https://github.com/pytorch/pytorch/issues/15849) ### Expected behavior Dataloaders initialize correctly and are reused between train/val/epochs (works as expected in 0.7.1) ### Probable Cause https://github.com/PyTorchLightning/pytorch-lightning/pull/1425
ummm yeah. we should change the dataloader swap with swapping a dataloader init from the class or not swipe the dataloder at all but set the correct sampler. @justusschock any ideas? This is a mixture of #1425 and #1346 And I don't think we can prevent this when we want to set correct samplers also in subclasses of `DataLoader`. We use all public attributes for reinitialization. The probably easiest fix for you, would be to change `self.iterator` to `self._iterator` to avoid passing this argument in reinit. If we just change the sampler, this might yield unexpected behaviour.
2020-04-17T07:59:07Z
[]
[]
Traceback (most recent call last):
  File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap
    fn(i, *args)
  File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 345, in ddp_train
    self.run_pretrain_routine(model)
  File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 864, in run_pretrain_routine
    self.train()
  File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 296, in train
    self.reset_train_dataloader(model)
  File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/data_loading.py", line 128, in reset_train_dataloader
    self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)
  File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/data_loading.py", line 112, in auto_add_sampler
    dataloader = type(dataloader)(**dl_args)
  File "../main/dataset.py", line 15, in __init__
    super().__init__(*args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'iterator'
128
Lightning-AI/lightning
Lightning-AI__lightning-1582
5ab5084f7b9e137c1e7769228aaed8da92eaad6e
diff --git a/pytorch_lightning/loggers/base.py b/pytorch_lightning/loggers/base.py --- a/pytorch_lightning/loggers/base.py +++ b/pytorch_lightning/loggers/base.py @@ -280,6 +280,7 @@ class LoggerCollection(LightningLoggerBase): Args: logger_iterable: An iterable collection of loggers """ + def __init__(self, logger_iterable: Iterable[LightningLoggerBase]): super().__init__() self._logger_iterable = logger_iterable @@ -347,20 +348,28 @@ def merge_dicts( Examples: >>> import pprint - >>> d1 = {'a': 1.7, 'b': 2.0, 'c': 1} - >>> d2 = {'a': 1.1, 'b': 2.2, 'v': 1} - >>> d3 = {'a': 1.1, 'v': 2.3} + >>> d1 = {'a': 1.7, 'b': 2.0, 'c': 1, 'd': {'d1': 1, 'd3': 3}} + >>> d2 = {'a': 1.1, 'b': 2.2, 'v': 1, 'd': {'d1': 2, 'd2': 3}} + >>> d3 = {'a': 1.1, 'v': 2.3, 'd': {'d3': 3, 'd4': {'d5': 1}}} >>> dflt_func = min - >>> agg_funcs = {'a': np.mean, 'v': max} + >>> agg_funcs = {'a': np.mean, 'v': max, 'd': {'d1': sum}} >>> pprint.pprint(merge_dicts([d1, d2, d3], agg_funcs, dflt_func)) - {'a': 1.3, 'b': 2.0, 'c': 1, 'v': 2.3} + {'a': 1.3, + 'b': 2.0, + 'c': 1, + 'd': {'d1': 3, 'd2': 3, 'd3': 3, 'd4': {'d5': 1}}, + 'v': 2.3} """ - + agg_key_funcs = agg_key_funcs or dict() keys = list(functools.reduce(operator.or_, [set(d.keys()) for d in dicts])) d_out = {} for k in keys: - fn = agg_key_funcs.get(k, default_func) if agg_key_funcs else default_func - agg_val = fn([v for v in [d_in.get(k) for d_in in dicts] if v is not None]) - d_out[k] = agg_val + fn = agg_key_funcs.get(k) + values_to_agg = [v for v in [d_in.get(k) for d_in in dicts] if v is not None] + + if isinstance(values_to_agg[0], dict): + d_out[k] = merge_dicts(values_to_agg, fn, default_func) + else: + d_out[k] = (fn or default_func)(values_to_agg) return d_out
After update from 0.5.x to 0.7.3 merge_dicts #1278 sometimes breaks training ## 🐛 Bug After I updated from a quite old lightning version to the newest one, I sometimes get a TypeError from merge_dicts. I guess it's related to this MR #1278 . This Type error is deterministic, meaning it always occurs at the same global step during training. It somehow seems to be related to val_check_interval as well. For some data changing this value leads to no Error. But for other datasets this does not work. Also this only happens during training step, I suspect the training step after validating. ### To Reproduce Steps to reproduce the behavior: I have no Idea. ``` File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 363, in train self.run_training_epoch() File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 470, in run_training_epoch self.log_metrics(batch_step_metrics, grad_norm_dic) File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/pytorch_lightning/trainer/logging.py", line 74, in log_metrics self.logger.agg_and_log_metrics(scalar_metrics, step=step) File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/pytorch_lightning/loggers/base.py", line 128, in agg_and_log_metrics agg_step, metrics_to_log = self._aggregate_metrics(metrics=metrics, step=step) File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/pytorch_lightning/loggers/base.py", line 101, in _aggregate_metrics agg_step, agg_mets = self._finalize_agg_metrics() File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/pytorch_lightning/loggers/base.py", line 116, in _finalize_agg_metrics agg_mets = merge_dicts(self._metrics_to_agg, self._agg_key_funcs, self._agg_default_func) File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/pytorch_lightning/loggers/base.py", line 347, in merge_dicts agg_val = fn([v for v in [d_in.get(k) for d_in in dicts] if v is not None]) File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 3118, in mean out=out, **kwargs) File "/home/sebastian/.cache/pypoetry/virtualenvs/forgerydetection-iC5ox0X1-py3.7/lib/python3.7/site-packages/numpy/core/_methods.py", line 75, in _mean ret = umr_sum(arr, axis, dtype, out, keepdims) TypeError: unsupported operand type(s) for +: 'dict' and 'dict' ``` Sometimes its also 'dict' and 'int' ### Expected behavior At least should not break training, but maybe a more verbose message what is wrong. Its quite hard for me to debug, as the structure of the logs I'm returning to lightning does not change. 
### Environment ``` cuda: GPU: GeForce RTX 2080 Ti GeForce RTX 2080 Ti GeForce RTX 2080 Ti GeForce RTX 2080 Ti GeForce RTX 2080 Ti GeForce RTX 2080 Ti GeForce RTX 2080 Ti GeForce RTX 2080 Ti available: True version: 10.1.243 packages: numpy: 1.16.4 pyTorch_debug: False pyTorch_version: 1.3.0 pytorch-lightning: 0.7.3 tensorboard: 2.2.0 tqdm: 4.45.0 system: OS: Linux architecture: 64bit ELF processor: x86_64 python: 3.7.7 version: #97~16.04.1-Ubuntu SMP Wed Apr 1 03:03:31 UTC 2020 ``` ### Additional context Also for some reason some runs have an issue with multiprocessing, but it does not break the training: ``` Traceback (most recent call last):████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9/9 [00:00<00:00, 8.76it/s] File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/multiprocessing/util.py", line 277, in _run_finalizers finalizer() File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/multiprocessing/util.py", line 201, in __call__ res = self._callback(*self._args, **self._kwargs) File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/multiprocessing/util.py", line 110, in _remove_temp_dir rmtree(tempdir) File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/shutil.py", line 498, in rmtree onerror(os.rmdir, path, sys.exc_info()) File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/shutil.py", line 496, in rmtree os.rmdir(path) OSError: [Errno 39] Directory not empty: '/tmp/pymp-jcqai2xr' ```
Did you passed any 'agg_key_funcs' to the logger class? If I understand the code correctly, by default np.mean is used to aggregate the dict values returned during training. Maybe numpy tries in the mean function to *add* (+ func) values which can't be summed up? Can you maybe post the code snippets where you return the metrics to log in the lightning module and the initialization of the logger if you use one? If you don't use a logger, you can disable it by passing logger=False to the trainer (don't know if your previous version had logger on by default). Hope I can help :) Thanks for the quick reply! No I'm not using any 'agg_key_funcs' that I know of. > If I understand the code correctly, by default np.mean is used to aggregate the dict values returned during training. This only happens when there is a step in time where two times stuff is logged, right? So my guess is that at some point that is the case that two logs have to be "unified" but this fails, because I'm using "dict in dicts". I need this tho, because I want to have i.e. loss train and val in the same graph. I'm using the TestTubeLogger: ` logger = TestTubeLogger(save_dir=log_dir, name=name, description=description) ` and just pass this to the Trainer. The metric logging to lightning is a bit scattered: 1. train_step in model: ``` x, target = batch pred = self.forward(x) loss = self.loss(pred, target) lightning_log = {"loss": loss} with torch.no_grad(): train_acc = self.calculate_accuracy(pred, target) tensorboard_log = {"loss": loss, "acc": train_acc} return tensorboard_log, lightning_log ``` 2. this is passed to a function that lets me add train and val to same graph: ``` def _construct_lightning_log( self, tensorboard_log: dict, lightning_log: dict = None, suffix: str = "train", prefix: str = "metrics", ): lightning_log = lightning_log or {} fixed_log = {} for metric, value in tensorboard_log.items(): if isinstance(value, dict): fixed_log[f"{prefix}/{metric}"] = value else: fixed_log[f"{prefix}/{metric}"] = {suffix: value} return {"log": fixed_log, **lightning_log} ``` Do you pass it after training_step or training_epoch_end? I think lightning collects your logs and tries to aggregate it to one value. I can't test it now. Maybe tomorrow. But when I quickly type this into python interpreter: ``` >>> d={} >>> np.mean([d,d]) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<__array_function__ internals>", line 5, in mean File "/usr/lib/python3.8/site-packages/numpy/core/fromnumeric.py", line 3334, in mean return _methods._mean(a, axis=axis, dtype=dtype, File "/usr/lib/python3.8/site-packages/numpy/core/_methods.py", line 151, in _mean ret = umr_sum(arr, axis, dtype, out, keepdims) TypeError: unsupported operand type(s) for +: 'dict' and 'dict' ``` Seems like getting your error. Maybe print what you exactly return and when it crashes. When I have time tomorrow, I will also make some tests. After training_step. I not have a training_epoch_end or training_end method defined. > I think lightning collects your logs and tries to aggregate it to one value. Yes I think so as well. Ok I return something like this: `{'metrics/aud_std': {'test': tensor(1.6337, device='cuda:0')}, 'metrics/class_loss_diff': {'test': tensor(nan)}, 'metrics/class_loss_val': {'0': tensor(nan), '1': tensor(91.5485)}, 'metrics/loss': {'test': tensor(45.7742, device='cuda:0')}, 'metrics/vid_std': {'test': tensor(1.6506, device='cuda:0')}}` What do you mean by when it crashes exactly? 
I think when it crashes it's always the train step after a validation step (keep in mind I'm validating several times during one epoch). If I change the val_check_interval the error either disappears or happens at a different batch number. Hello. I think the problem is in your metrics type. Metrics must have the `Dict[str, float]` type. But in your case, the `metrics` is a nested dict. So that's why the values fail to be aggregated. Is it possible for you to flatten the dictionary? @alexeykarnachev Hey! Ah yes that's what I thought. Do you know why the metrics dict is enforced to be of this type? In 0.5.x this was not an issue as far as I know. I mean, yes I can flatten it, but I want to have e.g. val/loss and train/loss in the same graph. It's basically this: https://pytorch.org/docs/stable/tensorboard.html#torch.utils.tensorboard.writer.SummaryWriter.add_scalars I know that here https://github.com/PyTorchLightning/pytorch-lightning/issues/1144#issuecomment-599089378 it was said that this should not be done, but for me this is essential. Is there a way that I can override the merge_dicts function? If so, how would I do that? @fellnerse Okay, I got your point, let's ask Borda's advice) @Borda, what do you think? Is it possible to combine nested metrics dictionaries with the metrics aggregation logic? At first sight, it doesn't look like a big problem. Maybe you can see any side effects of tracking aggregated metrics with nested dictionaries? If not, I can try to fix this issue. I guess it can be used, we just need to take care of the depth, and the aggregation will be a bit complicated...
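Until nested dictionaries are handled by the aggregation itself (as the patch above eventually does), the flattening workaround suggested in this thread could look roughly like this — the helper name and the `/`-joined key scheme are assumptions for illustration, and note that flattening gives up the `add_scalars`-style same-graph grouping the reporter wants:

```python
from collections.abc import MutableMapping
from typing import Any, Dict


def flatten_metrics(metrics: Dict[str, Any], parent_key: str = '', sep: str = '/') -> Dict[str, Any]:
    """Turn {'loss': {'train': 0.1}} into {'loss/train': 0.1} so np.mean sees scalars."""
    flat = {}
    for key, value in metrics.items():
        new_key = f'{parent_key}{sep}{key}' if parent_key else key
        if isinstance(value, MutableMapping):
            flat.update(flatten_metrics(value, new_key, sep))
        else:
            flat[new_key] = value
    return flat


print(flatten_metrics({'loss': {'train': 0.1, 'val': 0.2}, 'acc': 0.9}))
# {'loss/train': 0.1, 'loss/val': 0.2, 'acc': 0.9}
```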
2020-04-23T20:27:40Z
[]
[]
Traceback (most recent call last):████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9/9 [00:00<00:00, 8.76it/s] File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/multiprocessing/util.py", line 277, in _run_finalizers finalizer() File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/multiprocessing/util.py", line 201, in __call__ res = self._callback(*self._args, **self._kwargs) File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/multiprocessing/util.py", line 110, in _remove_temp_dir rmtree(tempdir) File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/shutil.py", line 498, in rmtree onerror(os.rmdir, path, sys.exc_info()) File "/home/sebastian/.pyenv/versions/3.7.7/lib/python3.7/shutil.py", line 496, in rmtree os.rmdir(path) OSError: [Errno 39] Directory not empty: '/tmp/pymp-jcqai2xr'
140
Lightning-AI/lightning
Lightning-AI__lightning-1589
79196246cfcc73391de1be71bfb27d4366daf75a
diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py --- a/pytorch_lightning/trainer/distrib_parts.py +++ b/pytorch_lightning/trainer/distrib_parts.py @@ -461,10 +461,15 @@ def __transfer_data_to_device(self, batch, device, gpu_id=None): # when tuple if isinstance(batch, tuple): - batch = list(batch) - for i, x in enumerate(batch): - batch[i] = self.__transfer_data_to_device(x, device, gpu_id) - return tuple(batch) + # when namedtuple + if hasattr(batch, '_fields'): + elem_type = type(batch) + return elem_type(*(self.__transfer_data_to_device(x, device, gpu_id) for x in batch)) + else: + batch = list(batch) + for i, x in enumerate(batch): + batch[i] = self.__transfer_data_to_device(x, device, gpu_id) + return tuple(batch) # when dict if isinstance(batch, dict):
Named converted to regular tuples when sent to the gpu. <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> Named tuples returned from `Dataset` get converted to regular tuples when sent to the gpu. This happens because `isinstance(instance_of_a_named_tuple, tuple)` evaluates to True in `distrib_parts.py` https://github.com/PyTorchLightning/pytorch-lightning/blob/67d5f4dc392250d23bfeb11aba45e919a99ff1c0/pytorch_lightning/trainer/distrib_parts.py#L463 ### To Reproduce ```python import pytorch_lightning as pl from collections import namedtuple import torch import numpy NamedTupleDemoInput = namedtuple('DemoInput', ['x1', 'x2', 'y']) class NamedTupleDemoDataset: def __len__(self): return 30000 def __getitem__(self, index): x1 = numpy.random.uniform(0, 100) x2 = numpy.random.uniform(0, 100) y = 2*x1 + 3*x2 + numpy.random.normal(0, 0.05) return NamedTupleDemoInput(x1, x2, y) class WeightedSum(torch.nn.Module): def __init__(self): super(WeightedSum, self).__init__() self.a = torch.nn.Parameter(torch.zeros(1)) self.b = torch.nn.Parameter(torch.zeros(1)) def forward(self, x1, x2): return self.a * x1 + self.b * x2 class NamedTupleDemo(pl.LightningModule): def __init__(self): super(NamedTupleDemo, self).__init__() self.model = WeightedSum() def forward(self, x1, x2): return self.model(x1, x2) def train_dataloader(self): return torch.utils.data.DataLoader(NamedTupleDemoDataset(), batch_size=128) def training_step(self, batch, batch_index): yhat = self.forward(batch.x1, batch.x2) return {'loss': torch.nn.functional.mse_loss(batch.y, yhat)} def configure_optimizers(self): return torch.optim.Adam(self.parameters(), lr=1e-2) if __name__ == '__main__': module = NamedTupleDemo() pl.Trainer(max_epochs=20, gpus=1).fit(module) print(f'a={float(module.model.a)} b={float(module.model.b)}') ``` <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> ``` Traceback (most recent call last): File "demo.py", line 48, in <module> pl.Trainer(max_epochs=20, gpus=1).fit(module) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 749, in fit self.single_gpu_train(model) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/distrib_parts.py", line 491, in single_gpu_train self.run_pretrain_routine(model) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 910, in run_pretrain_routine self.train() File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 384, in train self.run_training_epoch() File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 456, in run_training_epoch _outputs = self.run_training_batch(batch, batch_idx) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 633, in run_training_batch loss, batch_output = optimizer_closure() File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 597, in optimizer_closure output_dict = self.training_forward(split_batch, batch_idx, opt_idx, self.hiddens) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 770, in training_forward output = self.model.training_step(*args) File "demo.py", line 40, in training_step yhat = 
self.forward(batch.x1, batch.x2) AttributeError: 'tuple' object has no attribute 'x1' ``` <!-- Ideally attach a minimal code sample to reproduce the described issue. Minimal means having the shortest code but still preserving the bug. --> ### Expected behavior Namedtuples returned from the dataset should keep their original fields. ### Environment * CUDA: - GPU: - GeForce RTX 2080 Ti - available: True - version: 10.2 * Packages: - numpy: 1.18.3 - pyTorch_debug: False - pyTorch_version: 1.5.0 - pytorch-lightning: 0.7.4rc5 - tensorboard: 2.2.1 - tqdm: 4.45.0 * System: - OS: Linux - architecture: - 64bit - ELF - processor: - python: 3.8.2 - version: #1 SMP PREEMPT Sun, 05 Apr 2020 05:13:14 +0000 <!-- Add any other context about the problem here. -->
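A standalone sketch of the detection used in the patch above: namedtuples are the only tuples that expose `_fields`, so the original element type can be rebuilt instead of collapsing to a plain tuple (the helper name is illustrative, not Lightning's internal one):

```python
from collections import namedtuple

import torch


def transfer_to_device(batch, device):
    if isinstance(batch, torch.Tensor):
        return batch.to(device)
    if isinstance(batch, tuple):
        if hasattr(batch, '_fields'):
            # namedtuple: rebuild the same type so field access keeps working
            return type(batch)(*(transfer_to_device(x, device) for x in batch))
        return tuple(transfer_to_device(x, device) for x in batch)
    return batch


Point = namedtuple('Point', ['x', 'y'])
moved = transfer_to_device(Point(torch.zeros(1), torch.ones(1)), 'cpu')
print(type(moved).__name__, moved.x)  # Point tensor([0.])
```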
2020-04-24T03:49:56Z
[]
[]
Traceback (most recent call last): File "demo.py", line 48, in <module> pl.Trainer(max_epochs=20, gpus=1).fit(module) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 749, in fit self.single_gpu_train(model) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/distrib_parts.py", line 491, in single_gpu_train self.run_pretrain_routine(model) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 910, in run_pretrain_routine self.train() File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 384, in train self.run_training_epoch() File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 456, in run_training_epoch _outputs = self.run_training_batch(batch, batch_idx) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 633, in run_training_batch loss, batch_output = optimizer_closure() File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 597, in optimizer_closure output_dict = self.training_forward(split_batch, batch_idx, opt_idx, self.hiddens) File "/home/n/repos/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 770, in training_forward output = self.model.training_step(*args) File "demo.py", line 40, in training_step yhat = self.forward(batch.x1, batch.x2) AttributeError: 'tuple' object has no attribute 'x1'
141
Lightning-AI/lightning
Lightning-AI__lightning-2014
8b9b923ca8ad9fdb0ae22928de0029e7c2e7a782
diff --git a/pl_examples/domain_templates/computer_vision_fine_tuning.py b/pl_examples/domain_templates/computer_vision_fine_tuning.py --- a/pl_examples/domain_templates/computer_vision_fine_tuning.py +++ b/pl_examples/domain_templates/computer_vision_fine_tuning.py @@ -450,5 +450,4 @@ def get_args() -> argparse.Namespace: if __name__ == '__main__': - main(get_args()) diff --git a/pl_examples/domain_templates/generative_adversarial_net.py b/pl_examples/domain_templates/generative_adversarial_net.py --- a/pl_examples/domain_templates/generative_adversarial_net.py +++ b/pl_examples/domain_templates/generative_adversarial_net.py @@ -7,7 +7,7 @@ tensorboard --logdir default """ import os -from argparse import ArgumentParser +from argparse import ArgumentParser, Namespace from collections import OrderedDict import numpy as np @@ -183,7 +183,7 @@ def on_epoch_end(self): self.logger.experiment.add_image('generated_images', grid, self.current_epoch) -def main(args): +def main(args: Namespace) -> None: # ------------------------ # 1 INIT LIGHTNING MODEL # ------------------------ diff --git a/pl_examples/domain_templates/imagenet.py b/pl_examples/domain_templates/imagenet.py --- a/pl_examples/domain_templates/imagenet.py +++ b/pl_examples/domain_templates/imagenet.py @@ -1,7 +1,7 @@ """ This example is largely adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py """ -import argparse +from argparse import ArgumentParser, Namespace import os import random from collections import OrderedDict @@ -183,7 +183,7 @@ def val_dataloader(self): @staticmethod def add_model_specific_args(parent_parser): # pragma: no-cover - parser = argparse.ArgumentParser(parents=[parent_parser]) + parser = ArgumentParser(parents=[parent_parser]) parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', choices=MODEL_NAMES, help='model architecture: ' + ' | '.join(MODEL_NAMES) + @@ -210,7 +210,7 @@ def add_model_specific_args(parent_parser): # pragma: no-cover def get_args(): - parent_parser = argparse.ArgumentParser(add_help=False) + parent_parser = ArgumentParser(add_help=False) parent_parser.add_argument('--data-path', metavar='DIR', type=str, help='path to dataset') parent_parser.add_argument('--save-path', metavar='DIR', default=".", type=str, @@ -228,20 +228,23 @@ def get_args(): return parser.parse_args() -def main(hparams): - model = ImageNetLightningModel(hparams) - if hparams.seed is not None: - random.seed(hparams.seed) - torch.manual_seed(hparams.seed) +def main(args: Namespace) -> None: + model = ImageNetLightningModel(**vars(args)) + + if args.seed is not None: + random.seed(args.seed) + torch.manual_seed(args.seed) cudnn.deterministic = True + trainer = pl.Trainer( - default_root_dir=hparams.save_path, - gpus=hparams.gpus, - max_epochs=hparams.epochs, - distributed_backend=hparams.distributed_backend, - precision=16 if hparams.use_16bit else 32, + default_root_dir=args.save_path, + gpus=args.gpus, + max_epochs=args.epochs, + distributed_backend=args.distributed_backend, + precision=16 if args.use_16bit else 32, ) - if hparams.evaluate: + + if args.evaluate: trainer.run_evaluation() else: trainer.fit(model)
Bug in GAN example Bug in https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pl_examples/domain_templates/generative_adversarial_net.py When I run `python generative_adversarial_net.py ` I get ``` Traceback (most recent call last): File "generative_adversarial_net.py", line 218, in <module> main(hparams) File "generative_adversarial_net.py", line 192, in main model = GAN(hparams) File "generative_adversarial_net.py", line 90, in __init__ self.generator = Generator(latent_dim=self.latent_dim, img_shape=mnist_shape) File "generative_adversarial_net.py", line 39, in __init__ *block(latent_dim, 128, normalize=False), File "generative_adversarial_net.py", line 32, in block layers = [nn.Linear(in_feat, out_feat)] File "/home/vladimir/anaconda3/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 72, in __init__ self.weight = Parameter(torch.Tensor(out_features, in_features)) TypeError: new(): argument 'size' must be tuple of ints, but found element of type Namespace at pos 2 ```
Replace with `model = GAN(**vars(hparams))` [here](https://github.com/PyTorchLightning/pytorch-lightning/blob/fdbbe968256f6c68a5dbb840a2004b77a618ef61/pl_examples/domain_templates/generative_adversarial_net.py#L192). Same bug in [imagenet script](https://github.com/PyTorchLightning/pytorch-lightning/blob/fdbbe968256f6c68a5dbb840a2004b77a618ef61/pl_examples/domain_templates/imagenet.py#L232) also. @ternaus @rohitgr7 mind submitting a PR to fix? :)
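To see the difference between the two call styles in isolation (no GAN class needed — `build` here is just an illustrative stand-in for the model constructor):

```python
from argparse import Namespace


def build(latent_dim=100, lr=0.0002):
    return latent_dim, lr


hparams = Namespace(latent_dim=64, lr=0.01)

# passing the Namespace itself puts the whole object into the first argument
print(build(hparams))          # (Namespace(latent_dim=64, lr=0.01), 0.0002)

# unpacking maps each entry onto its keyword argument, which is what the fix does
print(build(**vars(hparams)))  # (64, 0.01)
```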
2020-05-30T12:26:09Z
[]
[]
Traceback (most recent call last): File "generative_adversarial_net.py", line 218, in <module> main(hparams) File "generative_adversarial_net.py", line 192, in main model = GAN(hparams) File "generative_adversarial_net.py", line 90, in __init__ self.generator = Generator(latent_dim=self.latent_dim, img_shape=mnist_shape) File "generative_adversarial_net.py", line 39, in __init__ *block(latent_dim, 128, normalize=False), File "generative_adversarial_net.py", line 32, in block layers = [nn.Linear(in_feat, out_feat)] File "/home/vladimir/anaconda3/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 72, in __init__ self.weight = Parameter(torch.Tensor(out_features, in_features)) TypeError: new(): argument 'size' must be tuple of ints, but found element of type Namespace at pos 2
177
Lightning-AI/lightning
Lightning-AI__lightning-2115
0bd7780adc4d68007946cf380a6a24e1a08d99d1
diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py --- a/pytorch_lightning/trainer/data_loading.py +++ b/pytorch_lightning/trainer/data_loading.py @@ -139,6 +139,7 @@ def _get_distributed_sampler(self, dataloader): else: world_size = { 'ddp': self.num_nodes * self.num_processes, + 'ddp_spawn': self.num_nodes * self.num_processes, 'ddp2': self.num_nodes, 'ddp_cpu': self.num_processes * self.num_nodes } diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -221,7 +221,7 @@ def set_distributed_mode(self, distributed_backend): elif self.num_gpus > 1: self.use_dp = True - elif distributed_backend == "ddp": + elif distributed_backend in ['ddp', 'ddp_spawn']: if self.num_gpus == 0: if self.num_nodes > 1 or self.num_processes > 1: self.use_ddp = True # ddp_cpu @@ -378,6 +378,7 @@ def spawn_ddp_children(self, model): self.interactive_ddp_procs = [] for local_rank in range(1, self.num_processes): + print('launching local_rank', local_rank) env_copy = os.environ.copy() env_copy['LOCAL_RANK'] = f'{local_rank}' @@ -394,7 +395,7 @@ def spawn_ddp_children(self, model): local_rank = 0 self.ddp_train(local_rank, model, is_master=True) - def ddp_train(self, process_idx, model, is_master=False): + def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): """ Entry point into a DP thread :param gpu_idx: @@ -402,6 +403,9 @@ def ddp_train(self, process_idx, model, is_master=False): :param cluster_obj: :return: """ + # offset the process id if requested + process_idx = process_idx + proc_offset + # show progressbar only on progress_rank 0 if (self.node_rank != 0 or process_idx != 0) and self.progress_bar_callback is not None: self.progress_bar_callback.disable() @@ -454,7 +458,7 @@ def ddp_train(self, process_idx, model, is_master=False): self.reinit_scheduler_properties(self.optimizers, self.lr_schedulers) # DDP2 uses all GPUs on the machine - if self.distributed_backend == 'ddp': + if self.distributed_backend == 'ddp' or self.distributed_backend == 'ddp_spawn': device_ids = [self.root_gpu] elif self.use_ddp2: device_ids = self.data_parallel_device_ids diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -246,7 +246,7 @@ def __init__( Use `row_log_interval` instead. Will remove 0.9.0. - distributed_backend: The distributed backend to use. + distributed_backend: The distributed backend to use (dp, ddp, ddp2, ddp_spawn) use_amp: .. warning:: .. deprecated:: 0.7.0 @@ -876,9 +876,16 @@ def fit( self.ddp_train(task, model) elif self.distributed_backend == 'cpu_ddp': + self.__set_random_port() self.model = model mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,)) + elif self.distributed_backend == 'ddp_spawn': + model.share_memory() + + # spin up peers + mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, )) + elif self.distributed_backend == 'ddp': self.spawn_ddp_children(model)
verify ddp and ddp_spawn implementation CUDA error: an illegal memory access was encountered after updating to the latest stable packages Can anyone help with this CUDA error: an illegal memory access was encountered ?? It runs fine for several iterations... ## 🐛 Bug ``` Traceback (most recent call last): File "train_gpu.py", line 237, in <module> main_local(hparam_trial) File "train_gpu.py", line 141, in main_local trainer.fit(model) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 859, in fit self.single_gpu_train(model) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 503, in single_gpu_train self.run_pretrain_routine(model) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 1015, in run_pretrain_routine self.train() File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 347, in train self.run_training_epoch() File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 419, in run_training_epoch _outputs = self.run_training_batch(batch, batch_idx) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 604, in run_training_batch self.batch_loss_value.append(loss) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/supporters.py", line 44, in append x = x.to(self.memory) RuntimeError: CUDA error: an illegal memory access was encountered ``` ### To Reproduce ### Environment * CUDA: - GPU: - Quadro P6000 - available: True - version: 10.2 * Packages: - numpy: 1.18.1 - pyTorch_debug: False - pyTorch_version: 1.5.0 - pytorch-lightning: 0.7.6 - tensorboard: 2.2.2 - tqdm: 4.46.1 * System: - OS: Linux - architecture: - 64bit - - processor: x86_64 - python: 3.7.0 - version: #47~18.04.1-Ubuntu SMP Thu May 7 13:10:50 UTC 2020
2020-06-08T15:37:16Z
[]
[]
Traceback (most recent call last): File "train_gpu.py", line 237, in <module> main_local(hparam_trial) File "train_gpu.py", line 141, in main_local trainer.fit(model) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 859, in fit self.single_gpu_train(model) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 503, in single_gpu_train self.run_pretrain_routine(model) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 1015, in run_pretrain_routine self.train() File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 347, in train self.run_training_epoch() File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 419, in run_training_epoch _outputs = self.run_training_batch(batch, batch_idx) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 604, in run_training_batch self.batch_loss_value.append(loss) File "/shared/storage/cs/staffstore/username/anaconda3/envs/sh1/lib/python3.7/site-packages/pytorch_lightning/trainer/supporters.py", line 44, in append x = x.to(self.memory) RuntimeError: CUDA error: an illegal memory access was encountered
188
Lightning-AI/lightning
Lightning-AI__lightning-2216
e780072961562ab1d89bad871918fcc422ad0ac6
diff --git a/pytorch_lightning/loggers/base.py b/pytorch_lightning/loggers/base.py --- a/pytorch_lightning/loggers/base.py +++ b/pytorch_lightning/loggers/base.py @@ -3,13 +3,11 @@ import operator from abc import ABC, abstractmethod from argparse import Namespace -from typing import Union, Optional, Dict, Iterable, Any, Callable, List, Sequence, Mapping, Tuple +from typing import Union, Optional, Dict, Iterable, Any, Callable, List, Sequence, Mapping, Tuple, MutableMapping import numpy as np import torch -from pytorch_lightning.utilities import rank_zero_only - class LightningLoggerBase(ABC): """ @@ -174,9 +172,9 @@ def _flatten_dict(params: Dict[str, Any], delimiter: str = '/') -> Dict[str, Any def _dict_generator(input_dict, prefixes=None): prefixes = prefixes[:] if prefixes else [] - if isinstance(input_dict, dict): + if isinstance(input_dict, MutableMapping): for key, value in input_dict.items(): - if isinstance(value, (dict, Namespace)): + if isinstance(value, (MutableMapping, Namespace)): value = vars(value) if isinstance(value, Namespace) else value for d in _dict_generator(value, prefixes + [key]): yield d
Hydra MLFlow Clash <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug When using the MLFlow logger with Hydra, because the parameters passed to the LightningModule is a `DictConfig`, the condition in the `logger/base.py` is not met. https://github.com/PyTorchLightning/pytorch-lightning/blob/8211256c46430e43e0c27e4f078c72085bb4ea34/pytorch_lightning/loggers/base.py#L177 ### To Reproduce Use Hydra and MLFlow together. <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> ```python Traceback (most recent call last): File "/home/siavash/KroniKare/kwae2/kwae_ma/models/pl_train_segmentation_model.py", line 115, in <module> main() File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/hydra/main.py", line 24, in decorated_main strict=strict, File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/hydra/_internal/utils.py", line 174, in run_hydra overrides=args.overrides, File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/hydra/_internal/hydra.py", line 86, in run job_subdir_key=None, File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/hydra/plugins/common/utils.py", line 109, in run_job ret.return_value = task_function(task_cfg) File "/home/siavash/KroniKare/kwae2/kwae_ma/models/pl_train_segmentation_model.py", line 111, in main trainer.fit(wound_seg_pl) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 765, in fit self.single_gpu_train(model) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 492, in single_gpu_train self.run_pretrain_routine(model) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 843, in run_pretrain_routine self.logger.log_hyperparams(ref_model.hparams) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/loggers/base.py", line 275, in log_hyperparams [logger.log_hyperparams(params) for logger in self._logger_iterable] File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/loggers/base.py", line 275, in <listcomp> [logger.log_hyperparams(params) for logger in self._logger_iterable] File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/utilities/distributed.py", line 10, in wrapped_fn return fn(*args, **kwargs) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/loggers/mlflow.py", line 105, in log_hyperparams self.experiment.log_param(self.run_id, k, v) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/mlflow/tracking/client.py", line 206, in log_param self._tracking_client.log_param(run_id, key, value) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/mlflow/tracking/_tracking_service/client.py", line 177, in log_param _validate_param_name(key) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/mlflow/utils/validation.py", line 120, in _validate_param_name INVALID_PARAMETER_VALUE) mlflow.exceptions.MlflowException: Invalid parameter name: ''. Names may be treated as files in certain cases, and must not resolve to other names when treated as such. 
This name would resolve to '.' ``` ### Expected behavior Check whether the instance is a `dict` or a `DictConfig` in the given line.
Hi! thanks for your contribution, great first issue! > Check whether the instance is a `dict` or a `DictConfig` in the given line. @ssakhavi that sounds like a reasonable solution, mind sending a PR with the fix and its test?
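A small sketch of the check the patch above relies on — `MutableMapping` covers both plain dicts and (per the patch) Hydra/OmegaConf `DictConfig` objects, while `dict` alone does not; the omegaconf lines are commented out since they assume omegaconf is installed:

```python
from collections.abc import MutableMapping


def needs_recursion(value) -> bool:
    # isinstance(value, dict) is False for Hydra/OmegaConf DictConfig objects,
    # so nested config values fell through and produced empty parameter names;
    # MutableMapping covers plain dicts and DictConfig alike
    return isinstance(value, MutableMapping)


print(needs_recursion({'lr': 1e-3}))  # True
# from omegaconf import OmegaConf      # assumes omegaconf is available
# print(needs_recursion(OmegaConf.create({'lr': 1e-3})))  # True
```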
2020-06-17T03:24:11Z
[]
[]
Traceback (most recent call last): File "/home/siavash/KroniKare/kwae2/kwae_ma/models/pl_train_segmentation_model.py", line 115, in <module> main() File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/hydra/main.py", line 24, in decorated_main strict=strict, File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/hydra/_internal/utils.py", line 174, in run_hydra overrides=args.overrides, File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/hydra/_internal/hydra.py", line 86, in run job_subdir_key=None, File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/hydra/plugins/common/utils.py", line 109, in run_job ret.return_value = task_function(task_cfg) File "/home/siavash/KroniKare/kwae2/kwae_ma/models/pl_train_segmentation_model.py", line 111, in main trainer.fit(wound_seg_pl) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 765, in fit self.single_gpu_train(model) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 492, in single_gpu_train self.run_pretrain_routine(model) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 843, in run_pretrain_routine self.logger.log_hyperparams(ref_model.hparams) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/loggers/base.py", line 275, in log_hyperparams [logger.log_hyperparams(params) for logger in self._logger_iterable] File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/loggers/base.py", line 275, in <listcomp> [logger.log_hyperparams(params) for logger in self._logger_iterable] File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/utilities/distributed.py", line 10, in wrapped_fn return fn(*args, **kwargs) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/pytorch_lightning/loggers/mlflow.py", line 105, in log_hyperparams self.experiment.log_param(self.run_id, k, v) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/mlflow/tracking/client.py", line 206, in log_param self._tracking_client.log_param(run_id, key, value) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/mlflow/tracking/_tracking_service/client.py", line 177, in log_param _validate_param_name(key) File "/home/siavash/anaconda3/envs/kwae-ma/lib/python3.7/site-packages/mlflow/utils/validation.py", line 120, in _validate_param_name INVALID_PARAMETER_VALUE) mlflow.exceptions.MlflowException: Invalid parameter name: ''. Names may be treated as files in certain cases, and must not resolve to other names when treated as such. This name would resolve to '.'
201
Lightning-AI/lightning
Lightning-AI__lightning-2255
b5a2f1ec4463064394dc6d977ffd246aa11158af
diff --git a/pl_examples/basic_examples/gpu_template.py b/pl_examples/basic_examples/gpu_template.py --- a/pl_examples/basic_examples/gpu_template.py +++ b/pl_examples/basic_examples/gpu_template.py @@ -23,7 +23,7 @@ def main(hparams): # ------------------------ # 1 INIT LIGHTNING MODEL # ------------------------ - model = LightningTemplateModel(hparams) + model = LightningTemplateModel(**vars(hparams)) # ------------------------ # 2 INIT TRAINER @@ -61,7 +61,7 @@ def main(hparams): '--distributed_backend', type=str, default='dp', - help='supports three options dp, ddp, ddp2' + help='supports four options dp, ddp, ddp2, ddp_spawn' ) parent_parser.add_argument( '--use_16bit',
CPU/GPU Template ## 🐛 Bug The GPU or CPU template do not run currently on master after changes including the setup hook. ``` python -m pl_examples.basic_examples.gpu_template --gpus 4 --distributed_backend ddp python -m pl_examples.basic_examples.cpu_template ``` CPU Template Error: ``` Traceback (most recent call last): File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/lib/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/anthony/Downloads/pytorch-lightning/pl_examples/basic_examples/cpu_template.py", line 53, in <module> main(args) File "/home/anthony/Downloads/pytorch-lightning/pl_examples/basic_examples/cpu_template.py", line 34, in main trainer.fit(model) File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 952, in fit self.run_pretrain_routine(model) File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 1063, in run_pretrain_routine self.reset_val_dataloader(ref_model) File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/data_loading.py", line 331, in reset_val_dataloader self._reset_eval_dataloader(model, 'val') File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/data_loading.py", line 253, in _reset_eval_dataloader dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader')) File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/data_loading.py", line 352, in request_dataloader dataloader = dataloader_fx() File "/home/anthony/Downloads/pytorch-lightning/pl_examples/models/lightning_template.py", line 158, in val_dataloader return DataLoader(self.mnist_test, batch_size=self.batch_size, num_workers=4) File "/home/anthony/.cache/pypoetry/virtualenvs/robotics-zp-60jGk-py3.6/lib/python3.6/site-packages/torch/nn/modules/module.py", line 594, in __getattr__ type(self).__name__, name)) AttributeError: 'LightningTemplateModel' object has no attribute 'mnist_test' ``` GPU Template Error: ``` File "/home/anthony/Downloads/pytorch-lightning/pl_examples/models/lightning_template.py", line 64, in __init__ self.c_d1_drop = nn.Dropout(self.drop_prob) File "/home/anthony/.cache/pypoetry/virtualenvs/robotics-zp-60jGk-py3.6/lib/python3.6/site-packages/torch/nn/modules/dropout.py", line 10, in __init__ if p < 0 or p > 1: TypeError: '<' not supported between instances of 'Namespace' and 'int' ``` ### Environment * CUDA: - GPU: - GeForce RTX 2080 Ti - GeForce RTX 2080 Ti - GeForce RTX 2080 Ti - GeForce RTX 2080 Ti - available: True - version: 10.2 * Packages: - numpy: 1.18.4 - pyTorch_debug: False - pyTorch_version: 1.5.0 - pytorch-lightning: 0.8.0 - tensorboard: 2.2.1 - tqdm: 4.46.0 * System: - OS: Linux - architecture: - 64bit - ELF - processor: x86_64 - python: 3.6.8 - version: #44~18.04.2-Ubuntu SMP Thu Apr 23 14:27:18 UTC 2020
try again? > try again? it is in master now... :(
2020-06-19T02:43:10Z
[]
[]
Traceback (most recent call last): File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/lib/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/anthony/Downloads/pytorch-lightning/pl_examples/basic_examples/cpu_template.py", line 53, in <module> main(args) File "/home/anthony/Downloads/pytorch-lightning/pl_examples/basic_examples/cpu_template.py", line 34, in main trainer.fit(model) File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 952, in fit self.run_pretrain_routine(model) File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 1063, in run_pretrain_routine self.reset_val_dataloader(ref_model) File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/data_loading.py", line 331, in reset_val_dataloader self._reset_eval_dataloader(model, 'val') File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/data_loading.py", line 253, in _reset_eval_dataloader dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader')) File "/home/anthony/Downloads/pytorch-lightning/pytorch_lightning/trainer/data_loading.py", line 352, in request_dataloader dataloader = dataloader_fx() File "/home/anthony/Downloads/pytorch-lightning/pl_examples/models/lightning_template.py", line 158, in val_dataloader return DataLoader(self.mnist_test, batch_size=self.batch_size, num_workers=4) File "/home/anthony/.cache/pypoetry/virtualenvs/robotics-zp-60jGk-py3.6/lib/python3.6/site-packages/torch/nn/modules/module.py", line 594, in __getattr__ type(self).__name__, name)) AttributeError: 'LightningTemplateModel' object has no attribute 'mnist_test'
209
Lightning-AI/lightning
Lightning-AI__lightning-2293
3256fe4e5a405db1ab00d4cf4d48cbbfc7730959
diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py --- a/pytorch_lightning/trainer/data_loading.py +++ b/pytorch_lightning/trainer/data_loading.py @@ -52,6 +52,8 @@ def _has_len(dataloader: DataLoader) -> bool: return True except TypeError: return False + except NotImplementedError: # e.g. raised by torchtext if a batch_size_fn is used + return False class TrainerDataLoadingMixin(ABC):
_has_len does not handle NotImplementedError (raised by torchtext) <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug When using torchtext.data.Iterator with a batch_size_fn function the __len__ function raises a NotImplementedError which is not caught by _has_len function. A bug-fix is **very simple** by just returning False if a NotImplementedError is raised. This is unlikely to have any negative side effects since it corresponds with what _hads_len is expected to do. The fix allowed me to train my model using torch text. I plan to submit a pull request with the fix above. There are no additional dependencies required; however this problem occurred when using torchtext. Example stack trace: ``` Traceback (most recent call last): File "/Users/thomas/scm/OakDataPrep/oakSkipThoughtTrainer.py", line 18, in <module> trainer.fit(model) File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 952, in fit self.run_pretrain_routine(model) File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 1091, in run_pretrain_routine self.train() File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 334, in train self.reset_train_dataloader(model) File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/data_loading.py", line 201, in reset_train_dataloader if not _has_len(self.train_dataloader): File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/data_loading.py", line 49, in _has_len if len(dataloader) == 0: File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/torchtext/data/iterator.py", line 136, in __len__ raise NotImplementedError NotImplementedError ``` ### To Reproduce Sorry I currently don't have a minimal example. The issue will always occur when torchtext.data.Iterator gets a batch_size_fn passed in. If the fix is not convincing I can take the time and construct a code example. Hope this is not necessary. #### Code sample I created my own Iterator for a Skip-Thought model, that dynamically batches sentences together. This might be unnecessary complex, or even not really useful however it revealed that issue described above when using torchtext. For context here is a code excerpt that creates the issue: ``` import torchtext ... global max_src_in_batch, max_tgt_in_batch def batch_size_fn(new, count, sofar): "Keep augmenting batch and calculate total number of tokens + padding." 
global max_src_in_batch, max_tgt_in_batch if count == 1: max_src_in_batch = 0 max_tgt_in_batch = 0 max_src_in_batch = max(max_src_in_batch, len(new.current)) max_tgt_in_batch = max(max_tgt_in_batch, len(new.next) + 2) src_elements = count * max_src_in_batch tgt_elements = count * max_tgt_in_batch return max(src_elements, tgt_elements) class MyIterator(torchtext.data.Iterator): def create_batches(self): if self.train: def pool(d, random_shuffler): for p in data.batch(d, self.batch_size * 100): p_batch = data.batch( sorted(p, key=self.sort_key), self.batch_size, self.batch_size_fn) for b in random_shuffler(list(p_batch)): yield b self.batches = pool(self.data(), self.random_shuffler) else: self.batches = [] for b in data.batch(self.data(), self.batch_size, self.batch_size_fn): self.batches.append(sorted(b, key=self.sort_key)) ... class SkipThoughts(pl.LightningModule): ... @pl.data_loader def train_dataloader(self): train_iter = MyIterator(self.my_train_dataloader, batch_size=self.batch_size, repeat=False, sort_key=lambda x: data.interleave_keys(len(x.current), data.interleave_keys(len(x.prev), len(x.next))), batch_size_fn=batch_size_fn, train=True, shuffle=True) return train_iter ``` But this happens whenever a batch_size_fn is used in torchtext. Because it is unknown how many batches the data set will have torchtext __len__ method returns a NotImplementedError. See code snipped below: ``` def __len__(self): if self.batch_size_fn is not None: raise NotImplementedError return math.ceil(len(self.dataset) / self.batch_size) ``` ### Expected behavior The function _has_len tests if len can is available and then returns True, otherwise False. It shoudl return False if NotImplementedError is raised. ### Environment /Users/thomas/virtualenv/Python3/PyTorch/env/bin/python /Users/thomas/scm/OakDataPrep/collect_env_details.py * CUDA: - GPU: - available: False - version: None * Packages: - numpy: 1.18.2 - pyTorch_debug: False - pyTorch_version: 1.5.0 - pytorch-lightning: 0.8.0 - tensorboard: 2.2.0 - tqdm: 4.45.0 * System: - OS: Darwin - architecture: - 64bit - - processor: i386 - python: 3.7.7 - version: Darwin Kernel Version 19.5.0: Tue May 26 20:41:44 PDT 2020; root:xnu-6153.121.2~2/RELEASE_X86_64 Process finished with exit code 0 ### Additional context Issue occur with Pytorch-Lighning 0.8 and Torchtext 0.6 <!-- Add any other context about the problem here. -->
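The fix the issue asks for is small; a simplified sketch of `_has_len` with the extra handler (the real function also raises `ValueError` for an empty dataloader, which is omitted here):

```python
from torch.utils.data import DataLoader


def _has_len(dataloader: DataLoader) -> bool:
    """Return True only if the dataloader exposes a usable __len__."""
    try:
        return len(dataloader) > 0
    except TypeError:
        # e.g. an iterable-style dataset without a length
        return False
    except NotImplementedError:
        # e.g. torchtext iterators raise this when a batch_size_fn is set
        return False
```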
2020-06-19T23:57:59Z
[]
[]
Traceback (most recent call last): File "/Users/thomas/scm/OakDataPrep/oakSkipThoughtTrainer.py", line 18, in <module> trainer.fit(model) File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 952, in fit self.run_pretrain_routine(model) File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 1091, in run_pretrain_routine self.train() File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 334, in train self.reset_train_dataloader(model) File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/data_loading.py", line 201, in reset_train_dataloader if not _has_len(self.train_dataloader): File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/pytorch_lightning/trainer/data_loading.py", line 49, in _has_len if len(dataloader) == 0: File "/Users/thomas/virtualenv/Python3/PyTorch/env/lib/python3.7/site-packages/torchtext/data/iterator.py", line 136, in __len__ raise NotImplementedError NotImplementedError
213
Lightning-AI/lightning
Lightning-AI__lightning-2356
220bb6db57e7181e857a128e245ce242b6cf429f
diff --git a/pytorch_lightning/trainer/optimizers.py b/pytorch_lightning/trainer/optimizers.py --- a/pytorch_lightning/trainer/optimizers.py +++ b/pytorch_lightning/trainer/optimizers.py @@ -111,15 +111,25 @@ def configure_schedulers(self, schedulers: list): def reinit_scheduler_properties(self, optimizers: list, schedulers: list): # Reinitialize optimizer.step properties added by schedulers for scheduler in schedulers: + scheduler = scheduler['scheduler'] + for optimizer in optimizers: - scheduler = scheduler['scheduler'] # check that we dont mix users optimizers and schedulers if scheduler.optimizer == optimizer: # Find the mro belonging to the base lr scheduler class for i, mro in enumerate(scheduler.__class__.__mro__): - if mro == optim.lr_scheduler._LRScheduler: + if ( + mro == optim.lr_scheduler._LRScheduler + or mro == optim.lr_scheduler.ReduceLROnPlateau + ): idx = i - scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer) + state = scheduler.state_dict() + else: + state = None + + scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer) + if state is not None: + scheduler.load_state_dict(state) class _MockOptimizer(Optimizer):
Trainer(precision=16) fails with optim.lr_scheduler.ReduceLROnPlateau <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> ### To Reproduce Steps to reproduce the behavior: 1. Create a `pl.LightningModule` that returns your optimizer along with a `optim.lr_scheduler.ReduceLROnPlateau` scheduler from `configure_optimizers` 2. Create a `pl.Trainer` wit `precision=16` 3. Run your training (i.e., `trainer.fit(model)`) 4. See error ```console Traceback (most recent call last): File "main.py", line 65, in <module> main() File "main.py", line 61, in main trainer.fit(model) File "/workspace/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 889, in fit self.dp_train(model) File "/workspace/pytorch-lightning/pytorch_lightning/trainer/distrib_parts.py", line 223, in dp_train self.reinit_scheduler_properties(optimizers, self.lr_schedulers) File "/workspace/pytorch-lightning/pytorch_lightning/trainer/optimizers.py", line 122, in reinit_scheduler_properties scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer) UnboundLocalError: local variable 'idx' referenced before assignment ``` <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> <!-- #### Code sample --> <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> <!-- ### Expected behavior --> <!-- A clear and concise description of what you expected to happen. --> <!-- ### Environment Please copy and paste the output from our [environment collection script](https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py) (or fill out the checklist below manually). You can get the script and run it with: ``` wget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py # For security purposes, please check the contents of collect_env_details.py before running it. python collect_env_details.py ``` - PyTorch Version (1.5): - OS (Linux): ### Additional context --> <!-- Add any other context about the problem here. --> The error occurs in `pytorch-lightning/pytorch_lightning/trainer/optimizers.py", line 122`. ```python def reinit_scheduler_properties(self, optimizers: list, schedulers: list): # Reinitialize optimizer.step properties added by schedulers for scheduler in schedulers: for optimizer in optimizers: scheduler = scheduler['scheduler'] # check that we dont mix users optimizers and schedulers if scheduler.optimizer == optimizer: # Find the mro belonging to the base lr scheduler class for i, mro in enumerate(scheduler.__class__.__mro__): if mro == optim.lr_scheduler._LRScheduler: idx = i scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer) ``` The `idx` local variable is unassigned because `optim.lr_scheduler.ReduceLROnPlateau` is not a subclass of `optim.lr_scheduler._LRScheduler`. I could work around the error by adding a specific check for `optim.lr_scheduler.ReduceLROnPlateau` but I'm not sure if this is a good solution. 
```python def reinit_scheduler_properties(self, optimizers: list, schedulers: list): # Reinitialize optimizer.step properties added by schedulers for scheduler in schedulers: for optimizer in optimizers: scheduler = scheduler['scheduler'] # check that we dont mix users optimizers and schedulers if scheduler.optimizer == optimizer: # Find the mro belonging to the base lr scheduler class for i, mro in enumerate(scheduler.__class__.__mro__): if mro == optim.lr_scheduler._LRScheduler: idx = i elif mro == optim.lr_scheduler.ReduceLROnPlateau: idx = i scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer) ``` ### Related issue in PyTorch: ReduceLROnPlateau parent class is not _LRScheduler #21981 https://github.com/pytorch/pytorch/issues/21981
Hi! thanks for your contribution!, great first issue! @naokishibuya good catch. It seems like a problem that should be solved upstream in pytorch, but for now we can solve this locally. Would you be up for a PR? When I tried this fix, it solved the error but unfortunately `ReduceLROnPlateau` stopped working for me (i.e. there was no indication of the LR decreasing with `verbose=True` or on TensorBoard). If I switched back to `precision=32`, it works normally again I think that the fix is actually working, however only calling `__init__(scheduler, optimizer)` will reset all other arguments (patience, mode, ect) to default values for the `ReduceLrOnPlauteau` scheduler. A solution to this is to copy over these properties: ``` __init__(scheduler, optimizer, patience=scheduler.patience,mode=scheduler.mode,...) ``` Again I think this is a bit hacky, and a proper solution upstream in pytorch is better. I think this does the trick for me: ```python def reinit_scheduler_properties(self, optimizers: list, schedulers: list): # Reinitialize optimizer.step properties added by schedulers for scheduler in schedulers: for optimizer in optimizers: scheduler = scheduler["scheduler"] # check that we dont mix users optimizers and schedulers if scheduler.optimizer == optimizer: # Find the mro belonging to the base lr scheduler class for i, mro in enumerate(scheduler.__class__.__mro__): if ( mro == optim.lr_scheduler._LRScheduler or mro == optim.lr_scheduler.ReduceLROnPlateau ): idx = i state = scheduler.state_dict() else: state = None scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer) if state is not None: scheduler.load_state_dict(state) ``` Happy to open a PR if it looks ok to you guys
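A standalone sketch of the idea behind the accepted fix may help: re-run the base-class `__init__` against the optimizer, but snapshot and restore the scheduler state so settings such as `patience` survive, and treat `ReduceLROnPlateau` separately because it does not inherit from `_LRScheduler`. The helper name `rebind_scheduler` is invented for illustration; this is a sketch, not the Trainer code itself.

```python
from torch import nn, optim


def rebind_scheduler(scheduler, optimizer):
    """Re-run the base-class __init__ so `scheduler` points at `optimizer`,
    then restore the previous state (patience, mode, last_epoch, ...)."""
    for klass in scheduler.__class__.__mro__:
        if klass in (optim.lr_scheduler._LRScheduler,
                     optim.lr_scheduler.ReduceLROnPlateau):
            state = scheduler.state_dict()     # snapshot before re-init
            klass.__init__(scheduler, optimizer)
            scheduler.load_state_dict(state)   # put the snapshot back
            return
    raise TypeError(f"Unsupported scheduler type: {type(scheduler)}")


model = nn.Linear(4, 1)
opt = optim.SGD(model.parameters(), lr=0.1)
sched = optim.lr_scheduler.ReduceLROnPlateau(opt, mode="min", patience=7)

rebind_scheduler(sched, opt)
assert sched.patience == 7   # settings such as `patience` survive the re-init
```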
2020-06-25T02:42:06Z
[]
[]
Traceback (most recent call last): File "main.py", line 65, in <module> main() File "main.py", line 61, in main trainer.fit(model) File "/workspace/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 889, in fit self.dp_train(model) File "/workspace/pytorch-lightning/pytorch_lightning/trainer/distrib_parts.py", line 223, in dp_train self.reinit_scheduler_properties(optimizers, self.lr_schedulers) File "/workspace/pytorch-lightning/pytorch_lightning/trainer/optimizers.py", line 122, in reinit_scheduler_properties scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer) UnboundLocalError: local variable 'idx' referenced before assignment
219
Lightning-AI/lightning
Lightning-AI__lightning-2358
a5f45787eabddfec4559983f8e6ba1c8317f62f1
diff --git a/pl_examples/basic_examples/gpu_template.py b/pl_examples/basic_examples/gpu_template.py --- a/pl_examples/basic_examples/gpu_template.py +++ b/pl_examples/basic_examples/gpu_template.py @@ -61,7 +61,8 @@ def main(hparams): '--distributed_backend', type=str, default='dp', - help='supports four options dp, ddp, ddp2, ddp_spawn' + help='supports four options dp, ddp, ddp2, ddp_spawn, ...', + choices=['dp', 'ddp', 'ddp2', 'ddp_spawn', 'ddp_cpu'], ) parent_parser.add_argument( '--use_16bit', diff --git a/pytorch_lightning/core/saving.py b/pytorch_lightning/core/saving.py --- a/pytorch_lightning/core/saving.py +++ b/pytorch_lightning/core/saving.py @@ -279,7 +279,7 @@ def load_hparams_from_tags_csv(tags_csv: str) -> Dict[str, Any]: """Load hparams from a file. >>> hparams = Namespace(batch_size=32, learning_rate=0.001, data_root='./any/path/here') - >>> path_csv = './testing-hparams.csv' + >>> path_csv = os.path.join('.', 'testing-hparams.csv') >>> save_hparams_to_tags_csv(path_csv, hparams) >>> hparams_new = load_hparams_from_tags_csv(path_csv) >>> vars(hparams) == hparams_new @@ -304,7 +304,7 @@ def save_hparams_to_tags_csv(tags_csv: str, hparams: Union[dict, Namespace]) -> if isinstance(hparams, Namespace): hparams = vars(hparams) - with open(tags_csv, 'w') as fp: + with open(tags_csv, 'w', newline='') as fp: fieldnames = ['key', 'value'] writer = csv.DictWriter(fp, fieldnames=fieldnames) writer.writerow({'key': 'key', 'value': 'value'}) diff --git a/pytorch_lightning/metrics/converters.py b/pytorch_lightning/metrics/converters.py --- a/pytorch_lightning/metrics/converters.py +++ b/pytorch_lightning/metrics/converters.py @@ -10,8 +10,16 @@ import numpy as np import torch from torch.utils.data._utils.collate import np_str_obj_array_pattern - from pytorch_lightning.utilities.apply_func import apply_to_collection +from pytorch_lightning.utilities import rank_zero_warn + +try: + from torch.distributed import ReduceOp +except ImportError: + class ReduceOp: + SUM = None + + rank_zero_warn('Unsupported `ReduceOp` for distributed computing.') def _apply_to_inputs(func_to_apply: Callable, *dec_args, **dec_kwargs) -> Callable: @@ -217,7 +225,7 @@ def _tensor_collection_metric_conversion(func_to_decorate: Callable) -> Callable def _sync_ddp_if_available(result: Union[torch.Tensor], group: Optional[Any] = None, - reduce_op: Optional[torch.distributed.ReduceOp] = None, + reduce_op: Optional[ReduceOp] = None, ) -> torch.Tensor: """ Function to reduce the tensors from several ddp processes to one master process @@ -247,7 +255,7 @@ def _sync_ddp_if_available(result: Union[torch.Tensor], def sync_ddp(group: Optional[Any] = None, - reduce_op: Optional[torch.distributed.ReduceOp] = None) -> Callable: + reduce_op: Optional[ReduceOp] = None) -> Callable: """ This decorator syncs a functions outputs across different processes for DDP. @@ -269,7 +277,7 @@ def decorator_fn(func_to_decorate): def numpy_metric(group: Optional[Any] = None, - reduce_op: Optional[torch.distributed.ReduceOp] = None) -> Callable: + reduce_op: Optional[ReduceOp] = None) -> Callable: """ This decorator shall be used on all function metrics working on numpy arrays. It handles the argument conversion and DDP reduction for metrics working on numpy. 
@@ -292,7 +300,7 @@ def decorator_fn(func_to_decorate): def tensor_metric(group: Optional[Any] = None, - reduce_op: Optional[torch.distributed.ReduceOp] = None) -> Callable: + reduce_op: Optional[ReduceOp] = None) -> Callable: """ This decorator shall be used on all function metrics working on tensors. It handles the argument conversion and DDP reduction for metrics working on tensors. @@ -314,7 +322,7 @@ def decorator_fn(func_to_decorate): def tensor_collection_metric(group: Optional[Any] = None, - reduce_op: Optional[torch.distributed.ReduceOp] = None) -> Callable: + reduce_op: Optional[ReduceOp] = None) -> Callable: """ This decorator shall be used on all function metrics working on tensors and returning collections that cannot be converted to tensors. diff --git a/pytorch_lightning/metrics/sklearns.py b/pytorch_lightning/metrics/sklearns.py --- a/pytorch_lightning/metrics/sklearns.py +++ b/pytorch_lightning/metrics/sklearns.py @@ -5,6 +5,18 @@ from pytorch_lightning import _logger as lightning_logger from pytorch_lightning.metrics.metric import NumpyMetric +from pytorch_lightning.utilities import rank_zero_warn + +try: + from torch.distributed import ReduceOp, group +except ImportError: + class ReduceOp: + SUM = None + + class group: + WORLD = None + + rank_zero_warn('Unsupported `ReduceOp` for distributed computing.') class SklearnMetric(NumpyMetric): @@ -20,8 +32,8 @@ class SklearnMetric(NumpyMetric): def __init__( self, metric_name: str, - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, **kwargs, ): """ @@ -82,8 +94,8 @@ class Accuracy(SklearnMetric): def __init__( self, normalize: bool = True, - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -136,8 +148,8 @@ class AUC(SklearnMetric): """ def __init__( self, - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -174,8 +186,8 @@ class AveragePrecision(SklearnMetric): def __init__( self, average: Optional[str] = 'macro', - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -240,8 +252,8 @@ class ConfusionMatrix(SklearnMetric): """ def __init__( self, labels: Optional[Sequence] = None, - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -304,8 +316,8 @@ def __init__( self, labels: Optional[Sequence] = None, pos_label: Union[str, int] = 1, average: Optional[str] = 'macro', - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -397,8 +409,8 @@ def __init__( labels: Optional[Sequence] = None, pos_label: Union[str, int] = 1, average: Optional[str] = 'macro', - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -488,8 +500,8 @@ def __init__( labels: Optional[Sequence] = None, pos_label: Union[str, int] = 1, average: 
Optional[str] = 'macro', - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -576,8 +588,8 @@ def __init__( labels: Optional[Sequence] = None, pos_label: Union[str, int] = 1, average: Optional[str] = 'macro', - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -663,8 +675,8 @@ class PrecisionRecallCurve(SklearnMetric): def __init__( self, pos_label: Union[str, int] = 1, - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -737,8 +749,8 @@ class ROC(SklearnMetric): def __init__( self, pos_label: Union[str, int] = 1, - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: @@ -795,8 +807,8 @@ class AUROC(SklearnMetric): def __init__( self, average: Optional[str] = 'macro', - reduce_group: Any = torch.distributed.group.WORLD, - reduce_op: Any = torch.distributed.ReduceOp.SUM, + reduce_group: Any = group.WORLD, + reduce_op: Any = ReduceOp.SUM, ): """ Args: diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py --- a/pytorch_lightning/trainer/data_loading.py +++ b/pytorch_lightning/trainer/data_loading.py @@ -35,7 +35,7 @@ try: import horovod.torch as hvd -except ImportError: +except (ModuleNotFoundError, ImportError): HOROVOD_AVAILABLE = False else: HOROVOD_AVAILABLE = True diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -139,7 +139,7 @@ def train_fx(trial_hparams, cluster_manager, _): try: import horovod.torch as hvd -except ImportError: +except (ModuleNotFoundError, ImportError): HOROVOD_AVAILABLE = False else: HOROVOD_AVAILABLE = True diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py --- a/pytorch_lightning/trainer/distrib_parts.py +++ b/pytorch_lightning/trainer/distrib_parts.py @@ -38,7 +38,7 @@ try: import horovod.torch as hvd -except ImportError: +except (ModuleNotFoundError, ImportError): HOROVOD_AVAILABLE = False else: HOROVOD_AVAILABLE = True diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py --- a/pytorch_lightning/trainer/evaluation_loop.py +++ b/pytorch_lightning/trainer/evaluation_loop.py @@ -144,7 +144,7 @@ try: import horovod.torch as hvd -except ImportError: +except (ModuleNotFoundError, ImportError): HOROVOD_AVAILABLE = False else: HOROVOD_AVAILABLE = True diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -52,7 +52,7 @@ try: import horovod.torch as hvd -except ImportError: +except (ModuleNotFoundError, ImportError): HOROVOD_AVAILABLE = False else: HOROVOD_AVAILABLE = True @@ -255,7 +255,7 @@ def __init__( Use `row_log_interval` instead. Will remove 0.9.0. 
- distributed_backend: The distributed backend to use (dp, ddp, ddp2, ddp_spawn) + distributed_backend: The distributed backend to use (dp, ddp, ddp2, ddp_spawn, ddp_cpu) use_amp: .. warning:: .. deprecated:: 0.7.0 @@ -885,7 +885,7 @@ def fit( task = int(os.environ['LOCAL_RANK']) self.ddp_train(task, model) - elif self.distributed_backend == 'cpu_ddp': + elif self.distributed_backend == 'ddp_cpu': self.set_random_port() self.model = model mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,)) diff --git a/pytorch_lightning/trainer/training_io.py b/pytorch_lightning/trainer/training_io.py --- a/pytorch_lightning/trainer/training_io.py +++ b/pytorch_lightning/trainer/training_io.py @@ -114,7 +114,7 @@ try: import horovod.torch as hvd -except ImportError: +except (ModuleNotFoundError, ImportError): HOROVOD_AVAILABLE = False else: HOROVOD_AVAILABLE = True diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -183,7 +183,7 @@ def training_step(self, batch, batch_idx): try: import horovod.torch as hvd -except ImportError: +except (ModuleNotFoundError, ImportError): HOROVOD_AVAILABLE = False else: HOROVOD_AVAILABLE = True diff --git a/pytorch_lightning/utilities/cloud_io.py b/pytorch_lightning/utilities/cloud_io.py --- a/pytorch_lightning/utilities/cloud_io.py +++ b/pytorch_lightning/utilities/cloud_io.py @@ -5,8 +5,7 @@ def load(path_or_url: str, map_location=None): - parsed = urlparse(path_or_url) - if parsed.scheme == '' or Path(path_or_url).is_file(): - # no scheme or local file + if urlparse(path_or_url).scheme == '' or Path(path_or_url).drive: # no scheme or with a drive letter return torch.load(path_or_url, map_location=map_location) - return torch.hub.load_state_dict_from_url(path_or_url, map_location=map_location) + else: + return torch.hub.load_state_dict_from_url(path_or_url, map_location=map_location)
accuracy metric dosen't support windows ## 🐛 Bug Pytorch Metric.Accuracy uses `ReduceOp` from 'torch.distribution' but torch.distributrion doesn't support `windows` - https://github.com/pytorch/pytorch/blob/cf8a9b50cacb1702f5855859c657a5358976437b/torch/distributed/__init__.py#L10 : `torch.distributed is available on Linux and MacOS.` ### To Reproduce Use Metric.Accuracy in Windows environment <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> #### Code sample - I use code sample from `https://github.com/PyTorchLightning/pytorch-lightning/issues/2355` ### Expected behavior add check OS in `metric.accuracy` and use condition for import different module ``` try: return platform.linux_distribution() except: return "N/A" ``` or warning to windows user, they can't use `metric.accuracy` ### Environment ``` * CUDA: - GPU: - GeForce RTX 2080 Ti - GeForce GTX 1080 Ti - available: True - version: 10.1 * Packages: - numpy: 1.18.1 - pyTorch_debug: False - pyTorch_version: 1.5.1 - pytorch-lightning: 0.8.1 - tensorboard: 2.2.1 - tqdm: 4.46.0 * System: - OS: Windows - architecture: - 64bit - WindowsPE - processor: Intel64 Family 6 Model 158 Stepping 10, GenuineIntel - python: 3.6.10 - version: 10.0.18362 ``` ### Additional context ``` Traceback (most recent call last): File "test.py", line 11, in <module> from pytorch_lightning.metrics.functional import accuracy File "C:\Users\dcho\Anaconda3\envs\torch_py36\lib\site-packages\pytorch_lightning\metrics\__init__.py", line 1, in <module> from pytorch_lightning.metrics.converters import numpy_metric, tensor_metric File "C:\Users\dcho\Anaconda3\envs\torch_py36\lib\site-packages\pytorch_lightning\metrics\converters.py", line 220, in <module> reduce_op: Optional[torch.distributed.ReduceOp] = None, AttributeError: module 'torch.distributed' has no attribute 'ReduceOp' ``` <!-- Add any other context about the problem here. --> Always thanks for developing & maintaining the cool framework
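The patch above handles this by guarding the import and substituting a stub class when `torch.distributed` does not expose `ReduceOp` (as on Windows). Reduced to a standalone pattern, with `warnings.warn` standing in for Lightning's `rank_zero_warn`, it looks roughly like this:

```python
import warnings

try:
    from torch.distributed import ReduceOp
except ImportError:
    class ReduceOp:
        """Stub so type hints and default arguments still resolve on
        platforms where torch.distributed is unavailable."""
        SUM = None

    warnings.warn('Unsupported `ReduceOp` for distributed computing.')


def sync_result(result, reduce_op=ReduceOp.SUM):
    # With the stub, `reduce_op` is simply None and the caller skips the
    # distributed reduction; on Linux/MacOS it is a real ReduceOp member.
    return result
```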
2020-06-25T07:51:08Z
[]
[]
Traceback (most recent call last): File "test.py", line 11, in <module> from pytorch_lightning.metrics.functional import accuracy File "C:\Users\dcho\Anaconda3\envs\torch_py36\lib\site-packages\pytorch_lightning\metrics\__init__.py", line 1, in <module> from pytorch_lightning.metrics.converters import numpy_metric, tensor_metric File "C:\Users\dcho\Anaconda3\envs\torch_py36\lib\site-packages\pytorch_lightning\metrics\converters.py", line 220, in <module> reduce_op: Optional[torch.distributed.ReduceOp] = None, AttributeError: module 'torch.distributed' has no attribute 'ReduceOp'
220
Lightning-AI/lightning
Lightning-AI__lightning-2360
f2710bb500be017d48ccc6cf596bbed6cc9bdad5
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -1193,7 +1193,8 @@ def test( self.teardown('test') if self.is_function_implemented('teardown'): - self.model.teardown('test') + model_ref = self.get_model() + model_ref.teardown('test') def check_model_configuration(self, model: LightningModule): r"""
AttributeError: 'LightningDataParallel' object has no attribute 'teardown' <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> ### To Reproduce Steps to reproduce the behavior: trainer = pytorch_lightning.Trainer( gpus=2, distributed_backend='dp' ) model = BaseModel.load_from_checkpoint(...) trainer.test(model) Traceback (most recent call last): File "run_kitti.py", line 351, in <module> trainer.test(model) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 1198, in test self.model.teardown('test') File "/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 594, in __getattr__ type(self).__name__, name)) AttributeError: 'LightningDataParallel' object has no attribute 'teardown' <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> #### Code sample <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> ### Expected behavior <!-- A clear and concise description of what you expected to happen. --> ### Environment * CUDA: - GPU: - GeForce GTX 1080 Ti - GeForce GTX 1080 Ti - available: True - version: 10.1 * Packages: - numpy: 1.18.1 - pyTorch_debug: False - pyTorch_version: 1.5.1 - pytorch-lightning: 0.8.1 - tensorboard: 2.2.2 - tqdm: 4.46.0 * System: - OS: Linux - architecture: - 64bit - - processor: x86_64 - python: 3.7.7 - version: #53~18.04.1-Ubuntu SMP Thu Jun 4 14:58:26 UTC 2020 ### Additional context <!-- Add any other context about the problem here. --> If I'm not missing something, this AttributeError is a bug on your side.
Hi! Thanks for your contribution, great first issue! +1 on this issue. I can also confirm this issue.
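The underlying problem is generic to `nn.DataParallel`: the wrapper only resolves parameters, buffers and submodules through `nn.Module.__getattr__`, so user-defined methods such as `teardown` must be called on the unwrapped `.module`, which is what the patch does via `get_model()`. A minimal illustration with a hypothetical `teardown` hook:

```python
from torch import nn


class MyModule(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(2, 2)

    def teardown(self, stage: str):
        print(f"teardown called for stage={stage}")


wrapped = nn.DataParallel(MyModule())

# wrapped.teardown("test")  # AttributeError: 'DataParallel' object has no attribute 'teardown'
model_ref = wrapped.module if isinstance(wrapped, nn.DataParallel) else wrapped
model_ref.teardown("test")  # works: the hook is called on the unwrapped module
```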
2020-06-25T14:11:42Z
[]
[]
Traceback (most recent call last): File "run_kitti.py", line 351, in <module> trainer.test(model) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 1198, in test self.model.teardown('test') File "/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 594, in __getattr__ type(self).__name__, name)) AttributeError: 'LightningDataParallel' object has no attribute 'teardown'
221
Lightning-AI/lightning
Lightning-AI__lightning-2428
a75398530c3447ecf13f043a1bc817929b90fd65
diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -776,6 +776,7 @@ def optimizer_closure(self, split_batch, batch_idx, opt_idx, optimizer, hiddens) # PROCESS THE RESULT # ---------------------------- # format and reduce outputs accordingly + training_step_output_for_epoch_end = training_step_output training_step_output = self.process_output(training_step_output, train=True) # TODO: temporary part of structured results PR @@ -788,7 +789,7 @@ def optimizer_closure(self, split_batch, batch_idx, opt_idx, optimizer, hiddens) ) # if the user decides to finally reduce things in epoch_end, save raw output without graphs - training_step_output_for_epoch_end = recursive_detach(training_step_output) + training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end) # accumulate loss # (if accumulate_grad_batches = 1 no effect)
training_epoch_end's outputs doesn't have 'loss' key pytorch-lightning: build from master ``` Traceback (most recent call last): File "main.py", line 140, in <module> main(hparams) File "main.py", line 72, in main trainer.fit(model) File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 881, in fit self.ddp_train(task, model) File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 539, in ddp_train self.run_pretrain_routine(model) File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 1091, in run_pretrain_routine self.train() File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 376, in train self.run_training_epoch() File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 510, in run_training_epoch self.run_training_epoch_end(epoch_output) File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 535, in run_training_epoch_end epoch_output = model.training_epoch_end(epoch_output) File "/mnt/lustre/maxiao1/PVM/models/baseline.py", line 335, in training_epoch_end avg_loss = torch.stack([x['loss'] for x in outputs]).mean() File "/mnt/lustre/maxiao1/PVM/models/baseline.py", line 335, in <listcomp> avg_loss = torch.stack([x['loss'] for x in outputs]).mean() KeyError: 'loss' ``` This is my code: ``` def training_step(self, batch, batch_idx): ... return {'loss': loss, "train_acc": acc} def training_epoch_end(self, outputs): avg_loss = torch.stack([x['loss'] for x in outputs]).mean() avg_acc = torch.stack([x['train_acc'] for x in outputs]).mean() logs = {'loss': avg_loss, 'train_acc': avg_acc} progress_bar = {'train_loss': avg_loss, 'train_acc': avg_acc} results = { 'log': logs, 'progress_bar': progress_bar } return results ```
Try: `avg_loss = torch.stack([x['batch_loss'] for x in outputs]).mean()` Thanks, it works but 'train_acc' key doesn't exist, neither do `batch_train_acc`. How to access other keys returned in training_step? As of now in lightning you can access them using `x['callback_metrics']['loss']` and `x['callback_metrics']['train_acc']`, but I think it should be handled in a similar way we do this with `validation_epoch_end` and `test_epoch_end`. Hi! One hint: for me it works with "loss" under windows but not under ubuntu. Weird!! Why is this think platform dependent?? :thinking: @Pet222 , are u sure that versions on ubuntu and windows are same? Hey @williamFalcon is this intended behaviour? I was surprised to see this breaking change being introduced with no warning. If it is intended, why not have consistent behaviour over `validation_epoch_end` and `test_epoch_end`. If it is not intended, as it seems due to the "bug fix" tag, are you working on it or should I make a PR for this? what is the behavior? that the "loss" key is not in training_epoch_end? If so, that's a bug because it should be there @williamFalcon , on the latest version, the `loss` key was changed to the `batch_loss`. I think it was changed [here](https://github.com/PyTorchLightning/pytorch-lightning/commit/0f073819d3e0df8db7602eab489b1bad0fc0949c#diff-c45bd21c331565cbe62aaa12fa43aa0aR717) Yes, the fact that you need to access it through 'callback metrics'. Got it! On Tue, 30 Jun 2020 at 12:44, William Falcon <notifications@github.com> wrote: > what is the behavior? that the "loss" key is not in training_epoch_end? If > so, that's a bug because it should be there > > — > You are receiving this because you commented. > Reply to this email directly, view it on GitHub > <https://github.com/PyTorchLightning/pytorch-lightning/issues/2372#issuecomment-651740702>, > or unsubscribe > <https://github.com/notifications/unsubscribe-auth/ABKWP6XTUJDTEDJ2NZQ3RKTRZHFY5ANCNFSM4OJKX4KQ> > . > -- Best Regards, Miguel Vera +351 915 198 452 miguel.coimbra.vera@protonmail.com Github/Captainvera <http://www.github.com/captainvera> @captainvera would love a PR :)
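The merged fix keeps a reference to the raw `training_step` return value before `process_output` renames its keys, and only detaches that raw dictionary for `training_epoch_end`. Sketched outside the Trainer, with a toy `recursive_detach` and a dummy `process_output` standing in for Lightning's internals:

```python
import torch


def recursive_detach(obj):
    """Detach tensors inside a (possibly nested) dict so no graph is kept alive."""
    if isinstance(obj, torch.Tensor):
        return obj.detach()
    if isinstance(obj, dict):
        return {k: recursive_detach(v) for k, v in obj.items()}
    return obj


def handle_step_output(training_step_output, process_output):
    # keep the raw user dict for training_epoch_end ...
    output_for_epoch_end = recursive_detach(training_step_output)
    # ... and only feed the processed/renamed version to the optimization logic
    processed = process_output(training_step_output)
    return output_for_epoch_end, processed


raw = {"loss": torch.tensor(0.5, requires_grad=True), "train_acc": torch.tensor(0.9)}
epoch_end_out, _ = handle_step_output(raw, lambda out: {"batch_loss": out["loss"]})
assert "loss" in epoch_end_out and not epoch_end_out["loss"].requires_grad
```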
2020-06-30T13:23:18Z
[]
[]
Traceback (most recent call last): File "main.py", line 140, in <module> main(hparams) File "main.py", line 72, in main trainer.fit(model) File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 881, in fit self.ddp_train(task, model) File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 539, in ddp_train self.run_pretrain_routine(model) File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 1091, in run_pretrain_routine self.train() File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 376, in train self.run_training_epoch() File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 510, in run_training_epoch self.run_training_epoch_end(epoch_output) File "/mnt/lustre/maxiao1/anaconda3/lib/python3.7/site-packages/pytorch_lightning/trainer/training_loop.py", line 535, in run_training_epoch_end epoch_output = model.training_epoch_end(epoch_output) File "/mnt/lustre/maxiao1/PVM/models/baseline.py", line 335, in training_epoch_end avg_loss = torch.stack([x['loss'] for x in outputs]).mean() File "/mnt/lustre/maxiao1/PVM/models/baseline.py", line 335, in <listcomp> avg_loss = torch.stack([x['loss'] for x in outputs]).mean() KeyError: 'loss'
230
Lightning-AI/lightning
Lightning-AI__lightning-2433
d4a02e3bd8471946c606fef7512ce44d42f07d3a
diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -802,9 +802,22 @@ def optimizer_closure(self, split_batch, batch_idx, opt_idx, optimizer, hiddens) if self.precision == 16 and not self.on_tpu: closure_loss = model_ref.amp_scale_loss(closure_loss, optimizer, opt_idx) + # enter amp context + if not NATIVE_AMP_AVALAIBLE: + context = closure_loss + closure_loss = closure_loss.__enter__() + # do backward pass model_ref.backward(self, closure_loss, optimizer, opt_idx) + # exit amp context + if self.precision == 16 and not NATIVE_AMP_AVALAIBLE: + a, b, c = None, None, None + error = context.__exit__(a, b, c) + if error: + rank_zero_warn(a, b, c) + raise Exception('apex unscale error') + # once backward has been applied, release graph closure_loss = closure_loss.detach() training_step_output.batch_loss = training_step_output.batch_loss.detach()
0.8.2 calls backward on '_GeneratorContextManager' ## 🐛 Bug 0.8.2 calls backward on '_GeneratorContextManager' and crashes training. 0.8.1 works correctly. my `training_step` returns `{'loss':loss, 'log':{'learn_rate':self.lr}}` ``` Traceback (most recent call last): File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap fn(i, *args) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 538, in ddp_train self.run_pretrain_routine(model) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 1100, in run_pretrain_routine self.train() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 370, in train self.run_training_epoch() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 452, in run_training_epoch batch_output = self.run_training_batch(batch, batch_idx) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 630, in run_training_batch self.hiddens File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 804, in optimizer_closure model_ref.backward(self, closure_loss, optimizer, opt_idx) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/core/hooks.py", line 189, in backward loss.backward() AttributeError: '_GeneratorContextManager' object has no attribute 'backward' ``` ### Expected behavior backward is called on the loss and training runs correctly
did you override optimizer step? could you try master? we just pushed a fix to a typo we had Can confirm this happens on 0.8.3 ok. Can you post a colab example that replicates this? @Anjum48 @s-rog colab please @williamFalcon my optimizer step was untouched, I can't run more testing atm but I'll get to it as soon as I can @williamFalcon Hi I also encountered this, with normal Adam optimizer. I don't have a colab to replicate this atm but from what I saw earlier, this can be replicated with any setting as long as the Trainer is set to precision=16 when using Apex. Under this condition, the following lines from training_loop.py and hooks.py will run: `if self.precision == 16 and not self.on_tpu closure_loss = model_ref.amp_scale_loss(closure_loss, optimizer, opt_idx) ` `scaled_loss = amp.scale_loss(unscaled_loss, optimizer)` will cause the closure_loss be a _GeneratorContextManager object. Which then cannot have a **backward()** method. It seems under the current design, pytorch lighting's **scale_loss** function can only be used as a context? @williamFalcon Here's a colab example (my first time using colab so let me know if you have issues seeing it) https://colab.research.google.com/drive/1G08jVDpx-T-5HE2c89RLJdq4u67mM2-o?usp=sharing I suspect the issue lies with Apex AMP as suggested above by @aeryen ummm. I think this is an apex issue. I can't replicate it with 16-bit native. ![image](https://user-images.githubusercontent.com/3640001/86135032-4c97ff80-bab8-11ea-942e-ffaae17aff07.png) @aeryen min share a minimal example to reproduce? hi sorry for the delay: https://colab.research.google.com/drive/1rjaRRwgBTm4CKPfe9po_WSxnKqY4jDRv?usp=sharing I agree this is an apex issue, i.e. only occur when NATIVE_AMP_AVALAIBLE is false in the hooks.py
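For context, `apex.amp.scale_loss` returns a context manager rather than a tensor, which is why calling `.backward()` on its return value fails; the patch enters and exits that context manually around the user's `backward` hook. The documented apex pattern, shown here as a sketch that assumes apex and a CUDA device are available:

```python
import torch
from torch import nn, optim
from apex import amp  # NVIDIA apex must be installed; a CUDA GPU is required

model = nn.Linear(8, 1).cuda()
optimizer = optim.SGD(model.parameters(), lr=0.1)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

loss = model(torch.randn(4, 8, device="cuda")).mean()
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()  # backward is called on the scaled tensor inside the context
optimizer.step()
```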
2020-06-30T18:33:09Z
[]
[]
Traceback (most recent call last): File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap fn(i, *args) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 538, in ddp_train self.run_pretrain_routine(model) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 1100, in run_pretrain_routine self.train() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 370, in train self.run_training_epoch() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 452, in run_training_epoch batch_output = self.run_training_batch(batch, batch_idx) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 630, in run_training_batch self.hiddens File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 804, in optimizer_closure model_ref.backward(self, closure_loss, optimizer, opt_idx) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/core/hooks.py", line 189, in backward loss.backward() AttributeError: '_GeneratorContextManager' object has no attribute 'backward'
231
Lightning-AI/lightning
Lightning-AI__lightning-2565
e1bc208f66891e22f0139619a1be5c06235a0f34
diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -189,6 +189,7 @@ class TrainerDDPMixin(ABC): num_nodes: int node_rank: int tpu_cores: int + testing: bool @property @abstractmethod @@ -555,15 +556,35 @@ def ddp_train(self, process_idx, q, model, is_master=False, proc_offset=0): # continue training routine results = self.run_pretrain_routine(model) + # persist info in ddp_spawn + self.__transfer_ddp_spawn_state_on_fit_end(model, q, results) + # clean up memory torch.cuda.empty_cache() + if self.global_rank == 0 and self.distributed_backend not in ['ddp_spawn', 'ddp_cpu']: + return results + + def __transfer_ddp_spawn_state_on_fit_end(self, model, q, results): + if not self.distributed_backend in ['ddp_spawn', 'ddp_cpu']: + return + + # track the best model path + best_model_path = None + if self.checkpoint_callback is not None: + best_model_path = self.checkpoint_callback.best_model_path + if self.global_rank == 0 and q is not None: - q.put(self.checkpoint_callback.best_model_path) + rank_zero_warn('cleaning up ddp environment...') + q.put(best_model_path) q.put(results) - if self.global_rank == 0 and self.distributed_backend != 'ddp_spawn': - return results + # save the last weights + last_path = None + if not self.testing: + last_path = os.path.join(self.default_root_dir, '__temp_weight_ddp_end.ckpt') + torch.save(model.state_dict(), last_path) + q.put(last_path) def save_spawn_weights(self, model): """ @@ -574,6 +595,7 @@ def save_spawn_weights(self, model): if self.is_global_zero: path = os.path.join(self.default_root_dir, '__temp_weight_ddp_end.ckpt') self.save_checkpoint(path) + return path def load_spawn_weights(self, original_model): """ diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -35,7 +35,7 @@ from pytorch_lightning.utilities import rank_zero_warn, parsing, rank_zero_info, rank_zero_only import warnings -# warnings to ignore +# warnings to ignore in trainer warnings.filterwarnings('ignore', message='torch.distributed.reduce_op is deprecated, ' 'please use torch.distributed.ReduceOp instead') @@ -1063,9 +1063,14 @@ def __run_ddp_spawn(self, model, nprocs): # restore main state with best weights best_path = q.get() results = q.get() - if best_path is not None and len(best_path) > 0: - self.checkpoint_callback.best_model_path = best_path - model.load_from_checkpoint(best_path) + last_path = q.get() + + # transfer back the best path to the trainer + self.checkpoint_callback.best_model_path = best_path + + # load last weights + if last_path is not None and not self.testing: + torch.load(last_path, map_location=lambda storage, loc: storage) self.model = model return results
Can't use None (anymore) in checkpoint_callback ## 🐛 Bug Using None in checkpoint_callback now errors out ``` -- Process 0 terminated with the following error: Traceback (most recent call last): File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap fn(i, *args) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 562, in ddp_train q.put(self.checkpoint_callback.best_model_path) AttributeError: 'NoneType' object has no attribute 'best_model_path' ``` ### To Reproduce `trainer = Trainer(checkpoint_callback=None)` Ran into this issue after upgrading to master; I was using master from a few commits ago before. Edit: `False` causes the same error as well
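The fix treats a disabled checkpoint callback (`None` or `False`) as "no best path" instead of dereferencing it. Stripped of the DDP-spawn plumbing, the guard amounts to the following; the `DummyTrainer` class is only a stand-in for illustration:

```python
class DummyTrainer:
    """Stand-in for the Trainer; only the guard around the callback matters here."""

    def __init__(self, checkpoint_callback=None):
        self.checkpoint_callback = checkpoint_callback

    def best_model_path(self):
        # `None` and `False` both mean "checkpointing disabled": report no path
        if not self.checkpoint_callback:
            return None
        return self.checkpoint_callback.best_model_path


assert DummyTrainer(checkpoint_callback=None).best_model_path() is None
assert DummyTrainer(checkpoint_callback=False).best_model_path() is None
```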
2020-07-09T10:46:34Z
[]
[]
Traceback (most recent call last): File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap fn(i, *args) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 562, in ddp_train q.put(self.checkpoint_callback.best_model_path) AttributeError: 'NoneType' object has no attribute 'best_model_path'
250
Lightning-AI/lightning
Lightning-AI__lightning-2572
c197b74289997fa11cd372b51adb637f3e3846ec
diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py --- a/pytorch_lightning/core/memory.py +++ b/pytorch_lightning/core/memory.py @@ -209,7 +209,7 @@ def _forward_example_input(self) -> None: input_ = model.example_input_array input_ = model.transfer_batch_to_device(input_, model.device) - if trainer is not None and trainer.use_amp: + if trainer is not None and trainer.use_amp and not trainer.use_tpu: if NATIVE_AMP_AVALAIBLE: model.forward = torch.cuda.amp.autocast()(model.forward) diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py --- a/pytorch_lightning/trainer/distrib_parts.py +++ b/pytorch_lightning/trainer/distrib_parts.py @@ -240,14 +240,14 @@ def dp_train(self, model): # hack forward to do autocast for the user model_autocast_original_forward = model.forward - if self.use_amp and NATIVE_AMP_AVALAIBLE: + if self.use_amp and NATIVE_AMP_AVALAIBLE and not self.use_tpu: # wrap the user's forward in autocast and give it back at the end model.forward = torch.cuda.amp.autocast()(model.forward) # TODO: remove with dropping NVIDIA AMP support # check for this bug (amp + dp + !01 doesn't work) # https://github.com/NVIDIA/apex/issues/227 - if self.use_dp and self.use_amp and not NATIVE_AMP_AVALAIBLE: + if self.use_dp and self.use_amp and not NATIVE_AMP_AVALAIBLE and not self.use_tpu: if self.amp_level == 'O2': raise MisconfigurationException( f'Amp level {self.amp_level} with DataParallel is not supported.' diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py --- a/pytorch_lightning/trainer/evaluation_loop.py +++ b/pytorch_lightning/trainer/evaluation_loop.py @@ -286,7 +286,7 @@ def _evaluate( # ----------------- # RUN EVALUATION STEP # ----------------- - if self.use_amp and NATIVE_AMP_AVALAIBLE: + if self.use_amp and NATIVE_AMP_AVALAIBLE and not self.use_tpu: with torch.cuda.amp.autocast(): output = self.evaluation_forward(model, batch, batch_idx, dataloader_idx, test_mode) else: diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -1118,7 +1118,7 @@ def run_pretrain_routine(self, model: LightningModule): self.copy_trainer_model_properties(ref_model) # init amp. Must be done here instead of __init__ to allow ddp to work - if NATIVE_AMP_AVALAIBLE and self.precision == 16: + if NATIVE_AMP_AVALAIBLE and self.precision == 16 and not self.use_tpu: self.scaler = torch.cuda.amp.GradScaler() # log hyper-parameters @@ -1300,6 +1300,11 @@ def __test_using_best_weights(self, ckpt_path, test_dataloaders): if ckpt_path == 'best': ckpt_path = self.checkpoint_callback.best_model_path + if len(ckpt_path) == 0: + rank_zero_warn(f'.test() found no path for the best weights, {ckpt_path}. 
Please ' + f'specify a path for a checkpoint .test(ckpt_path=PATH)') + return {} + ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage) model.load_state_dict(ckpt['state_dict']) diff --git a/pytorch_lightning/trainer/training_io.py b/pytorch_lightning/trainer/training_io.py --- a/pytorch_lightning/trainer/training_io.py +++ b/pytorch_lightning/trainer/training_io.py @@ -358,7 +358,7 @@ def dump_checkpoint(self, weights_only: bool = False) -> dict: checkpoint['lr_schedulers'] = lr_schedulers # save native amp scaling - if self.use_amp and NATIVE_AMP_AVALAIBLE: + if self.use_amp and NATIVE_AMP_AVALAIBLE and not self.use_tpu: checkpoint['native_amp_scaling_state'] = self.scaler.state_dict() # add the module_arguments and state_dict from the model diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -702,7 +702,7 @@ def run_batch_backward_pass(self, split_batch, batch_idx, opt_idx, optimizer): # ------------------ # CLIP GRADS # ------------------ - if self.use_amp and NATIVE_AMP_AVALAIBLE: + if self.use_amp and NATIVE_AMP_AVALAIBLE and not self.use_tpu: self.scaler.unscale_(optimizer) self.clip_gradients() @@ -750,7 +750,7 @@ def call_optimizer_step(self, optimizer, opt_idx, batch_idx, split_batch): using_native_amp=native_amp) # in native 16-bit we need to update scaler after optimizer step - if self.use_amp and NATIVE_AMP_AVALAIBLE: + if self.use_amp and NATIVE_AMP_AVALAIBLE and not self.use_tpu: self.scaler.update() # model hook @@ -767,7 +767,7 @@ def optimizer_closure(self, split_batch, batch_idx, opt_idx, optimizer, hiddens) # FORWARD # --------------------------- with self.profiler.profile('model_forward'): - if self.use_amp and NATIVE_AMP_AVALAIBLE: + if self.use_amp and NATIVE_AMP_AVALAIBLE and not self.use_tpu: with torch.cuda.amp.autocast(): training_step_output = self.training_forward(split_batch, batch_idx, opt_idx, hiddens) @@ -817,7 +817,7 @@ def optimizer_closure(self, split_batch, batch_idx, opt_idx, optimizer, hiddens) model_ref.backward(self, closure_loss, optimizer, opt_idx) # exit amp context - if self.precision == 16 and not NATIVE_AMP_AVALAIBLE: + if self.precision == 16 and not NATIVE_AMP_AVALAIBLE and not self.on_tpu: a, b, c = None, None, None error = context.__exit__(a, b, c) if error:
TPU fp16 requires apex installed <!-- ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> When I tried to use precision=16 on TPU, pytorch-lightning is trying to find amp, which is unnecessary. The backtrace is ``` GPU available: False, used: False TPU available: True, using: 8 TPU cores Traceback (most recent call last): File "bert_ner/light/fp16_debug.py", line 16, in <module> trainer = pl.Trainer(tpu_cores=8, precision=16) File "/anaconda3/envs/torch-xla-1.5/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 607, in __init__ self.init_amp() File "/anaconda3/envs/torch-xla-1.5/lib/python3.6/site-packages/pytorch_lightning/trainer/auto_mix_precision.py", line 27, in init_amp "You set `use_amp=True` but do not have apex installed." ModuleNotFoundError: You set `use_amp=True` but do not have apex installed.Install apex first using this guide and rerun with use_amp=True:https://github.com/NVIDIA/apex#linux his run will NOT use 16 bit precision ``` ### To Reproduce Steps to reproduce the behavior: build a whatever Trainer in TPU and use fp16 #### Code sample <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> ``` import pytorch_lightning as pl trainer = pl.Trainer(tpu_cores=8, precision=16) ``` ### Expected behavior <!-- A clear and concise description of what you expected to happen. --> Should have nothing error. ### Environment - PyTorch Version (e.g., 1.5.0): - OS (e.g., Linux): Linux - How you installed PyTorch (`conda`, `pip`, source): conda - Build command you used (if compiling from source): - Python version: - CUDA/cuDNN version: - GPU models and configuration: - Any other relevant information: actually I directly use pytorch-xla-1.5 docker on Google Cloud ### Additional context <!-- Add any other context about the problem here. -->
Hi! thanks for your contribution!, great first issue! If you want to do 16 bit precision training, you either need to have the nightly version of pytorch install or have apex installed. Based on the traceback I guess that you do not have any of them. I could get this working using nightly version of pytorch: ``` pl.Trainer(precision=16, tpu_cores=8) >>>GPU available: False, used: False >>>TPU available: True, using: 8 TPU cores >>>Using native 16bit precision. ``` > If you want to do 16 bit precision training, you either need to have the nightly version of pytorch install or have apex installed. Based on the traceback I guess that you do not have any of them. > I could get this working using nightly version of pytorch: > > ``` > pl.Trainer(precision=16, tpu_cores=8) > >>>GPU available: False, used: False > >>>TPU available: True, using: 8 TPU cores > >>>Using native 16bit precision. > ``` Thanks for the quick reply. But [the document](https://pytorch-lightning.readthedocs.io/en/latest/apex.html) does not point out that I must have nightly version of pytorch installed or have apex installed when training on TPU with fp16. Maybe it's better to revise that part of document? Yes, I agree that from the documentation it would look like it is only a requirement for gpu training. I guess that the specific requirement for TPU is to have pytorch version 1.6 or higher.
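The patch gates every native-AMP and apex code path on `not use_tpu`, since 16-bit on TPU goes through XLA's bfloat16 support rather than `torch.cuda.amp` or apex. A condensed sketch of that decision logic (the helper and its return labels are invented for illustration):

```python
import torch


def pick_precision_backend(precision: int, use_tpu: bool) -> str:
    """Return which 16-bit machinery a run should use (labels are illustrative)."""
    if precision != 16:
        return "full-precision"
    if use_tpu:
        return "xla-bf16"      # TPU: neither apex nor torch.cuda.amp is involved
    if hasattr(torch.cuda, "amp") and hasattr(torch.cuda.amp, "autocast"):
        return "native-amp"    # PyTorch >= 1.6
    return "apex"              # otherwise fall back to NVIDIA apex


assert pick_precision_backend(16, use_tpu=True) == "xla-bf16"
```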
2020-07-10T01:17:22Z
[]
[]
Traceback (most recent call last): File "bert_ner/light/fp16_debug.py", line 16, in <module> trainer = pl.Trainer(tpu_cores=8, precision=16) File "/anaconda3/envs/torch-xla-1.5/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 607, in __init__ self.init_amp() File "/anaconda3/envs/torch-xla-1.5/lib/python3.6/site-packages/pytorch_lightning/trainer/auto_mix_precision.py", line 27, in init_amp "You set `use_amp=True` but do not have apex installed." ModuleNotFoundError: You set `use_amp=True` but do not have apex installed.Install apex first using this guide and rerun with use_amp=True:https://github.com/NVIDIA/apex#linux his run will NOT use 16 bit precision
252
Lightning-AI/lightning
Lightning-AI__lightning-275
222d7d2d5d01771e63449bdcfcdaf7dd8ed1ecec
diff --git a/pytorch_lightning/root_module/decorators.py b/pytorch_lightning/root_module/decorators.py --- a/pytorch_lightning/root_module/decorators.py +++ b/pytorch_lightning/root_module/decorators.py @@ -10,13 +10,18 @@ def data_loader(fn): attr_name = '_lazy_' + fn.__name__ - @property - def _data_loader(self): + def _get_data_loader(self): try: value = getattr(self, attr_name) except AttributeError: try: value = fn(self) # Lazy evaluation, done only once. + if ( + value is not None and + not isinstance(value, list) and + fn.__name__ in['test_dataloader', 'val_dataloader'] + ): + value = [value] except AttributeError as e: # Guard against AttributeError suppression. (Issue #142) traceback.print_exc() @@ -25,4 +30,4 @@ def _data_loader(self): setattr(self, attr_name, value) # Memoize evaluation. return value - return _data_loader + return _get_data_loader diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -24,6 +24,7 @@ import pdb from pytorch_lightning.trainer import ignored_warnings + try: from apex import amp APEX_AVAILABLE = True @@ -141,9 +142,9 @@ def __init__(self, self.nb_val_batches = 0 self.nb_training_batches = 0 self.nb_test_batches = 0 - self.train_dataloader = None - self.test_dataloader = None - self.val_dataloader = None + self.get_train_dataloader = None + self.get_test_dataloaders = None + self.get_val_dataloaders = None # training state self.model = None @@ -441,19 +442,21 @@ def training_tqdm_dict(self): def __layout_bookeeping(self): # determine number of training batches - self.nb_training_batches = len(self.train_dataloader) + self.nb_training_batches = len(self.get_train_dataloader()) self.nb_training_batches = int(self.nb_training_batches * self.train_percent_check) # determine number of validation batches # val datasets could be none, 1 or 2+ - if self.val_dataloader is not None: - self.nb_val_batches = sum(len(dataloader) for dataloader in self.val_dataloader) + if self.get_val_dataloaders() is not None: + self.nb_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders()) self.nb_val_batches = int(self.nb_val_batches * self.val_percent_check) self.nb_val_batches = max(1, self.nb_val_batches) # determine number of test batches - if self.test_dataloader is not None: - self.nb_test_batches = sum(len(dataloader) for dataloader in self.test_dataloader) + if self.get_test_dataloaders() is not None: + self.nb_test_batches = sum( + len(dataloader) for dataloader in self.get_test_dataloaders() + ) self.nb_test_batches = int(self.nb_test_batches * self.test_percent_check) self.nb_test_batches = max(1, self.nb_test_batches) @@ -472,10 +475,10 @@ def __evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=Fal # make dataloader_idx arg in validation_step optional args = [batch, batch_idx] - if test and len(self.test_dataloader) > 1: + if test and len(self.get_test_dataloaders()) > 1: args.append(dataloader_idx) - elif not test and len(self.val_dataloader) > 1: + elif not test and len(self.get_val_dataloaders()) > 1: args.append(dataloader_idx) # handle DP, DDP forward @@ -520,9 +523,9 @@ def evaluate(self, model, dataloaders, max_batches, test=False): outputs = [] # run training - for dataloader_idx, dl in enumerate(dataloaders): + for dataloader_idx, dataloader in enumerate(dataloaders): dl_outputs = [] - for batch_idx, batch in enumerate(dl): + for batch_idx, batch in enumerate(dataloader): if batch is None: 
# pragma: no cover continue @@ -570,21 +573,11 @@ def get_dataloaders(self, model): :param model: :return: """ + self.get_train_dataloader = model.train_dataloader + self.get_test_dataloaders = model.test_dataloader + self.get_val_dataloaders = model.val_dataloader - self.train_dataloader = model.train_dataloader - self.test_dataloader = model.test_dataloader - self.val_dataloader = model.val_dataloader - - # handle returning an actual dataloader instead of a list of loaders - have_test_loaders = self.test_dataloader is not None - if have_test_loaders and not isinstance(self.test_dataloader, list): - self.test_dataloader = [self.test_dataloader] - - have_val_loaders = self.val_dataloader is not None - if have_val_loaders and not isinstance(self.val_dataloader, list): - self.val_dataloader = [self.val_dataloader] - - if self.use_ddp and not isinstance(self.train_dataloader.sampler, DistributedSampler): + if self.use_ddp and not isinstance(self.get_train_dataloader().sampler, DistributedSampler): msg = """ You're using multiple gpus and multiple nodes without using a DistributedSampler to assign a subset of your data to each process. To silence this warning, pass a @@ -603,8 +596,8 @@ def get_dataloaders(self, model): """ warnings.warn(msg) - if self.use_ddp and self.val_dataloader is not None: - for dataloader in self.val_dataloader: + if self.use_ddp and self.get_val_dataloaders is not None: + for dataloader in self.get_val_dataloaders(): if not isinstance(dataloader.sampler, DistributedSampler): msg = """ Your val_dataloader(s) don't use DistributedSampler. @@ -626,8 +619,8 @@ def get_dataloaders(self, model): warnings.warn(msg) break - if self.use_ddp and self.test_dataloader is not None: - for dataloader in self.test_dataloader: + if self.use_ddp and self.get_test_dataloaders is not None: + for dataloader in self.get_test_dataloaders(): if not isinstance(dataloader.sampler, DistributedSampler): msg = """ Your test_dataloader(s) don't use DistributedSampler. 
@@ -912,12 +905,12 @@ def __run_pretrain_routine(self, model): # run tiny validation (if validation defined) # to make sure program won't crash during val ref_model.on_sanity_check_start() - if self.val_dataloader is not None and self.nb_sanity_val_steps > 0: + if self.get_val_dataloaders() is not None and self.nb_sanity_val_steps > 0: # reset progress_bar limit for sanity check if self.show_progress_bar: self.progress_bar.reset(self.nb_sanity_val_steps) - self.evaluate(model, self.val_dataloader, self.nb_sanity_val_steps, self.testing) + self.evaluate(model, self.get_val_dataloaders(), self.nb_sanity_val_steps, self.testing) # --------------------------- # CORE TRAINING LOOP @@ -928,8 +921,8 @@ def __train(self): # run all epochs for epoch_nb in range(self.current_epoch, self.max_nb_epochs): # set seed for distributed sampler (enables shuffling for each epoch) - if self.use_ddp and hasattr(self.train_dataloader.sampler, 'set_epoch'): - self.train_dataloader.sampler.set_epoch(epoch_nb) + if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'): + self.get_train_dataloader().sampler.set_epoch(epoch_nb) # get model model = self.__get_model() @@ -974,7 +967,7 @@ def run_training_epoch(self): model.on_epoch_start() # run epoch - for batch_nb, batch in enumerate(self.train_dataloader): + for batch_nb, batch in enumerate(self.get_train_dataloader()): self.batch_nb = batch_nb self.global_step += 1 @@ -1272,12 +1265,12 @@ def __run_evaluation(self, test=False): model.on_pre_performance_check() # select dataloaders - dataloaders = self.val_dataloader + dataloaders = self.get_val_dataloaders() max_batches = self.nb_val_batches # calculate max batches to use if test: - dataloaders = self.test_dataloader + dataloaders = self.get_test_dataloaders() max_batches = self.nb_test_batches # cap max batches to 1 when using fast_dev_run
JIT support **Initial issue description below** JIT support requires several changes in the `Trainer` and `LightningModule`: - [x] No use of python properties like the current dataloader implementation - Possible solution: Use getters like implemented in #275 - Other possibility: Handle dataloading completely in trainer. The user is able to modify the dataloaders e.g. every epoch using hooks/callbacks - [ ] The trainer cannot set PLModule's class members afterwards like `self.model.trainer = self` - This is because after converting `my_pl_module = torch.jit.script(my_pl_module)`, the module has the class `ScriptModule`. Adding members only adds the members to the `ScriptModule` not to the underlying `LightningModule`. - Solution could be: Implement setter in `LightningModule`. These methods will be transfered to the `ScriptModule` - [ ] Saving and restoring might need some changes, too. One could conditionally check the class of the provided module in the trainer for use of `torch.jit.save` in trainer_io - [ ] JIT is currently not compatible with distributed training (see pytorch issue [#15421](https://github.com/pytorch/pytorch/issues/15421)) **Is your feature request related to a problem? Please describe.** The current implementation of `pl.LightningModule` does not support pytorch's JIT. This is due to the use of python properties for the dataloaders, which is currently not supported in JIT ([see here](https://github.com/pytorch/pytorch/issues/23958)). example trace: ```py $ python train.py VISIBLE GPUS: Traceback (most recent call last): File "train.py", line 69, in <module> train(config, data_dir) File "train.py", line 37, in train trainer.fit(pl_module) File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 694, in fit self.__run_pretrain_routine(model) File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 879, in __run_pretrain_routine self.get_dataloaders(ref_model) File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 574, in get_dataloaders self.train_dataloader = model.train_dataloader File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/torch/jit/__init__.py", line 1563, in __getattr__ return super(ScriptModule, self).__getattr__(attr) File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/torch/nn/modules/module.py", line 589, in __getattr__ type(self).__name__, name)) AttributeError: 'ScriptModule' object has no attribute 'train_dataloader' ``` where pl_module has: ```py class PlModule(pl.LightningModule): # .... @pl.data_loader def train_dataloader(self): return self.__dataloader("train") ``` **Describe the solution you'd like** ```py torch.jit.script(pl_module) ``` **Describe alternatives you've considered** A workaround might be defining a separate Module handling all nn.Module stuff and transforming only this part into a jit script module. A solution might be to define explicit getters like `get_train_dataloader(self)` instead of using properties.
Good point. Hadn't tested Lightning with JIT. I think the choice of property vs. getter made sense for the first design of the framework. However, recent refactors have made those differences irrelevant. I don't see any reason why we shouldn't move to a getter instead of the property. Do you want to submit a PR for this? Yes, I can do that.
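A minimal sketch of the getter-based design discussed above, with no Lightning internals; the class and method names are illustrative assumptions, not the actual Lightning API. The point it shows is the shape of the change: the trainer asks for the loader through an explicit method call instead of reading a property, so a scripted module is never queried for a `train_dataloader` attribute the way the traceback shows.

```python
# Illustrative sketch only -- names are made up, not the real Lightning API.
class MyModule:
    def __init__(self, train_loader):
        self._train_loader = train_loader

    def get_train_dataloader(self):
        # plain method instead of @property
        return self._train_loader


def fetch_train_loader(model):
    # trainer-side code: explicit call, nothing resolved as a property via __getattr__
    return model.get_train_dataloader()


print(fetch_train_loader(MyModule(train_loader=[1, 2, 3])))  # [1, 2, 3]
```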
2019-10-01T11:06:59Z
[]
[]
Traceback (most recent call last): File "train.py", line 69, in <module> train(config, data_dir) File "train.py", line 37, in train trainer.fit(pl_module) File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 694, in fit self.__run_pretrain_routine(model) File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 879, in __run_pretrain_routine self.get_dataloaders(ref_model) File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 574, in get_dataloaders self.train_dataloader = model.train_dataloader File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/torch/jit/__init__.py", line 1563, in __getattr__ return super(ScriptModule, self).__getattr__(attr) File "/home/schroeter/.virtualenvs/pytorch-1.2/lib/python3.7/site-packages/torch/nn/modules/module.py", line 589, in __getattr__ type(self).__name__, name)) AttributeError: 'ScriptModule' object has no attribute 'train_dataloader'
270
Lightning-AI/lightning
Lightning-AI__lightning-2832
ad0f1194aa2fa8cc82c915e49aca3a1149901709
diff --git a/pytorch_lightning/accelerator_backends/ddp_spawn_backend.py b/pytorch_lightning/accelerator_backends/ddp_spawn_backend.py --- a/pytorch_lightning/accelerator_backends/ddp_spawn_backend.py +++ b/pytorch_lightning/accelerator_backends/ddp_spawn_backend.py @@ -49,7 +49,8 @@ def teardown(self, model): last_path = self.mp_queue.get() # transfer back the best path to the trainer - self.trainer.checkpoint_callback.best_model_path = best_path + if self.trainer.checkpoint_callback: + self.trainer.checkpoint_callback.best_model_path = best_path # todo, pass also bets score # load last weights
Can't use None (anymore) in checkpoint_callback ## 🐛 Bug using None in checkpoint_callback now errors out ``` -- Process 0 terminated with the following error: Traceback (most recent call last): File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap fn(i, *args) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 562, in ddp_train q.put(self.checkpoint_callback.best_model_path) AttributeError: 'NoneType' object has no attribute 'best_model_path' ``` ### To Reproduce `trainer = Trainer(checkpoint_callback=None)` Ran into this issue from upgrading to master; I was using master from a few commits ago before. Edit: `False` causes the same error as well
@williamFalcon I saw that this issue was mentioned and supposedly fixed in the merge, but I just tested with master and I'm still getting the same error
2020-08-05T06:45:37Z
[]
[]
Traceback (most recent call last): File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap fn(i, *args) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_data_parallel.py", line 562, in ddp_train q.put(self.checkpoint_callback.best_model_path) AttributeError: 'NoneType' object has no attribute 'best_model_path'
285
Lightning-AI/lightning
Lightning-AI__lightning-2874
664258c825a68ac46c8305cb09350a7be0ae8d1c
diff --git a/pytorch_lightning/utilities/__init__.py b/pytorch_lightning/utilities/__init__.py --- a/pytorch_lightning/utilities/__init__.py +++ b/pytorch_lightning/utilities/__init__.py @@ -5,7 +5,7 @@ from pytorch_lightning.utilities.apply_func import move_data_to_device from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn, rank_zero_info -from pytorch_lightning.utilities.parsing import AttributeDict, flatten_dict +from pytorch_lightning.utilities.parsing import AttributeDict, flatten_dict, is_picklable try: from apex import amp diff --git a/pytorch_lightning/utilities/parsing.py b/pytorch_lightning/utilities/parsing.py --- a/pytorch_lightning/utilities/parsing.py +++ b/pytorch_lightning/utilities/parsing.py @@ -1,7 +1,10 @@ import inspect +import pickle from argparse import Namespace from typing import Dict +from pytorch_lightning.utilities import rank_zero_warn + def str_to_bool(val): """Convert a string representation of truth to true (1) or false (0). @@ -25,26 +28,28 @@ def str_to_bool(val): raise ValueError(f'invalid truth value {val}') +def is_picklable(obj: object) -> bool: + """Tests if an object can be pickled""" + + try: + pickle.dumps(obj) + return True + except pickle.PicklingError: + return False + + def clean_namespace(hparams): - """Removes all functions from hparams so we can pickle.""" + """Removes all unpicklable entries from hparams""" + hparams_dict = hparams if isinstance(hparams, Namespace): - del_attrs = [] - for k in hparams.__dict__: - if callable(getattr(hparams, k)): - del_attrs.append(k) - - for k in del_attrs: - delattr(hparams, k) - - elif isinstance(hparams, dict): - del_attrs = [] - for k, v in hparams.items(): - if callable(v): - del_attrs.append(k) - - for k in del_attrs: - del hparams[k] + hparams_dict = hparams.__dict__ + + del_attrs = [k for k, v in hparams_dict.items() if not is_picklable(v)] + + for k in del_attrs: + rank_zero_warn(f"attribute '{k}' removed from hparams because it cannot be pickled", UserWarning) + del hparams_dict[k] def get_init_args(frame) -> dict:
self.hparam silently removes params that are not serializable <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> Following the approach found under [hyperparameters in the docs](https://pytorch-lightning.readthedocs.io/en/latest/hyperparameters.html#lightningmodule-hyperparameters), step 3, I passed a dict with parameters to my `pl.LightningModule`. In the `__init__` printing `self.hparams` shows all contents I passed. However, in the function `def configure_optimizers(self)`, some hparams are gone. This might be related to that not all param values are YAML serializable, and therefore automatically removed? Because the 2 removed params are `"criterion": torch.nn.BCELoss()` and `"optimizer": partial(optim.Adam, lr=0.001)`. ### To Reproduce Steps to reproduce the behavior: 1. Run the following script: ```python from functools import partial import torch import torch.optim as optim from torch.utils.data import Dataset import pytorch_lightning as pl from pytorch_lightning import Trainer # partial to give all params, except the data hparams = { "criterion": torch.nn.BCELoss(), # F.cross_entropy(), # loss function "optimizer": partial(optim.Adam, lr=0.001), # (lr=0.001), # "learning_rate": 0.001, "filters": 64, "layers": 2 } class EmptyDataset(Dataset): def __init__(self, transform=None): pass def __len__(self): return 32 def __getitem__(self, idx): return {"input": np.array([1]), "output": "nothing"} class LitLake(pl.LightningModule): def __init__(self, hparams: dict, transforms: dict = None): super().__init__() self.hparams = hparams print("self.hparams\n", self.hparams) def forward(self, x): pass def training_step(self, batch, batch_idx): """ Lightning calls this inside the training loop with the data from the training dataloader passed in as `batch`. """ # forward pass x, y = batch y_hat = self(x) loss = self.hparams["criterion"](y_hat, y) tensorboard_logs = {'train_loss': loss} return {'loss': loss, 'log': tensorboard_logs} def configure_optimizers(self): print("self.hparams\n", self.hparams) optimizer = self.hparams["optimizer"](self.parameters()) scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10) return [optimizer], [scheduler] def train_dataloader(self): return DataLoader(EmptyDataset(), batch_size=4, num_workers=1) model = LitLake(hparams=hparams) # most basic trainer, uses good defaults trainer = Trainer() # gpus=1, num_nodes=1 trainer.fit(model) # KeyError: 'optimizer' ``` 2. 
See error <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> <details><summary>Script output (CLICK ME)</summary> <p> ```python self.hparams "criterion": BCELoss() "filters": 64 "layers": 2 "optimizer": functools.partial(<class 'torch.optim.adam.Adam'>, lr=0.001) GPU available: True, used: False TPU available: False, using: 0 TPU cores self.hparams "filters": 64 "layers": 2 Traceback (most recent call last): File "lightning_hparams_bug.py", line 61, in <module> trainer.fit(model) # KeyError: 'optimizer' File "/home/*user*/anaconda3/envs/onseilake/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 965, in fit self.optimizers, self.lr_schedulers, self.optimizer_frequencies = self.init_optimizers(model) File "/home/*user*/anaconda3/envs/onseilake/lib/python3.7/site-packages/pytorch_lightning/trainer/optimizers.py", line 18, in init_optimizers optim_conf = model.configure_optimizers() File "lightning_hparams_bug.py", line 51, in configure_optimizers optimizer = self.hparams["optimizer"](self.parameters()) KeyError: 'optimizer' ``` </p> </details> #### Code sample <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> ### Expected behavior <!-- A clear and concise description of what you expected to happen. --> Either: 1. `self.hparams` keeping the non-serializable parameters (will give problems with loading?) 2. Throw an error explaining why those param values are not acceptable and how to approach it, instead of silently removing them. ### Environment - PyTorch Version (e.g., 1.0): 1.5.0 - OS (e.g., Linux): Ubuntu 18.04 - How you installed PyTorch (`conda`, `pip`, source): `conda` - Build command you used (if compiling from source): - - Python version: 3.7.6 - CUDA/cuDNN version: 10.2 - GPU models and configuration: 1x GeForce GTX 1080 Ti - Any other relevant information: - ### Additional context <!-- Add any other context about the problem here. --> Bug reproduced by @Borda
Happens to me too. I'm also waiting for this fix, since I really want to use the new feature to put anything in hparams. @dscarmo you can take it over and send a PR :] or I'll check it tomorrow... We need to add a warning about this.
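A hedged, standard-library-only sketch of the behaviour requested above: keep everything that pickles and warn about (rather than silently drop) anything that does not. The helper names are illustrative, not the actual Lightning implementation, and the exception set caught here is an assumption (the real fix only catches `PicklingError`).

```python
import pickle
import warnings


def is_picklable(obj) -> bool:
    try:
        pickle.dumps(obj)
        return True
    except (pickle.PicklingError, AttributeError, TypeError):
        return False


def clean_hparams(hparams: dict) -> dict:
    dropped = [k for k, v in hparams.items() if not is_picklable(v)]
    for k in dropped:
        warnings.warn(f"hparams entry '{k}' is not picklable and was removed", UserWarning)
    return {k: v for k, v in hparams.items() if k not in dropped}


hparams = {"filters": 64, "layers": 2, "criterion": lambda y_hat, y: abs(y_hat - y)}
print(clean_hparams(hparams))  # {'filters': 64, 'layers': 2}, plus a UserWarning about 'criterion'
```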
2020-08-07T23:59:21Z
[]
[]
Traceback (most recent call last): File "lightning_hparams_bug.py", line 61, in <module> trainer.fit(model) # KeyError: 'optimizer' File "/home/*user*/anaconda3/envs/onseilake/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 965, in fit self.optimizers, self.lr_schedulers, self.optimizer_frequencies = self.init_optimizers(model) File "/home/*user*/anaconda3/envs/onseilake/lib/python3.7/site-packages/pytorch_lightning/trainer/optimizers.py", line 18, in init_optimizers optim_conf = model.configure_optimizers() File "lightning_hparams_bug.py", line 51, in configure_optimizers optimizer = self.hparams["optimizer"](self.parameters()) KeyError: 'optimizer'
290
Lightning-AI/lightning
Lightning-AI__lightning-2911
f9d88f8088bbc27341f9d19c4aaf27259d22e072
diff --git a/pytorch_lightning/core/saving.py b/pytorch_lightning/core/saving.py --- a/pytorch_lightning/core/saving.py +++ b/pytorch_lightning/core/saving.py @@ -167,8 +167,9 @@ def _load_model_state(cls, checkpoint: Dict[str, Any], *cls_args, **cls_kwargs): cls_kwargs = {k: v for k, v in cls_kwargs.items() if k in cls_init_args_name} # prevent passing positional arguments if class does not accept any - if len(cls_spec.args) <= 1 and not cls_spec.kwonlyargs: + if len(cls_spec.args) <= 1 and not cls_spec.varargs and not cls_spec.kwonlyargs: cls_args, cls_kwargs = [], {} + model = cls(*cls_args, **cls_kwargs) # load the state_dict on the model automatically model.load_state_dict(checkpoint['state_dict'])
load_from_checkpoint: TypeError: __init__() missing 1 required positional argument ## ❓ Questions and Help #### What is your question? load_from_checkpoint: TypeError: __init__() missing 1 required positional argument I have read the issues before, but the things different is **my `LightningModule` is inherited from my self-defined `LightningModule`.** How to solve this problem or what is the best practice better suited to my needs? #### Code To reproduce the error: ```python import os import torch from torch.nn import functional as F from torch.utils.data import DataLoader from torchvision.datasets import MNIST from torchvision import transforms import pytorch_lightning as pl from pytorch_lightning import Trainer from argparse import Namespace class _LitModel(pl.LightningModule): def __init__(self, hparams): super().__init__() if isinstance(hparams, dict): hparams = Namespace(**hparams) self.hparams = hparams self.l1 = torch.nn.Linear(28 * 28, hparams.classes) def forward(self, x): return torch.relu(self.l1(x.view(x.size(0), -1))) def training_step(self, batch, batch_idx): x, y = batch y_hat = self(x) loss = F.cross_entropy(y_hat, y) tensorboard_logs = {'train_loss': loss} return {'loss': loss, 'log': tensorboard_logs} def validation_step(self, batch, batch_idx): x, y = batch y_hat = self(x) loss = F.cross_entropy(y_hat, y) return {'val_loss': loss} def validation_epoch_end(self, outputs): avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() return {'val_loss': avg_loss} def configure_optimizers(self): return torch.optim.Adam(self.parameters(), lr=0.001) class LitModel(_LitModel): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--classes', type=int, default=10) parser.add_argument('--checkpoint', type=str, default=None) hparams = parser.parse_args() mnist_train = MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()) mnist_train = DataLoader(mnist_train, num_workers=1) mnist_val = MNIST(os.getcwd(), train=False, download=False, transform=transforms.ToTensor()) mnist_val = DataLoader(mnist_val, num_workers=1) # A bit weird here. I just want to show `load_from_checkpoint` will fail. if hparams.checkpoint is None: model = LitModel(hparams) else: model = LitModel.load_from_checkpoint(hparams.checkpoint) trainer = Trainer(max_epochs=2, limit_train_batches=2, limit_val_batches=2, progress_bar_refresh_rate=0) trainer.fit(model, mnist_train, mnist_val) ``` #### Error msg ``` Traceback (most recent call last): File "main.py", line 64, in <module> model = LitModel.load_from_checkpoint(hparams.checkpoint) File "/home/siahuat0727/.local/lib/python3.8/site-packages/pytorch_lightning/core/saving.py", line 138, in load_from_checkpoint model = cls._load_model_state(checkpoint, *args, **kwargs) File "/home/siahuat0727/.local/lib/python3.8/site-packages/pytorch_lightning/core/saving.py", line 174, in _load_model_state model = cls(*cls_args, **cls_kwargs) File "main.py", line 46, in __init__ super().__init__(*args, **kwargs) TypeError: __init__() missing 1 required positional argument: 'hparams' ``` #### How to run to get the error ```bash $ python3 main.py $ python3 main.py --checkpoint lightning_logs/version_0/checkpoints/epoch\=1.ckpt ``` #### What's your environment? - OS: Linux - Packaging: pip - Version 0.9.0rc12
Did you try to call `self.save_hyperparameters()` in _LitModel? It looks like the hparams were not saved to the checkpoint. @awaelchli Hihi, the result is the same. It works if I use `_LitModel` directly instead of `LitModel`, so I think it's something about inheritance. https://pytorch-lightning.readthedocs.io/en/latest/hyperparameters.html > Anything assigned to self.hparams will also be saved automatically.
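The inheritance wrinkle can be seen with `inspect.getfullargspec` alone; this self-contained sketch (no Lightning imports, illustrative class names) shows why a `*args, **kwargs` subclass looks like it takes no positional arguments unless `varargs` is also checked, which is exactly what the patch above adds.

```python
import inspect


class Base:
    def __init__(self, hparams):
        self.hparams = hparams


class Child(Base):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


spec = inspect.getfullargspec(Child.__init__)
print(spec.args)        # ['self']  -> looks like "takes no positional arguments"
print(spec.varargs)     # 'args'    -> but hparams actually arrives through *args
print(spec.kwonlyargs)  # []

# Old check dropped positional args whenever len(args) <= 1 and no kwonlyargs -> hparams lost.
# The patched check also requires `not spec.varargs`, so hparams survive here:
drop_positional = len(spec.args) <= 1 and not spec.varargs and not spec.kwonlyargs
print(drop_positional)  # False
```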
2020-08-11T08:17:15Z
[]
[]
Traceback (most recent call last): File "main.py", line 64, in <module> model = LitModel.load_from_checkpoint(hparams.checkpoint) File "/home/siahuat0727/.local/lib/python3.8/site-packages/pytorch_lightning/core/saving.py", line 138, in load_from_checkpoint model = cls._load_model_state(checkpoint, *args, **kwargs) File "/home/siahuat0727/.local/lib/python3.8/site-packages/pytorch_lightning/core/saving.py", line 174, in _load_model_state model = cls(*cls_args, **cls_kwargs) File "main.py", line 46, in __init__ super().__init__(*args, **kwargs) TypeError: __init__() missing 1 required positional argument: 'hparams'
297
Lightning-AI/lightning
Lightning-AI__lightning-3045
9031dc3b817d46dc9b36007cce1360cfcf99939f
diff --git a/pytorch_lightning/trainer/training_io.py b/pytorch_lightning/trainer/training_io.py --- a/pytorch_lightning/trainer/training_io.py +++ b/pytorch_lightning/trainer/training_io.py @@ -354,7 +354,7 @@ def dump_checkpoint(self, weights_only: bool = False) -> dict: checkpoint['lr_schedulers'] = lr_schedulers # save native amp scaling - if self.amp_backend == AMPType.NATIVE and not self.use_tpu: + if self.amp_backend == AMPType.NATIVE and not self.use_tpu and self.scaler is not None: checkpoint['native_amp_scaling_state'] = self.scaler.state_dict() elif self.amp_backend == AMPType.APEX: checkpoint['amp_scaling_state'] = amp.state_dict()
auto_lr_finder crashes when using 16-bit precision with pytorch-nightly and torchvision-nightly I heard that the nightly version of pytorch has native support for 16-bit training and wanted to give it a try since I'm trying to train some recent models on a GTX 1080. FYI, I'm using `pytorch-lightning=0.85.0`. I've installed the following version of the two libraries: * torch: https://download.pytorch.org/whl/nightly/cu102/torch-1.7.0.dev20200720-cp37-cp37m-linux_x86_64.whl * torch-vision: https://download.pytorch.org/whl/nightly/cu102/torchvision-0.8.0.dev20200720-cp37-cp37m-linux_x86_64.whl I've also setup the `Trainer` as follows: ```python trainer = Trainer( gpus=1, max_epochs=hparams.epochs, auto_lr_find=True, progress_bar_refresh_rate=0, accumulate_grad_batches=10, # overfit_batches=5, amp_level="O2", precision=16, logger=logger, checkpoint_callback=checkpoint_callback, ) ``` I'm training a resnext101_32x8d_wsl model using the weights provided by Facebook in `pytorch-hub`. ``` Running command: python pipe/train_cnn.py /home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/utilities/distributed.py:25: UserWarning: Checkpoint directory /home/gianluca/git/kaggle/siim-isic-melanoma-classification/models exists and is not empty with save_top_k != 0.All files in this directory will be deleted when a checkpoint is saved! warnings.warn(*args, **kwargs) Using cache found in /home/gianluca/.cache/torch/hub/facebookresearch_WSL-Images_master GPU available: True, used: True TPU available: False, using: 0 TPU cores CUDA_VISIBLE_DEVICES: [0] Using native 16bit precision. Traceback (most recent call last): File "pipe/train_cnn.py", line 237, in <module> main(create_submission=True) File "pipe/train_cnn.py", line 48, in main preds, weight_fpath = train(fold_number=fold_number, folds=folds) File "pipe/train_cnn.py", line 120, in train trainer.fit(model) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 956, in fit self._run_lr_finder_internally(model) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/lr_finder.py", line 58, in _run_lr_finder_internally lr_finder = self.lr_find(model) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/lr_finder.py", line 180, in lr_find self.save_checkpoint(str(save_path)) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/training_io.py", line 268, in save_checkpoint checkpoint = self.dump_checkpoint(weights_only) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/training_io.py", line 362, in dump_checkpoint checkpoint['native_amp_scaling_state'] = self.scaler.state_dict() AttributeError: 'NoneType' object has no attribute 'state_dict' ERROR: failed to reproduce 'train_cnn.dvc': stage: 'train_cnn.dvc' cmd 'python pipe/train_cnn.py' failed ``` - PyTorch Version (e.g., 1.0): torch-1.7.0.dev20200720 - OS (e.g., Linux): Ubuntu 18.04 - How you installed PyTorch (`conda`, `pip`, source): poetry - Build command you used (if compiling from source): - Python version: 3.7.0 - CUDA/cuDNN version: 10.2 - GPU models and configuration: 1 x GTX 1080 - Any other relevant information: ### Additional context Since `torch^1.6.0` has native 
support for 16-bit training, I did not install NVIDIA Apex. The whole reason for using a nightly version of PyTorch was to avoid installing Apex, since I wasn't able to figure out how to install it with `poetry`.
Hi! thanks for your contribution!, great first issue! After a few rapid experiments, the issue seems to be related to using the `auto_lr_finder`. In fact, disabling it fixes the issue. Ran into same issue, the error is clearer when you call lr_find directly: ``` --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-9-003731b0ec57> in <module> 55 # trainer.scaler = torch.cuda.amp.GradScaler() 56 ---> 57 lrf = trainer.lr_find(model=net, train_dataloader=trn_dl, early_stop_threshold=10.) 58 ~/anaconda3/envs/dl/lib/python3.7/site-packages/pytorch_lightning/trainer/lr_finder.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, num_accumulation_steps) 178 179 # Dump model checkpoint --> 180 self.save_checkpoint(str(save_path)) 181 182 # Configure optimizer and scheduler ~/anaconda3/envs/dl/lib/python3.7/site-packages/pytorch_lightning/trainer/training_io.py in save_checkpoint(self, filepath, weights_only) 266 267 def save_checkpoint(self, filepath, weights_only: bool = False): --> 268 checkpoint = self.dump_checkpoint(weights_only) 269 270 if self.is_global_zero: ~/anaconda3/envs/dl/lib/python3.7/site-packages/pytorch_lightning/trainer/training_io.py in dump_checkpoint(self, weights_only) 360 # save native amp scaling 361 if self.use_amp and NATIVE_AMP_AVALAIBLE and not self.use_tpu: --> 362 checkpoint['native_amp_scaling_state'] = self.scaler.state_dict() 363 364 # add the module_arguments and state_dict from the model AttributeError: 'NoneType' object has no attribute 'state_dict' ``` trainer.scaler is initialized to None, and then set to torch.cuda.amp.GradScaler() [here](https://github.com/PyTorchLightning/pytorch-lightning/blob/bc833fbf5271171136824286346b04a7f1bdd0de/pytorch_lightning/trainer/trainer.py#L1104). Meanwhile lr_find wants to checkpoint the state of the scaler at some point before this happens. Quick fix: just set the value of trainer.scaler after trainer init and before lr_find. This doesn't work if you want to use auto_lr_find option. ``` trainer = pl.Trainer(gpus=1, max_epochs=20, precision=16) trainer.scaler = torch.cuda.amp.GradScaler() lrf = trainer.lr_find(model=net, train_dataloader=trn_dl) ``` Real fix: ensure that given trainer args, the scaler is initialized to non-nil before it's needed elsewhere, needs contributors to weigh in on how. see also https://github.com/PyTorchLightning/pytorch-lightning/issues/2642 seems to be duplicate to #1827
2020-08-19T02:08:12Z
[]
[]
Traceback (most recent call last): File "pipe/train_cnn.py", line 237, in <module> main(create_submission=True) File "pipe/train_cnn.py", line 48, in main preds, weight_fpath = train(fold_number=fold_number, folds=folds) File "pipe/train_cnn.py", line 120, in train trainer.fit(model) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 956, in fit self._run_lr_finder_internally(model) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/lr_finder.py", line 58, in _run_lr_finder_internally lr_finder = self.lr_find(model) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/lr_finder.py", line 180, in lr_find self.save_checkpoint(str(save_path)) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/training_io.py", line 268, in save_checkpoint checkpoint = self.dump_checkpoint(weights_only) File "/home/gianluca/git/kaggle/siim-isic-melanoma-classification/.venv/lib/python3.7/site-packages/pytorch_lightning/trainer/training_io.py", line 362, in dump_checkpoint checkpoint['native_amp_scaling_state'] = self.scaler.state_dict() AttributeError: 'NoneType' object has no attribute 'state_dict'
318
Lightning-AI/lightning
Lightning-AI__lightning-3229
f7dac3ff6c1b807734437188c66c226d490853f6
diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -476,7 +476,8 @@ def run_training_batch(self, batch, batch_idx, dataloader_idx): self.accumulated_loss.append(opt_closure_result.loss) # track all the outputs across all steps - batch_outputs[opt_idx].append(opt_closure_result.training_step_output_for_epoch_end) + batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0 + batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end) # ------------------------------ # BACKWARD PASS
Trainer crashed when optimizer frequency is defined. <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> ### To Reproduce Steps to reproduce the behavior: Run the following code: https://gist.github.com/24hours/ec67de5384bb05e28544d580ae424639 <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> ``` Traceback (most recent call last): File "pl_bug.py", line 40, in <module> trainer.fit(mnist_model, train_loader) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/states.py", line 48, in wrapped_fn result = fn(self, *args, **kwargs) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 1073, in fit results = self.accelerator_backend.train(model) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/accelerators/gpu_backend.py", line 51, in train results = self.trainer.run_pretrain_routine(model) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 1239, in run_pretrain_routine self.train() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 394, in train self.run_training_epoch() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 491, in run_training_epoch batch_output = self.run_training_batch(batch, batch_idx) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 883, in run_training_batch batch_outputs[opt_idx].append(opt_closure_result.training_step_output_for_epoch_end) ``` #### Code sample ```python def configure_optimizers(self): optimizer_G = torch.optim.Adam(self.parameters(), lr=0.1, weight_decay=1e-5) optimizer_D = torch.optim.Adam(self.parameters(), lr=0.1, weight_decay=1e-5) return [ {'optimizer': optimizer_D, 'frequency': 5}, {'optimizer': optimizer_G, 'frequency': 1} ] ``` the culprit is 'frequency' : 5, removing the line will allow trainer to run smoothly. https://pytorch-lightning.readthedocs.io/en/latest/lightning-module.html?highlight=optimizers#configure-optimizers The definition is correct according to this documentation. ### Expected behavior Model should train without crash. The code work in 0.8.5 environment. ### Environment * CUDA: - GPU: - GeForce GTX TITAN X - available: True - version: 11.0 * Packages: - numpy: 1.18.5 - pyTorch_debug: False - pyTorch_version: 1.6.0a0+9907a3e - pytorch-lightning: 0.9.0 - tensorboard: 2.2.0 - tqdm: 4.48.2 * System: - OS: Linux - architecture: - 64bit - - processor: x86_64 - python: 3.6.10 - version: #110-Ubuntu SMP Tue Jun 23 02:39:32 UTC 2020
Hi! thanks for your contribution!, great first issue! this is the case with multiple optimizers, you need to spec them.. so you would prefer having default 1 if freq is not specified? the exception occur because trainer_loop.py incorrect count number of optimizer if `frequency` is defined. Since this configuration work in version 0.8.5, this look like regression error. Unless of course if the configuration is unsupported in version 0.9.0 > so you would prefer having default 1 if freq is not specified? That is a totally different case I think. If the frequency is not specified we run `train_step` for both optimizers but if it is specified to 1 for both then in such case it will run 1st batch for opt_1, 2nd for opt_2, 3rd for opt_1, 4th for opt_2... A simple fix here can be: https://github.com/PyTorchLightning/pytorch-lightning/blob/a7705c8677b9e2b5105e26a38db9d1b650182576/pytorch_lightning/trainer/training_loop.py#L872 ```python if len(batch_outputs) == 1: # when frequencies are defined batch_outputs[0].append(opt_closure_result.training_step_output_for_epoch_end) else: # no frequencies batch_outputs[opt_idx].append(opt_closure_result.training_step_output_for_epoch_end) ``` ok, can someone write a test and submit a PR for this? show the test failing on master first. @awaelchli or @rohitgr7 ?
2020-08-27T18:51:21Z
[]
[]
Traceback (most recent call last): File "pl_bug.py", line 40, in <module> trainer.fit(mnist_model, train_loader) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/states.py", line 48, in wrapped_fn result = fn(self, *args, **kwargs) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 1073, in fit results = self.accelerator_backend.train(model) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/accelerators/gpu_backend.py", line 51, in train results = self.trainer.run_pretrain_routine(model) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 1239, in run_pretrain_routine self.train() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 394, in train self.run_training_epoch() File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 491, in run_training_epoch batch_output = self.run_training_batch(batch, batch_idx) File "/opt/conda/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 883, in run_training_batch batch_outputs[opt_idx].append(opt_closure_result.training_step_output_for_epoch_end) ``` #### Code sample ```python def configure_optimizers(self): optimizer_G = torch.optim.Adam(self.parameters(), lr=0.1, weight_decay=1e-5) optimizer_D = torch.optim.Adam(self.parameters(), lr=0.1, weight_decay=1e-5) return [ {'optimizer': optimizer_D, 'frequency': 5}, {'optimizer': optimizer_G, 'frequency': 1} ] ``` the culprit is 'frequency' : 5, removing the line will allow trainer to run smoothly.
336
Lightning-AI/lightning
Lightning-AI__lightning-3404
ff5f099cb759c9e77f363732ab20c9ec9e380f9b
diff --git a/pytorch_lightning/accelerators/horovod_backend.py b/pytorch_lightning/accelerators/horovod_backend.py --- a/pytorch_lightning/accelerators/horovod_backend.py +++ b/pytorch_lightning/accelerators/horovod_backend.py @@ -72,11 +72,6 @@ def setup(self, model): if isinstance(scheduler, _LRScheduler): scheduler.base_lrs = [lr * hvd.size() for lr in scheduler.base_lrs] - if self.trainer.amp_backend: - model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level) - self.trainer.optimizers = optimizers - self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers) - # Horovod: broadcast parameters & optimizer state to ensure consistent initialization hvd.broadcast_parameters(model.state_dict(), root_rank=0) for optimizer in self.trainer.optimizers: @@ -92,6 +87,11 @@ def filter_named_parameters(model, optimizer): for optimizer in self.trainer.optimizers ] + if self.trainer.amp_backend == AMPType.APEX: + model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level) + self.trainer.optimizers = optimizers + self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers) + # Update logger rank info from Horovod to avoid race conditions from different ranks # creating directories / writing files in the same locations. self.trainer.global_rank = hvd.rank()
Horovod with native 16 precision not working <!-- ### Common bugs: 1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). 2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) --> ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> ### To Reproduce Steps to reproduce the behavior: 1. using precision=16 with distributed_backend=horovod ``` Traceback (most recent call last): File "/workspace/main_lightning.py", line 500, in <module> main(hyperparams) File "/workspace/main_lightning.py", line 492, in main trainer.fit(model) File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/states.py", line 48, in wrapped_fn result = fn(self, *args, **kwargs) File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/trainer.py", line 1068, in fit results = self.horovod_train(model) File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/distrib_parts.py", line 213, in horovod_train model, optimizers = model.configure_apex(amp, model, self.optimizers, self.amp_level) File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/core/lightning.py", line 954, in configure_apex model, optimizers = amp.initialize(model, optimizers, opt_level=amp_level) ``` #### Code sample ``` trainer = Trainer( precision=16, gpus=1, distributed_backend="horovod") ``` ### Environment - PyTorch Version: 1.6.0+cu101 - How you installed PyTorch: pip
Mind having a look, @tgaddair? 🐰 Absolutely, let me take a look today and get back to you, @mutasem-mattar.
2020-09-08T21:34:31Z
[]
[]
Traceback (most recent call last): File "/workspace/main_lightning.py", line 500, in <module> main(hyperparams) File "/workspace/main_lightning.py", line 492, in main trainer.fit(model) File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/states.py", line 48, in wrapped_fn result = fn(self, *args, **kwargs) File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/trainer.py", line 1068, in fit results = self.horovod_train(model) File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/distrib_parts.py", line 213, in horovod_train model, optimizers = model.configure_apex(amp, model, self.optimizers, self.amp_level) File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/core/lightning.py", line 954, in configure_apex model, optimizers = amp.initialize(model, optimizers, opt_level=amp_level) ``` #### Code sample ``` trainer = Trainer(
355
Lightning-AI/lightning
Lightning-AI__lightning-453
37729f0a17995e847fa8693f0fe694f8dd0b259b
diff --git a/pytorch_lightning/root_module/memory.py b/pytorch_lightning/root_module/memory.py --- a/pytorch_lightning/root_module/memory.py +++ b/pytorch_lightning/root_module/memory.py @@ -3,6 +3,7 @@ ''' import gc +import os import subprocess import numpy as np @@ -198,19 +199,10 @@ def get_memory_profile(mode): memory_map = get_gpu_memory_map() if mode == 'min_max': - min_mem = 1000000 - min_k = None - max_mem = 0 - max_k = None - for k, v in memory_map: - if v > max_mem: - max_mem = v - max_k = k - if v < min_mem: - min_mem = v - min_k = k - - memory_map = {min_k: min_mem, max_k: max_mem} + min_index, min_memory = min(memory_map.items(), key=lambda item: item[1]) + max_index, max_memory = max(memory_map.items(), key=lambda item: item[1]) + + memory_map = {min_index: min_memory, max_index: max_memory} return memory_map @@ -224,17 +216,18 @@ def get_gpu_memory_map(): Keys are device ids as integers. Values are memory usage as integers in MB. """ - result = subprocess.check_output( + result = subprocess.run( [ - 'nvidia-smi', '--query-gpu=memory.used', - '--format=csv,nounits,noheader' - ], encoding='utf-8') + 'nvidia-smi', + '--query-gpu=memory.used', + '--format=csv,nounits,noheader', + ], + encoding='utf-8', + capture_output=True, + check=True) # Convert lines into a dictionary - gpu_memory = [int(x) for x in result.strip().split('\n')] - gpu_memory_map = {} - for k, v in zip(range(len(gpu_memory)), gpu_memory): - k = f'gpu_{k}' - gpu_memory_map[k] = v + gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)] + gpu_memory_map = {f'gpu_{index}': memory for index, memory in enumerate(gpu_memory)} return gpu_memory_map
min_max log_gpu_memory option bug **Describe the bug** Setting `log_gpu_memory='min_max'` in `Trainer` leads to the following bug. ``` Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 347, in fit self.single_gpu_train(model) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/dp_mixin.py", line 79, in single_gpu_train self.run_pretrain_routine(model) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 467, in run_pretrain_routine self.train() File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 60, in train self.run_training_epoch() File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 126, in run_training_epoch self.log_metrics(batch_step_metrics, grad_norm_dic) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/logging_mixin.py", line 20, in log_metrics mem_map = memory.get_memory_profile(self.log_gpu_memory) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/root_module/memory.py", line 205, in get_memory_profile for k, v in memory_map: ValueError: too many values to unpack (expected 2) ``` **To Reproduce** On current master, execute the following. ``` trainer = Trainer( ... log_gpu_memory='min_max', ... ) trainer.fit(model) ``` **Expected behavior** Log the min/max utilization of gpu memory, as `min_max` option is documented. **Desktop (please complete the following information):** - OS: Ubuntu 18.04 - Version: Current master I am working on this issue. Will submit a PR soon.
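The failure and the fix from the patch above can be reproduced with an ordinary dictionary; the memory numbers below are made up and `nvidia-smi` is not involved.

```python
memory_map = {"gpu_0": 3241, "gpu_1": 512, "gpu_2": 10781}

# Buggy pattern: iterating a dict yields its keys, so the string "gpu_0" gets unpacked into (k, v).
try:
    for k, v in memory_map:
        pass
except ValueError as err:
    print(f"ValueError: {err}")   # too many values to unpack (expected 2)

# Fix from the patch above: take min/max over .items(), keyed on the memory value.
min_index, min_memory = min(memory_map.items(), key=lambda item: item[1])
max_index, max_memory = max(memory_map.items(), key=lambda item: item[1])
print({min_index: min_memory, max_index: max_memory})   # {'gpu_1': 512, 'gpu_2': 10781}
```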
2019-11-03T14:35:56Z
[]
[]
Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 347, in fit self.single_gpu_train(model) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/dp_mixin.py", line 79, in single_gpu_train self.run_pretrain_routine(model) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py", line 467, in run_pretrain_routine self.train() File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 60, in train self.run_training_epoch() File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 126, in run_training_epoch self.log_metrics(batch_step_metrics, grad_norm_dic) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/trainer/logging_mixin.py", line 20, in log_metrics mem_map = memory.get_memory_profile(self.log_gpu_memory) File "/opt/conda/lib/python3.7/site-packages/pytorch_lightning/root_module/memory.py", line 205, in get_memory_profile for k, v in memory_map: ValueError: too many values to unpack (expected 2)
370
Lightning-AI/lightning
Lightning-AI__lightning-499
d1b6b011c3403c6ca1c27c66ec6a613cdad0955f
diff --git a/pytorch_lightning/utilities/arg_parse.py b/pytorch_lightning/utilities/arg_parse.py --- a/pytorch_lightning/utilities/arg_parse.py +++ b/pytorch_lightning/utilities/arg_parse.py @@ -81,7 +81,7 @@ def add_default_args(parser, root_dir, rand_seed=None, possible_model_names=None parser.add_argument('--enable_tqdm', dest='enable_tqdm', default=False, action='store_true', help='false removes the progress bar') parser.add_argument('--overfit', default=-1, type=float, - help='% of dataset to use with this option. float, or -1 for none') + help='%% of dataset to use with this option. float, or -1 for none') # debug args if rand_seed is not None:
Escaping % in add_default_args **Describe the bug** In utilities/arg_parse.py, a percentage symbol is not escaped and would cause an error when printing help information. ```python parser.add_argument('--overfit', default=-1, type=float, help='% of dataset to use with this option. float, or -1 for none') ``` **To Reproduce** Steps to reproduce the behavior: ``` import os import random import sys from pytorch_lightning.utilities.arg_parse import add_default_args from test_tube import HyperOptArgumentParser, Experiment if __name__ == "__main__": root_dir = os.path.split(os.path.dirname(sys.modules['__main__'].__file__))[0] parent_parser = HyperOptArgumentParser(strategy='random_search', add_help=True) add_default_args(parent_parser, root_dir) hyperparams = parent_parser.parse_args() ``` Execute the file with `--help` ``` python temp.py --help ``` Throws an error: ``` WARNING:root:This caffe2 python run does not have GPU support. Will run in CPU only mode. Traceback (most recent call last): File "/Users/chenghaomou/Code/ai2/temp.py", line 11, in <module> hyperparams = parent_parser.parse_args() File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/site-packages/test_tube/argparse_hopt.py", line 238, in parse_args results = self.__parse_args(args, namespace) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/site-packages/test_tube/argparse_hopt.py", line 157, in __parse_args args, argv = self.parse_known_args(args, namespace) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1782, in parse_known_args namespace, args = self._parse_known_args(args, namespace) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1988, in _parse_known_args start_index = consume_optional(start_index) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1928, in consume_optional take_action(action, args, option_string) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1856, in take_action action(self, namespace, argument_values, option_string) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1038, in __call__ parser.print_help() File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 2475, in print_help self._print_message(self.format_help(), file) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 2459, in format_help return formatter.format_help() File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 284, in format_help help = self._root_section.format_help() File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 215, in format_help item_help = join([func(*args) for func, args in self.items]) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 215, in <listcomp> item_help = join([func(*args) for func, args in self.items]) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 215, in format_help item_help = join([func(*args) for func, args in self.items]) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 215, in <listcomp> item_help = join([func(*args) for func, args in self.items]) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 525, in _format_action help_text = self._expand_help(action) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 615, in _expand_help return self._get_help_string(action) % params TypeError: %o format: an integer is 
required, not dict ``` **Expected behavior** Escape the percentage sign and help can be printed. **Desktop (please complete the following information):** - OS: macOS 10.15 - Browser Chrome - Version 78.0.3904.87 **Additional context** Add any other context about the problem here.
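The one-character fix above (escaping `%` as `%%`) is easy to verify with a minimal `argparse` example; this sketch uses `format_help()` instead of `--help` so it does not exit the interpreter.

```python
from argparse import ArgumentParser

# Unescaped "%" in argparse help is fed through printf-style formatting when the help
# text is rendered ("% o..." parses as a "%o" placeholder), reproducing the TypeError above.
bad = ArgumentParser()
bad.add_argument("--overfit", default=-1, type=float,
                 help="% of dataset to use with this option. float, or -1 for none")
try:
    bad.format_help()
except TypeError as err:
    print(f"TypeError: {err}")    # %o format: an integer is required, not dict

# The fix: "%%" renders a literal percent sign.
good = ArgumentParser()
good.add_argument("--overfit", default=-1, type=float,
                  help="%% of dataset to use with this option. float, or -1 for none")
print(good.format_help())
```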
2019-11-12T18:39:31Z
[]
[]
Traceback (most recent call last): File "/Users/chenghaomou/Code/ai2/temp.py", line 11, in <module> hyperparams = parent_parser.parse_args() File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/site-packages/test_tube/argparse_hopt.py", line 238, in parse_args results = self.__parse_args(args, namespace) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/site-packages/test_tube/argparse_hopt.py", line 157, in __parse_args args, argv = self.parse_known_args(args, namespace) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1782, in parse_known_args namespace, args = self._parse_known_args(args, namespace) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1988, in _parse_known_args start_index = consume_optional(start_index) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1928, in consume_optional take_action(action, args, option_string) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1856, in take_action action(self, namespace, argument_values, option_string) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 1038, in __call__ parser.print_help() File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 2475, in print_help self._print_message(self.format_help(), file) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 2459, in format_help return formatter.format_help() File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 284, in format_help help = self._root_section.format_help() File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 215, in format_help item_help = join([func(*args) for func, args in self.items]) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 215, in <listcomp> item_help = join([func(*args) for func, args in self.items]) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 215, in format_help item_help = join([func(*args) for func, args in self.items]) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 215, in <listcomp> item_help = join([func(*args) for func, args in self.items]) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 525, in _format_action help_text = self._expand_help(action) File "/Users/chenghaomou/Anaconda/envs/Elisa/lib/python3.7/argparse.py", line 615, in _expand_help return self._get_help_string(action) % params TypeError: %o format: an integer is required, not dict
374
Lightning-AI/lightning
Lightning-AI__lightning-575
89ececb32ba0cfd810737cb90b2285a27332f5d4
diff --git a/pytorch_lightning/callbacks/pt_callbacks.py b/pytorch_lightning/callbacks/pt_callbacks.py --- a/pytorch_lightning/callbacks/pt_callbacks.py +++ b/pytorch_lightning/callbacks/pt_callbacks.py @@ -312,16 +312,16 @@ def on_epoch_end(self, epoch, logs=None): self.best = max(self.best_k_models.values()) if self.verbose > 0: logging.info( - f'\nEpoch {epoch:05d}: {self.monitor} reached', - f'{current:0.5f} (best {self.best:0.5f}), saving model to', - f'{filepath} as top {self.save_top_k}') + f'\nEpoch {epoch:05d}: {self.monitor} reached' + f' {current:0.5f} (best {self.best:0.5f}), saving model to' + f' {filepath} as top {self.save_top_k}') self._save_model(filepath) else: if self.verbose > 0: logging.info( - f'\nEpoch {epoch:05d}: {self.monitor}', - f'was not in top {self.save_top_k}') + f'\nEpoch {epoch:05d}: {self.monitor}' + f' was not in top {self.save_top_k}') else: if self.verbose > 0:
Error in `logging` call `pt_callbacks.py` Stack trace: ``` .... [00:04<00:00, 1.83s/batch, batch_nb=1, loss=0.478, v_nb=13--- Logging error --- Traceback (most recent call last): File "/Users/kdang/.pyenv/versions/3.6.7/lib/python3.6/logging/__init__.py", line 994, in emit msg = self.format(record) File "/Users/kdang/.pyenv/versions/3.6.7/lib/python3.6/logging/__init__.py", line 840, in format return fmt.format(record) File "/Users/kdang/.pyenv/versions/3.6.7/lib/python3.6/logging/__init__.py", line 577, in format record.message = record.getMessage() File "/Users/kdang/.pyenv/versions/3.6.7/lib/python3.6/logging/__init__.py", line 338, in getMessage msg = msg % self.args TypeError: not all arguments converted during string formatting Call stack: File "edge_regressor_lightning.py", line 172, in <module> cli() File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/click/core.py", line 764, in __call__ return self.main(*args, **kwargs) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/click/core.py", line 717, in main rv = self.invoke(ctx) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/click/core.py", line 1137, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/click/core.py", line 956, in invoke return ctx.invoke(self.callback, **ctx.params) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/click/core.py", line 555, in invoke return callback(*args, **kwargs) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/click/decorators.py", line 17, in new_func return f(get_current_context(), *args, **kwargs) File "edge_regressor_lightning.py", line 166, in train trainer.fit(model) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 366, in fit self.run_pretrain_routine(model) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 474, in run_pretrain_routine self.train() File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 211, in train self.run_training_epoch() File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 265, in run_training_epoch self.run_evaluation(test=self.testing) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/pytorch_lightning/trainer/evaluation_loop_mixin.py", line 286, in run_evaluation logs=self.callback_metrics) File "/Users/kdang/.pyenv/versions/id_detection_ssd/lib/python3.6/site-packages/pytorch_lightning/callbacks/pt_callbacks.py", line 317, in on_epoch_end f'{filepath} as top {self.save_top_k}') Message: '\nEpoch 00010: val_loss reached' ``` This is due to problem errors on these lines ``` if self.verbose > 0: logging.info( f'\nEpoch {epoch:05d}: {self.monitor} reached ', f'{current:0.5f} (best {self.best:0.5f}), saving model to ', f'{filepath} as top {self.save_top_k}') self._save_model(filepath) else: if self.verbose > 0: logging.info( f'\nEpoch {epoch:05d}: {self.monitor} ', f'was not in top {self.save_top_k}') ``` The fix is simple. just need to change `,` to `+` for string concatenation
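A self-contained sketch of the bug and the fix described above; the values below are dummies, but the two `logging.info` call shapes match the callback code.

```python
import logging

logging.basicConfig(level=logging.INFO)
epoch, monitor, current, best = 3, "val_loss", 0.412, 0.398
filepath, top_k = "checkpoints/epoch_3.ckpt", 1

# Buggy pattern: the extra f-strings are passed as %-style format args, and since the
# message has no placeholders, logging prints "--- Logging error ---" with
# "TypeError: not all arguments converted during string formatting".
logging.info(
    f"\nEpoch {epoch:05d}: {monitor} reached",
    f"{current:0.5f} (best {best:0.5f}), saving model to",
    f"{filepath} as top {top_k}")

# Fixed pattern: adjacent f-strings concatenate into a single message string.
logging.info(
    f"\nEpoch {epoch:05d}: {monitor} reached"
    f" {current:0.5f} (best {best:0.5f}), saving model to"
    f" {filepath} as top {top_k}")
```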
2019-12-03T09:19:11Z
[]
[]
Traceback (most recent call last): File "/Users/kdang/.pyenv/versions/3.6.7/lib/python3.6/logging/__init__.py", line 994, in emit msg = self.format(record) File "/Users/kdang/.pyenv/versions/3.6.7/lib/python3.6/logging/__init__.py", line 840, in format return fmt.format(record) File "/Users/kdang/.pyenv/versions/3.6.7/lib/python3.6/logging/__init__.py", line 577, in format record.message = record.getMessage() File "/Users/kdang/.pyenv/versions/3.6.7/lib/python3.6/logging/__init__.py", line 338, in getMessage msg = msg % self.args TypeError: not all arguments converted during string formatting
384
Lightning-AI/lightning
Lightning-AI__lightning-701
bc67689068a0db11adaf10b32a41bcd33b8ae88e
diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -151,7 +151,7 @@ def training_step(self, batch, batch_idx): """ - +import copy import inspect from abc import ABC, abstractmethod import warnings @@ -586,7 +586,7 @@ def training_forward(self, batch, batch_idx, opt_idx, hiddens): gpu_id = 0 if isinstance(self.data_parallel_device_ids, list): gpu_id = self.data_parallel_device_ids[0] - batch = self.transfer_batch_to_gpu(batch.copy(), gpu_id) + batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id) args[0] = batch output = self.model.training_step(*args)
batch may not have the copy method ## 🐛 Bug In this commit: https://github.com/PyTorchLightning/pytorch-lightning/commit/48b797fdb046bab73fc04ef6d6780f05d3623485 The training `batch` is copied before `transfer_batch_to_gpu `, but a batch may not have the `copy` method. Thus, the following error will be raised in some cases (e.g., the batch is a tuple ): ``` Traceback (most recent call last): File "scripts/msmacro.py", line 113, in <module> main() File "scripts/msmacro.py", line 109, in main trainer.fit(model) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 405, in fit self.single_gpu_train(model) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/distrib_parts.py", line 441, in single_gpu_train self.run_pretrain_routine(model) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 539, in run_pretrain_routine self.train() File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 332, in train self.run_training_epoch() File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 385, in run_training_epoch output = self.run_training_batch(batch, batch_idx) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 504, in run_training_batch loss = optimizer_closure() File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 473, in optimizer_closure split_batch, batch_idx, opt_idx, self.hiddens) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 589, in training_forward batch = self.transfer_batch_to_gpu(batch.copy(), gpu_id) AttributeError: 'tuple' object has no attribute 'copy' ```
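The difference the patch relies on can be shown with a bare tuple; this sketch assumes nothing about Lightning's batch structure beyond "it may be a tuple".

```python
import copy

batch = (("input_ids",), ("labels",))   # e.g. a tuple produced by a collate_fn

# Buggy pattern: not every batch type has a .copy() method.
try:
    batch.copy()
except AttributeError as err:
    print(f"AttributeError: {err}")     # 'tuple' object has no attribute 'copy'

# Fix from the patch above: copy.copy() shallow-copies tuples, lists, dicts, ... alike.
shallow = copy.copy(batch)
print(shallow == batch)                 # True
```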
2020-01-17T11:51:57Z
[]
[]
Traceback (most recent call last): File "scripts/msmacro.py", line 113, in <module> main() File "scripts/msmacro.py", line 109, in main trainer.fit(model) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 405, in fit self.single_gpu_train(model) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/distrib_parts.py", line 441, in single_gpu_train self.run_pretrain_routine(model) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 539, in run_pretrain_routine self.train() File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 332, in train self.run_training_epoch() File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 385, in run_training_epoch output = self.run_training_batch(batch, batch_idx) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 504, in run_training_batch loss = optimizer_closure() File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 473, in optimizer_closure split_batch, batch_idx, opt_idx, self.hiddens) File "/home/zhaohao/Documents/pytorch-lightning/pytorch_lightning/trainer/training_loop.py", line 589, in training_forward batch = self.transfer_batch_to_gpu(batch.copy(), gpu_id) AttributeError: 'tuple' object has no attribute 'copy'
398
PrefectHQ/prefect
PrefectHQ__prefect-1165
ce13ac978c14fd6df79a66be501f5c83f245c0f6
diff --git a/src/prefect/engine/signals.py b/src/prefect/engine/signals.py --- a/src/prefect/engine/signals.py +++ b/src/prefect/engine/signals.py @@ -23,8 +23,9 @@ class PrefectStateSignal(PrefectError): def __init__(self, message: str = None, *args, **kwargs): # type: ignore super().__init__(message) # type: ignore + kwargs.setdefault("result", self) self.state = self._state_cls( # type: ignore - result=self, message=message, *args, **kwargs + message=message, *args, **kwargs )
Cannot raise a skip signal with a result I am filing an issue by suggestion of @cicdw after a conversation on gitter. I came up with the following use case: a task that raises a skip signal with a result because its logic has detected that there is no work to do and the result is already calculated somewhere. I could just return it, but it would be useful for me to know that the _heavy_ part of the task did not actually execute. An example of the use case would be: ```python from prefect import task, Flow from prefect.engine import signals @task def test_skipped(): raise signals.SKIP('skipping', result=5) f = Flow("test", tasks=[test_skipped]) flow_state = f.run() ``` which fails because of how the `PrefectStateSignal` constructor handles its initialization: ``` Traceback (most recent call last): File ".../prefect/engine/signals.py", line 27, in __init__ result=self, message=message, *args, **kwargs TypeError: type object got multiple values for keyword argument 'result' ``` Chris suggested the following workaround, which works correctly, but still pointed out that the case above should work. ```python from prefect import task, Flow from prefect.engine.runner import ENDRUN from prefect.engine.state import Skipped @task def test_skipped(): skip = Skipped("skipping", result=5) raise ENDRUN(state=skip) f = Flow("test", tasks=[test_skipped]) flow_state = f.run() flow_state.result[test_skipped].result # 5 ```
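As an editorial aside on the record above: the patch resolves the keyword collision by letting a caller-supplied `result` win via `kwargs.setdefault`. Below is a tiny, framework-free sketch of that pattern; the function name and placeholder default are made up for illustration and are not Prefect code.

```python
# Toy illustration of the setdefault approach from the patch above: a default
# is injected only when the caller did not pass `result` explicitly, so an
# explicit result=5 no longer collides with the internally supplied value.
def build_state(message=None, **kwargs):
    kwargs.setdefault("result", "<signal instance>")  # placeholder default
    return {"message": message, **kwargs}

print(build_state("skipping", result=5))  # {'message': 'skipping', 'result': 5}
print(build_state("skipping"))            # falls back to the placeholder default
```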
2019-06-21T23:17:24Z
[]
[]
Traceback (most recent call last): File ".../prefect/engine/signals.py", line 27, in __init__ result=self, message=message, *args, **kwargs TypeError: type object got multiple values for keyword argument 'result'
465
PrefectHQ/prefect
PrefectHQ__prefect-1704
39131bbce118029957cc3137c7f5483b14a9e65e
diff --git a/src/prefect/environments/storage/docker.py b/src/prefect/environments/storage/docker.py --- a/src/prefect/environments/storage/docker.py +++ b/src/prefect/environments/storage/docker.py @@ -102,17 +102,20 @@ def __init__( else: # create an image from python:*-slim directly self.base_image = "python:{}-slim".format(python_version) - self.extra_commands.extend( - [ - "apt update && apt install -y gcc git && rm -rf /var/lib/apt/lists/*", - "pip install git+https://github.com/PrefectHQ/prefect.git@{}#egg=prefect[kubernetes]".format( - self.prefect_version - ), - ] + self.extra_commands.append( + "apt update && apt install -y gcc git && rm -rf /var/lib/apt/lists/*", ) else: self.base_image = base_image + # we should always try to install prefect, unless it is already installed. We can't determine this until + # image build time. + self.extra_commands.append( + "pip show prefect || pip install git+https://github.com/PrefectHQ/prefect.git@{}#egg=prefect[kubernetes]".format( + self.prefect_version + ), + ) + not_absolute = [ file_path for file_path in self.files if not os.path.isabs(file_path) ]
Cloudpickle error when base_image isn't specified ## Description If you remove the base_image kwarg from storage and attempt to deploy a flow, a Cloudpickle error is triggered Traceback (most recent call last): File "/root/.prefect/healthcheck.py", line 12, in <module> import cloudpickle ModuleNotFoundError: No module named 'cloudpickle' ## Expected Behavior No errors and flow deploys as expected (note: using kwarg base_image="prefecthq/prefect:0.7.0-3.7" allowed me to deploy my flow successfully) ## Reproduction Remove base_image kwarg from storage and deploy flow ## Environment Running Core 0.7.0
@cicdw @wagoodman Do you think this could be due to cached layers? I haven't encountered this yet and I often don't provide a base image. @nanseay could you include what version of Python you are running? Yep, Python 3.7.3 We looked at this offline and it seems like it's an issue with assumptions on the Docker storage. If you provide that your `base_image` is `python:3.7` and you specify a `prefect_version` of `0.7.0` it won't install that version of prefect (no matter what you provide). This is due to the assumption that if a base image is provided the docker storage does not perform the extra prefect installation. https://github.com/PrefectHQ/prefect/blob/master/src/prefect/environments/storage/docker.py#L94 After more discussion, we think the best way forward is to always attempt to install the prefect package unless it is already installed. For example, regardless of the `base_image` or `prefect_version` there is an expectation that prefect will be installed, thus we should always attempt to install it. However, we should check for existing installations (via `pip show prefect`) and do not reinstall if it already exists. Additionally, it would be good to check if the user specified a `prefect_version` that the final installed version matches what the user provided. This will be a safety measure for when a user brings a specific image that already has prefect installed, and additionally provides a `prefect_version` (which mismatches the version from the base image)... we want the flow deploy to fail since there is an unexpected version in use.
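A rough sketch of the install-if-missing idea discussed in the hints above, expressed as the shell command string such a storage class might append to its build steps. The version pin is illustrative only; the `pip show prefect || pip install ...` form mirrors the patch in this record.

```python
# Hypothetical sketch: build a "pip show || pip install" command so the image
# build only installs prefect when the base image does not already provide it.
prefect_version = "0.7.0"  # illustrative pin, not prescribed by the issue
extra_commands = []
extra_commands.append(
    "pip show prefect || "
    "pip install git+https://github.com/PrefectHQ/prefect.git@{}#egg=prefect[kubernetes]".format(
        prefect_version
    )
)
print(extra_commands[-1])
```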
2019-11-05T15:01:30Z
[]
[]
Traceback (most recent call last): File "/root/.prefect/healthcheck.py", line 12, in <module> import cloudpickle ModuleNotFoundError: No module named 'cloudpickle'
538
PrefectHQ/prefect
PrefectHQ__prefect-1782
d91c5ebf3f7d6a11bdb895125efe203e8ba34bab
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py --- a/src/prefect/core/flow.py +++ b/src/prefect/core/flow.py @@ -1086,13 +1086,21 @@ def get_color(task: Task, map_index: int = None) -> str: name = "{} <map>".format(t.name) if is_mapped else t.name if is_mapped and flow_state: assert isinstance(flow_state.result, dict) - for map_index, _ in enumerate(flow_state.result[t].map_states): + if flow_state.result[t].is_mapped(): + for map_index, _ in enumerate(flow_state.result[t].map_states): + kwargs = dict( + color=get_color(t, map_index=map_index), + style="filled", + colorscheme="svg", + ) + graph.node( + str(id(t)) + str(map_index), name, shape=shape, **kwargs + ) + else: kwargs = dict( - color=get_color(t, map_index=map_index), - style="filled", - colorscheme="svg", + color=get_color(t), style="filled", colorscheme="svg", ) - graph.node(str(id(t)) + str(map_index), name, shape=shape, **kwargs) + graph.node(str(id(t)), name, shape=shape, **kwargs) else: kwargs = ( {} @@ -1108,15 +1116,22 @@ def get_color(task: Task, map_index: int = None) -> str: or any(edge.mapped for edge in self.edges_to(e.downstream_task)) ) and flow_state: assert isinstance(flow_state.result, dict) - for map_index, _ in enumerate( - flow_state.result[e.downstream_task].map_states - ): - upstream_id = str(id(e.upstream_task)) - if any(edge.mapped for edge in self.edges_to(e.upstream_task)): - upstream_id += str(map_index) + down_state = flow_state.result[e.downstream_task] + if down_state.is_mapped(): + for map_index, _ in enumerate(down_state.map_states): + upstream_id = str(id(e.upstream_task)) + if any(edge.mapped for edge in self.edges_to(e.upstream_task)): + upstream_id += str(map_index) + graph.edge( + upstream_id, + str(id(e.downstream_task)) + str(map_index), + e.key, + style=style, + ) + else: graph.edge( - upstream_id, - str(id(e.downstream_task)) + str(map_index), + str(id(e.upstream_task)), + str(id(e.downstream_task)), e.key, style=style, )
Flow state visualization fails if a mapped task is skipped ## Description When a flow contains a task which maps over a collection and this task is skipped, the visualization of the flow state fails with the following error message: > AttributeError: 'Skipped' object has no attribute 'map_states' The flow itself executes successfully. ## Expectation Mapped tasks should be visualised without error as a grey box. This was the case in earlier versions of prefect. ## Reproduction A slightly modified version of the ETL flow from the documentation: ```python from prefect import task, Flow, Parameter from prefect.tasks.control_flow.conditional import ifelse @task def extract(): """Get a list of data""" return [1, 2, 3] @task def transform(data): """Multiply the input by 10""" return [i * 10 for i in data] @task def load(data): """Print the data to indicate it was received""" print("Here's your data: {}".format(data)) with Flow('ETL') as flow: do_load = Parameter('do_load') e = extract() t = transform(e) l = load.map(t) ifelse(do_load, l, None) state = flow.run(do_load=False) flow.visualize(flow_state=state) ``` Both changing `do_load` to `True` or removing the `map` on the load task will lead to a successful visualization being produced. Output: ``` [2019-11-21 09:41:33,824] INFO - prefect.FlowRunner | Beginning Flow run for 'ETL' [2019-11-21 09:41:33,826] INFO - prefect.FlowRunner | Starting flow run. [2019-11-21 09:41:33,831] INFO - prefect.TaskRunner | Task 'extract': Starting task run... [2019-11-21 09:41:33,834] INFO - prefect.TaskRunner | Task 'extract': finished task run for task with final state: 'Success' [2019-11-21 09:41:33,839] INFO - prefect.TaskRunner | Task 'do_load': Starting task run... [2019-11-21 09:41:33,841] INFO - prefect.TaskRunner | Task 'do_load': finished task run for task with final state: 'Success' [2019-11-21 09:41:33,848] INFO - prefect.TaskRunner | Task 'CompareValue: "False"': Starting task run... [2019-11-21 09:41:33,850] INFO - prefect.TaskRunner | Task 'CompareValue: "False"': finished task run for task with final state: 'Success' [2019-11-21 09:41:33,856] INFO - prefect.TaskRunner | Task 'CompareValue: "True"': Starting task run... [2019-11-21 09:41:33,858] INFO - prefect.TaskRunner | Task 'CompareValue: "True"': finished task run for task with final state: 'Skipped' [2019-11-21 09:41:33,863] INFO - prefect.TaskRunner | Task 'transform': Starting task run... [2019-11-21 09:41:33,865] INFO - prefect.TaskRunner | Task 'transform': finished task run for task with final state: 'Success' [2019-11-21 09:41:33,870] INFO - prefect.TaskRunner | Task 'load': Starting task run... [2019-11-21 09:41:33,872] INFO - prefect.TaskRunner | Task 'load': finished task run for task with final state: 'Skipped' [2019-11-21 09:41:33,874] INFO - prefect.FlowRunner | Flow run SUCCESS: all reference tasks succeeded Traceback (most recent call last): File "min_example.py", line 28, in <module> flow.visualize(flow_state=state) File "/Users/jtherhaa/miniconda3/envs/prefect/lib/python3.6/site-packages/prefect/core/flow.py", line 1089, in visualize for map_index, _ in enumerate(flow_state.result[t].map_states): AttributeError: 'Skipped' object has no attribute 'map_states' ``` ## Environment prefect 0.7.2 on macOS Mojave 10.14.6
2019-11-30T00:47:11Z
[]
[]
Traceback (most recent call last): File "min_example.py", line 28, in <module> flow.visualize(flow_state=state) File "/Users/jtherhaa/miniconda3/envs/prefect/lib/python3.6/site-packages/prefect/core/flow.py", line 1089, in visualize for map_index, _ in enumerate(flow_state.result[t].map_states): AttributeError: 'Skipped' object has no attribute 'map_states'
546
PrefectHQ/prefect
PrefectHQ__prefect-1862
43fb417c3020dc0e91b4c5d34b0ce6c52492214b
diff --git a/src/prefect/engine/cloud/task_runner.py b/src/prefect/engine/cloud/task_runner.py --- a/src/prefect/engine/cloud/task_runner.py +++ b/src/prefect/engine/cloud/task_runner.py @@ -3,13 +3,14 @@ import _thread import time import warnings -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Iterable, Optional, Tuple import pendulum import prefect from prefect.client import Client from prefect.core import Edge, Task +from prefect.utilities.executors import tail_recursive from prefect.engine.cloud.utilities import prepare_state_for_cloud from prefect.engine.result import NoResult, Result from prefect.engine.result_handlers import ResultHandler @@ -249,6 +250,7 @@ def check_task_is_cached(self, state: State, inputs: Dict[str, Result]) -> State return state + @tail_recursive def run( self, state: State = None, diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py --- a/src/prefect/engine/task_runner.py +++ b/src/prefect/engine/task_runner.py @@ -46,7 +46,11 @@ TimedOut, TriggerFailed, ) -from prefect.utilities.executors import run_with_heartbeat +from prefect.utilities.executors import ( + run_with_heartbeat, + tail_recursive, + RecursiveCall, +) if TYPE_CHECKING: from prefect.engine.result_handlers import ResultHandler @@ -177,6 +181,7 @@ def initialize_run( # type: ignore return TaskRunnerInitializeResult(state=state, context=context) + @tail_recursive def run( self, state: State = None, @@ -310,6 +315,8 @@ def run( if exc.state.is_pending() or exc.state.is_failed(): exc.state.cached_inputs = task_inputs or {} # type: ignore state = exc.state + except RecursiveCall as exc: + raise exc except Exception as exc: msg = "Task '{name}': unexpected error while running task: {exc}".format( @@ -1028,7 +1035,9 @@ def check_task_is_looping( ) context.update(task_run_version=prefect.context.get("task_run_version")) new_state = Pending(message=msg) - return self.run( + raise RecursiveCall( + self.run, + self, new_state, upstream_states=upstream_states, context=context, diff --git a/src/prefect/utilities/executors.py b/src/prefect/utilities/executors.py --- a/src/prefect/utilities/executors.py +++ b/src/prefect/utilities/executors.py @@ -286,3 +286,56 @@ def run_with_ctx(*args: Any, _ctx_dict: dict, **kwargs: Any) -> Any: return fut.result(timeout=timeout) except FutureTimeout: raise TimeoutError("Execution timed out.") + + +class RecursiveCall(Exception): + def __init__(self, func: Callable, *args: Any, **kwargs: Any): + self.func = func + self.args = args + self.kwargs = kwargs + + +def tail_recursive(func: Callable) -> Callable: + """ + Helper function to facilitate tail recursion of the wrapped function. + + This allows for recursion with unlimited depth since a stack is not allocated for + each "nested" call. Note: instead of calling the target function in question, a + `RecursiveCall` exception must be raised instead. + + Args: + - fn (callable): the function to execute + + Returns: + - the result of `f(*args, **kwargs)` + + Raises: + - RecursionError: if a recursive "call" (raised exception) is made with a function that is + not decorated with `tail_recursive` decorator. 
+ """ + + @wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> Any: + while True: + try: + return func(*args, **kwargs) + except RecursiveCall as exc: + try: + call_func = getattr(exc.func, "__wrapped_func__") + except AttributeError: + raise RecursionError( + "function has not been wrapped to provide tail recursion (func={})".format( + exc.func + ) + ) + + # there may be multiple nested recursive calls, we should only respond to calls for the + # wrapped function explicitly, otherwise allow the call to continue to propagate + if call_func != func: + raise exc + args = exc.args + kwargs = exc.kwargs + continue + + setattr(wrapper, "__wrapped_func__", func) + return wrapper
LOOPing a Task depends on the recursion limit for Python ## Description *A clear description of the bug* Right now, Looping a task relies on recursion. This can cause the user to experience the following error: `RecursionError: maximum recursion depth exceeded in comparison`. Unless the user updates the system recursion limit for python (which may not be a great idea) the maximum number of times their task can loop is capped. ## Expected Behavior *What did you expect to happen instead?* I expected looping to support more than a limited loop count. ## Reproduction *A minimal example that exhibits the behavior.* ```python import prefect from prefect import task, Flow from prefect.engine.signals import LOOP @task def example(): loop_payload = prefect.context.get("task_loop_result", 0) if loop_payload < 4000: loop_payload += 1 raise LOOP(result=loop_payload) return loop_payload with Flow("Example") as flow: result = example() ``` I run into the following error: ```bash In [16]: flow.run() [2019-12-10 23:16:25,571] INFO - prefect.FlowRunner | Beginning Flow run for 'Example' [2019-12-10 23:16:25,575] INFO - prefect.FlowRunner | Starting flow run. [2019-12-10 23:16:25,583] INFO - prefect.TaskRunner | Task 'example': Starting task run... [2019-12-10 23:16:29,646] ERROR - prefect.TaskRunner | Task 'example': unexpected error while running task: RecursionError('maximum recursion depth exceeded in comparison') Traceback (most recent call last): File "/Users/dylanhughes/dev/prefect/src/prefect/engine/task_runner.py", line 229, in run with prefect.context(context): File "/Users/dylanhughes/miniconda3/envs/product_flows/lib/python3.7/contextlib.py", line 112, in __enter__ return next(self.gen) File "/Users/dylanhughes/dev/prefect/src/prefect/utilities/context.py", line 111, in __call__ previous_context = self.copy() File "/Users/dylanhughes/dev/prefect/src/prefect/utilities/collections.py", line 107, in copy return type(self)(self.__dict__.copy()) File "/Users/dylanhughes/dev/prefect/src/prefect/utilities/context.py", line 78, in __init__ super().__init__(*args, **kwargs) File "/Users/dylanhughes/dev/prefect/src/prefect/utilities/collections.py", line 62, in __init__ super().update(init_dict) File "/Users/dylanhughes/miniconda3/envs/product_flows/lib/python3.7/_collections_abc.py", line 839, in update if isinstance(other, Mapping): File "/Users/dylanhughes/miniconda3/envs/product_flows/lib/python3.7/abc.py", line 139, in __instancecheck__ return _abc_instancecheck(cls, instance) RecursionError: maximum recursion depth exceeded in comparison [2019-12-10 23:16:29,750] INFO - prefect.TaskRunner | Task 'example': finished task run for task with final state: 'Failed' [2019-12-10 23:16:29,753] INFO - prefect.TaskRunner | Task 'example': finished task run for task with final state: 'Failed' [2019-12-10 23:16:29,753] INFO - prefect.FlowRunner | Flow run FAILED: some reference tasks failed. Out[16]: <Failed: "Some reference tasks failed."> ``` ## Environment *Any additional information about your environment* Standard python environment (python 3.7 docker container)
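For context on the fix in this record's patch, here is a compact, framework-free sketch of the trampoline idea: instead of calling itself, the function raises an exception carrying the next call's arguments, and a wrapping loop replays the call, so the stack depth stays constant no matter how many iterations run. Names below are illustrative and are not Prefect's API.

```python
# Minimal trampoline sketch: recursion is replaced by raise-and-replay,
# so looping 100k times never grows the Python call stack.
from functools import wraps


class RecursiveCall(Exception):
    def __init__(self, *args, **kwargs):
        self.call_args = args
        self.call_kwargs = kwargs


def tail_recursive(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        while True:
            try:
                return func(*args, **kwargs)
            except RecursiveCall as call:          # replay instead of recursing
                args, kwargs = call.call_args, call.call_kwargs

    return wrapper


@tail_recursive
def accumulate(n, total=0):
    if n == 0:
        return total
    raise RecursiveCall(n - 1, total + n)          # "tail call" with no new frame


assert accumulate(100_000) == sum(range(100_001))
```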
Specific task run for reference https://cloud.prefect.io/prefect-qa2/task-run/fa0d004f-df37-40c4-a2b7-a71318ce0724
2019-12-17T15:21:34Z
[]
[]
Traceback (most recent call last): File "/Users/dylanhughes/dev/prefect/src/prefect/engine/task_runner.py", line 229, in run with prefect.context(context): File "/Users/dylanhughes/miniconda3/envs/product_flows/lib/python3.7/contextlib.py", line 112, in __enter__ return next(self.gen) File "/Users/dylanhughes/dev/prefect/src/prefect/utilities/context.py", line 111, in __call__ previous_context = self.copy() File "/Users/dylanhughes/dev/prefect/src/prefect/utilities/collections.py", line 107, in copy return type(self)(self.__dict__.copy()) File "/Users/dylanhughes/dev/prefect/src/prefect/utilities/context.py", line 78, in __init__ super().__init__(*args, **kwargs) File "/Users/dylanhughes/dev/prefect/src/prefect/utilities/collections.py", line 62, in __init__ super().update(init_dict) File "/Users/dylanhughes/miniconda3/envs/product_flows/lib/python3.7/_collections_abc.py", line 839, in update if isinstance(other, Mapping): File "/Users/dylanhughes/miniconda3/envs/product_flows/lib/python3.7/abc.py", line 139, in __instancecheck__ return _abc_instancecheck(cls, instance) RecursionError: maximum recursion depth exceeded in comparison
554
PrefectHQ/prefect
PrefectHQ__prefect-1991
4aa4808648dd8c49d4a2aa35417fcc277f1e5d56
diff --git a/src/prefect/tasks/control_flow/conditional.py b/src/prefect/tasks/control_flow/conditional.py --- a/src/prefect/tasks/control_flow/conditional.py +++ b/src/prefect/tasks/control_flow/conditional.py @@ -110,7 +110,11 @@ def ifelse(condition: Task, true_task: Task, false_task: Task) -> None: - false_task (Task): a task that will be executed if the condition is False """ - switch(condition=condition, cases={True: true_task, False: false_task}) + @prefect.task + def as_bool(x): + return bool(x) + + switch(condition=as_bool(condition), cases={True: true_task, False: false_task}) def merge(*tasks: Task) -> Task:
ifelse checks for True/False rather than truthy/falsy values ## Description `prefect.tasks.control_flow.conditional.ifelse` should check for truthy/falsy values, but (relying on `switch`) checks for exact equality to `True` or `False`. ## Expected Behavior [From the docs](https://docs.prefect.io/core/task_library/control_flow.html#if-else): > If the condition evaluates True(ish), the true_task will run. If it evaluates False(ish), the false_task will run. `ifelse` should run the `true_branch` for any value that evaluates to `True`: non-empty strings, dicts and lists, ints not equal to 0... ## Reproduction ```python from prefect import Flow, task from prefect.tasks.control_flow.conditional import ifelse, merge @task def run_if_truthy(): return 'a' @task def run_if_falsy(): return 'b' @task def return_truthy_value(): # non-empty strings are truthy assert('c') return 'c' with Flow('test-flow') as flow: branch_truthy = run_if_truthy() branch_falsy = run_if_falsy() ifelse(return_truthy_value(), branch_truthy, branch_falsy) merged_result = merge(branch_truthy, branch_falsy) result = flow.run() assert(not result.result.get(merged_result).is_skipped()) assert(result.result.get(merged_result)._result.value == 'a') ``` Output: ``` [2020-02-03 16:38:57,428] INFO - prefect.FlowRunner | Beginning Flow run for 'test-flow' [2020-02-03 16:38:57,431] INFO - prefect.FlowRunner | Starting flow run. [2020-02-03 16:38:57,441] INFO - prefect.TaskRunner | Task 'return_truthy_value': Starting task run... [2020-02-03 16:38:57,445] INFO - prefect.TaskRunner | Task 'return_truthy_value': finished task run for task with final state: 'Success' [2020-02-03 16:38:57,455] INFO - prefect.TaskRunner | Task 'CompareValue: "True"': Starting task run... [2020-02-03 16:38:57,460] INFO - prefect.TaskRunner | Task 'CompareValue: "True"': finished task run for task with final state: 'Skipped' [2020-02-03 16:38:57,470] INFO - prefect.TaskRunner | Task 'run_if_truthy': Starting task run... [2020-02-03 16:38:57,474] INFO - prefect.TaskRunner | Task 'run_if_truthy': finished task run for task with final state: 'Skipped' [2020-02-03 16:38:57,483] INFO - prefect.TaskRunner | Task 'CompareValue: "False"': Starting task run... [2020-02-03 16:38:57,488] INFO - prefect.TaskRunner | Task 'CompareValue: "False"': finished task run for task with final state: 'Skipped' [2020-02-03 16:38:57,497] INFO - prefect.TaskRunner | Task 'run_if_falsy': Starting task run... [2020-02-03 16:38:57,501] INFO - prefect.TaskRunner | Task 'run_if_falsy': finished task run for task with final state: 'Skipped' [2020-02-03 16:38:57,510] INFO - prefect.TaskRunner | Task 'Merge': Starting task run... [2020-02-03 16:38:57,514] INFO - prefect.TaskRunner | Task 'Merge': finished task run for task with final state: 'Skipped' [2020-02-03 16:38:57,516] INFO - prefect.FlowRunner | Flow run SUCCESS: all reference tasks succeeded Traceback (most recent call last): File "/tmp/test-flow.py", line 25, in <module> assert(not result.result.get(merged_result).is_skipped()) AssertionError ``` ## Environment Prefect 0.9.2, Python 3.6, on Linux x64.
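To make the failure mode concrete, here is a small plain-Python illustration of why an exact-equality switch misses truthy values and why coercing the condition to `bool` (as the patch above does) fixes it:

```python
# The switch maps the literal keys True/False to branches; a truthy value such
# as the string 'c' matches neither key, so both branches end up skipped.
cases = {True: "run_if_truthy", False: "run_if_falsy"}

condition = "c"                    # truthy, but not the object True
print(cases.get(condition))        # None  -> neither branch selected
print(cases.get(bool(condition)))  # 'run_if_truthy' once the value is coerced
```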
2020-02-07T02:01:29Z
[]
[]
Traceback (most recent call last): File "/tmp/test-flow.py", line 25, in <module> assert(not result.result.get(merged_result).is_skipped()) AssertionError
573
PrefectHQ/prefect
PrefectHQ__prefect-2047
e3e540e2bb18e3454016d06c698e086770b6ad36
diff --git a/src/prefect/engine/executors/dask.py b/src/prefect/engine/executors/dask.py --- a/src/prefect/engine/executors/dask.py +++ b/src/prefect/engine/executors/dask.py @@ -33,8 +33,10 @@ class DaskExecutor(Executor): Defaults to `False`. - debug (bool, optional): whether to operate in debug mode; `debug=True` will produce many additional dask logs. Defaults to the `debug` value in your Prefect configuration - - **kwargs (dict, optional): additional kwargs to be passed to the - `dask.distributed.Client` upon initialization (e.g., `n_workers`) + - **kwargs (dict, optional): additional kwargs to be passed to the [`dask.distributed.Client`](https://distributed.dask.org/en/latest/api.html#client) upon + initialization (e.g., `n_workers`, `security`, etc.), which will also pass any unmatched kwargs down to child objects such as + [`distributed.deploy.local.LocalCluster`](https://docs.dask.org/en/latest/setup/single-distributed.html#distributed.deploy.local.LocalCluster). + Please see the Dask docs to see all of the options that child objects will respond to. """ def __init__(
DaskExecutor.address doesn't work with Dask Gateway proxy ## Description Hello! I'm attempting to run a Prefect flow with DaskExecutor connected to a Dask cluster that was created using [Dask Gateway.](https://github.com/dask/dask-gateway) This raises an SSL error, however it could have something to do with my DG implementation. DG is relatively new, so I'm wondering if it has been tested with Prefect? Thanks! ## Expected Behavior *What did you expect to happen instead?* Flow to run as it normally would with a DaskExecutor. ## Reproduction ``` from dask_gateway import Gateway from prefect.engine.executors import DaskExecutor from prefect import task, Flow import datetime import random from time import sleep @task def inc(x): sleep(random.random() / 10) return x + 1 with Flow("dask-example") as flow: incs = inc.map(x=range(100)) gateway = Gateway() cluster = gateway.new_cluster() cluster.scale(4) # Example scheduler address from DG: 'gateway://dask-scheduler-proxy.<fqdn>:443/<hash from dg>' executor = DaskExecutor(address=cluster.scheduler_address) flow.run(executor=executor) ``` Error: ``` [2020-01-28 19:17:47,571] INFO - prefect.FlowRunner | Beginning Flow run for 'dask-example' [2020-01-28 19:17:47,574] INFO - prefect.FlowRunner | Starting flow run. [2020-01-28 19:17:47,578] ERROR - prefect.FlowRunner | Unexpected error: TypeError('Gateway expects a `ssl_context` argument of type ssl.SSLContext, instead got None') Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/flow_runner.py", line 400, in get_flow_run_state with executor.start(): File "/opt/conda/lib/python3.7/contextlib.py", line 112, in __enter__ return next(self.gen) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/executors/dask.py", line 75, in start with Client(self.address, **self.kwargs) as client: File "/opt/conda/lib/python3.7/site-packages/distributed/client.py", line 728, in __init__ self.start(timeout=timeout) File "/opt/conda/lib/python3.7/site-packages/distributed/client.py", line 893, in start sync(self.loop, self._start, **kwargs) File "/opt/conda/lib/python3.7/site-packages/distributed/utils.py", line 335, in sync raise exc.with_traceback(tb) File "/opt/conda/lib/python3.7/site-packages/distributed/utils.py", line 319, in f result[0] = yield future File "/opt/conda/lib/python3.7/site-packages/tornado/gen.py", line 735, in run value = future.result() File "/opt/conda/lib/python3.7/site-packages/distributed/client.py", line 986, in _start await self._ensure_connected(timeout=timeout) File "/opt/conda/lib/python3.7/site-packages/distributed/client.py", line 1043, in _ensure_connected connection_args=self.connection_args, File "/opt/conda/lib/python3.7/site-packages/distributed/comm/core.py", line 218, in connect quiet_exceptions=EnvironmentError, File "/opt/conda/lib/python3.7/site-packages/dask_gateway/comm.py", line 41, in connect "ssl.SSLContext, instead got %s" % ctx TypeError: Gateway expects a `ssl_context` argument of type ssl.SSLContext, instead got None [2020-01-28 19:17:47,584] ERROR - prefect.Flow: dask-example | Unexpected error occured in FlowRunner: TypeError('Gateway expects a `ssl_context` argument of type ssl.SSLContext, instead got None') <Failed: "Unexpected error: TypeError('Gateway expects a `ssl_context` argument of type ssl.SSLContext, instead got None')"> ``` ## Environment Dask cluster running on 
Kubernetes managed with Dask Gateway.
Successful connection and execution of `prefect` flow by passing in the `cluster.security` attribute as a `kwarg`: ``` executor = DaskExecutor(address=cluster.scheduler_address, security=cluster.security) flow.run(executor=executor) [2020-01-28 21:10:04,687] INFO - prefect.FlowRunner | Beginning Flow run for 'dask-example' [2020-01-28 21:10:04,690] INFO - prefect.FlowRunner | Starting flow run. [2020-01-28 21:10:07,820] INFO - prefect.FlowRunner | Flow run SUCCESS: all reference tasks succeeded <Success: "All reference tasks succeeded."> ``` Great! @cicdw You may want to have someone add a note to the docs on how to do this.
2020-02-18T21:33:05Z
[]
[]
Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/flow_runner.py", line 400, in get_flow_run_state with executor.start(): File "/opt/conda/lib/python3.7/contextlib.py", line 112, in __enter__ return next(self.gen) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/executors/dask.py", line 75, in start with Client(self.address, **self.kwargs) as client: File "/opt/conda/lib/python3.7/site-packages/distributed/client.py", line 728, in __init__ self.start(timeout=timeout) File "/opt/conda/lib/python3.7/site-packages/distributed/client.py", line 893, in start sync(self.loop, self._start, **kwargs) File "/opt/conda/lib/python3.7/site-packages/distributed/utils.py", line 335, in sync raise exc.with_traceback(tb) File "/opt/conda/lib/python3.7/site-packages/distributed/utils.py", line 319, in f result[0] = yield future File "/opt/conda/lib/python3.7/site-packages/tornado/gen.py", line 735, in run value = future.result() File "/opt/conda/lib/python3.7/site-packages/distributed/client.py", line 986, in _start await self._ensure_connected(timeout=timeout) File "/opt/conda/lib/python3.7/site-packages/distributed/client.py", line 1043, in _ensure_connected connection_args=self.connection_args, File "/opt/conda/lib/python3.7/site-packages/distributed/comm/core.py", line 218, in connect quiet_exceptions=EnvironmentError, File "/opt/conda/lib/python3.7/site-packages/dask_gateway/comm.py", line 41, in connect "ssl.SSLContext, instead got %s" % ctx TypeError: Gateway expects a `ssl_context` argument of type ssl.SSLContext, instead got None
580
PrefectHQ/prefect
PrefectHQ__prefect-2136
974625cfcb6bbd317afa36f320f6fe0575bdba54
diff --git a/src/prefect/engine/result_handlers/s3_result_handler.py b/src/prefect/engine/result_handlers/s3_result_handler.py --- a/src/prefect/engine/result_handlers/s3_result_handler.py +++ b/src/prefect/engine/result_handlers/s3_result_handler.py @@ -7,6 +7,7 @@ import cloudpickle import pendulum +import prefect from prefect.client import Secret from prefect.engine.result_handlers import ResultHandler @@ -54,7 +55,10 @@ def initialize_client(self) -> None: aws_access_key = aws_credentials["ACCESS_KEY"] aws_secret_access_key = aws_credentials["SECRET_ACCESS_KEY"] - s3_client = boto3.client( + # use a new boto session when initializing in case we are in a new thread + # see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?#multithreading-multiprocessing + session = boto3.session.Session() + s3_client = session.client( "s3", aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_access_key, @@ -63,8 +67,13 @@ def initialize_client(self) -> None: @property def client(self) -> "boto3.client": - if not hasattr(self, "_client"): + """ + Initializes a client if we believe we are in a new thread. + We consider ourselves in a new thread if we haven't stored a client yet in the current context. + """ + if not prefect.context.get("boto3client"): self.initialize_client() + prefect.context["boto3client"] = self._client return self._client @client.setter
Flow S3ResultHandler Fails for Dask Worker with nthreads > 1 ## Description Specifying S3ResultHandler for a Flow running on Dask worker(s) with nthreads > 1 fails with: `KeyError: 'credential_provider'`, likely due to a race condition in using the global boto3 session (boto3.client) between threads. ## Expected Behavior In a multithreaded environment, boto3 recommends creating a session per thread rather than sharing the default boto3 session, i.e. boto3.client. See boto3 documentation at: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing This thread in Prefect's Community Slack describes using this session-per-thread approach to successfully fix a similar issue when using boto3 in Prefect tasks: https://prefect-community.slack.com/archives/CM28LL405/p1581434710167100 ## Reproduction A Flow with tasks that can run in parallel (e.g. mapped tasks or different Flow branches) and where the Flow-level result_handler is set to S3ResultHandler should reproduce this behavior. Full stack trace: ``` February 29th 2020 at 8:09:43am | prefect.CloudTaskRunner ERROR Failed to set task state with error: KeyError('credential_provider') Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/prefect/engine/cloud/task_runner.py", line 117, in call_runner_target_handlers cloud_state = prepare_state_for_cloud(new_state) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/cloud/utilities.py", line 21, in prepare_state_for_cloud res.store_safe_value() File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result.py", line 93, in store_safe_value value = self.result_handler.write(self.value) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 103, in write self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 67, in client self.initialize_client() File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 60, in initialize_client aws_secret_access_key=aws_secret_access_key, File "/opt/conda/lib/python3.7/site-packages/boto3/__init__.py", line 91, in client return _get_default_session().client(*args, **kwargs) File "/opt/conda/lib/python3.7/site-packages/boto3/session.py", line 263, in client aws_session_token=aws_session_token, config=config) File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 823, in create_client credentials = self.get_credentials() File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 428, in get_credentials 'credential_provider').load_credentials() File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 923, in get_component del self._deferred[name] KeyError: 'credential_provider' ``` ## Environment We create a long-running Dask cluster where our Dask workers are started with --nprocs 1 --nthreads 3. (Thanks to @JLouSRM for identifying this issue and capturing log evidence!)
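A rough sketch of the session-per-thread pattern referenced above. It assumes `boto3` is installed and AWS credentials are configured; the bucket and key names are placeholders, not values from the issue.

```python
# Each thread builds its own boto3 Session and client instead of sharing the
# module-level boto3.client(), which is the usage the boto3 docs recommend
# for multithreaded code.
import io
import threading

import boto3


def upload(payload: bytes, key: str) -> None:
    session = boto3.session.Session()   # session local to this thread
    client = session.client("s3")
    client.upload_fileobj(io.BytesIO(payload), Bucket="example-bucket", Key=key)


threads = [
    threading.Thread(target=upload, args=(b"data", f"results/{i}")) for i in range(3)
]
for t in threads:
    t.start()
for t in threads:
    t.join()
```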
Very interesting - thanks for the issue! For reference, [this is the boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing) referenced in that Slack thread, and Adam said: > By first creating a session and then creating the client from the session, each thread has a different session. As an aside, this could also motivate introducing a new [S3FS](https://github.com/dask/s3fs) Result Handler (similar to the discussion in https://github.com/PrefectHQ/prefect/issues/1475).
2020-03-09T22:03:19Z
[]
[]
Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/prefect/engine/cloud/task_runner.py", line 117, in call_runner_target_handlers cloud_state = prepare_state_for_cloud(new_state) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/cloud/utilities.py", line 21, in prepare_state_for_cloud res.store_safe_value() File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result.py", line 93, in store_safe_value value = self.result_handler.write(self.value) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 103, in write self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 67, in client self.initialize_client() File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 60, in initialize_client aws_secret_access_key=aws_secret_access_key, File "/opt/conda/lib/python3.7/site-packages/boto3/__init__.py", line 91, in client return _get_default_session().client(*args, **kwargs) File "/opt/conda/lib/python3.7/site-packages/boto3/session.py", line 263, in client aws_session_token=aws_session_token, config=config) File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 823, in create_client credentials = self.get_credentials() File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 428, in get_credentials 'credential_provider').load_credentials() File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 923, in get_component del self._deferred[name] KeyError: 'credential_provider'
591
PrefectHQ/prefect
PrefectHQ__prefect-2205
f3717b1a5b3625efe919c1c0c9f1a8e938b3b39d
diff --git a/src/prefect/engine/result_handlers/s3_result_handler.py b/src/prefect/engine/result_handlers/s3_result_handler.py --- a/src/prefect/engine/result_handlers/s3_result_handler.py +++ b/src/prefect/engine/result_handlers/s3_result_handler.py @@ -36,6 +36,7 @@ class S3ResultHandler(ResultHandler): def __init__(self, bucket: str, aws_credentials_secret: str = None) -> None: self.bucket = bucket self.aws_credentials_secret = aws_credentials_secret + self._client = None super().__init__() def initialize_client(self) -> None: @@ -71,9 +72,10 @@ def client(self) -> "boto3.client": Initializes a client if we believe we are in a new thread. We consider ourselves in a new thread if we haven't stored a client yet in the current context. """ - if not prefect.context.get("boto3client"): + if not prefect.context.get("boto3client") or not self._client: self.initialize_client() prefect.context["boto3client"] = self._client + return self._client @client.setter
S3ResultHandler fails during Cloud Flow Run ## Description Running on `0.9.8` we've observed the S3ResultHandler failing with `AttributeError: 'S3ResultHandler' object has no attribute '_client'`. This was also reported by another user in the Prefect Community Slack. (See thread: https://prefect-community.slack.com/archives/CL09KU1K7/p1584980231422300) For now we are working around this by disabling checkpointing in all of our tasks, but this will obviously prevent tasks from being able to retry, etc. ## Expected Behavior S3ResultHandler should correctly checkpoint tasks to store outputs in an S3 bucket. ## Reproduction The following is a stub Flow that recreates the issue: ``` import prefect from prefect import Flow from prefect.tasks.shell import ShellTask from prefect.environments.storage import S3 from prefect.engine.result_handlers import S3ResultHandler env = <an environment, we use our own secure Dask environment for TLS by extending RemoteEnvironment> BUCKET = "<redacted>" s3_storage = S3(bucket=BUCKET) rh = S3ResultHandler(BUCKET) shell_task = ShellTask(name="shell", return_all=True) with Flow("RecreateS3ResultHandlerIssue", environment=env, storage=s3_storage, result_handler=rh) as flow: final_cmd = ["ls","cd;ls"] embulk_results = shelltask.map(command=final_cmd) s3_storage.build() flow.register(project_name="Test Project 1") # Now run Flow from Cloud either via UI or API ``` The Flow run will fail with this full stack trace: ``` Unexpected error: AttributeError("'S3ResultHandler' object has no attribute '_client'") Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 925, in get_task_run_state state._result.store_safe_value() File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result.py", line 121, in store_safe_value value = self.result_handler.write(self.value) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 112, in write self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 77, in client return self._client AttributeError: 'S3ResultHandler' object has no attribute '_client' ``` ## Environment We use a long-running Dask cluster on k8s via AWS EKS. (Hat tip to @JLouSRM for identifying this issue.)
Hi! I will take a look and try to reproduce this evening. I changed the S3ResultHandler in 0.9.8 while trying to make the boto3 client thread safe, so I'm probably the one who broke it. Looking at it, I think there's a chance the `client` property is retrieved without `self._client` having been set first. https://github.com/PrefectHQ/prefect/blob/master/src/prefect/engine/result_handlers/s3_result_handler.py#L77 Possibly defining `self._client = None` in `__init__` would do the trick.
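A small illustration of the sentinel approach suggested above: initialise the cached attribute to `None` so the property can lazily (re)build the client instead of hitting `AttributeError`. The stand-in object below replaces the real boto3 client and the class name is hypothetical.

```python
# Lazy initialisation guarded by a None sentinel rather than by attribute
# existence, so a freshly constructed (or unpickled) handler never raises
# AttributeError the first time the property is read.
class LazyClientHolder:
    def __init__(self):
        self._client = None            # sentinel set up front

    def _initialize_client(self):
        self._client = object()        # stands in for a real boto3 client

    @property
    def client(self):
        if self._client is None:
            self._initialize_client()
        return self._client


holder = LazyClientHolder()
assert holder.client is holder.client  # built once, reused afterwards
```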
2020-03-27T19:06:20Z
[]
[]
Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 925, in get_task_run_state state._result.store_safe_value() File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result.py", line 121, in store_safe_value value = self.result_handler.write(self.value) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 112, in write self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 77, in client return self._client AttributeError: 'S3ResultHandler' object has no attribute '_client'
606
PrefectHQ/prefect
PrefectHQ__prefect-2233
bdf152392320be34c77bb9886a2cf876e52f5f93
diff --git a/src/prefect/engine/result_handlers/s3_result_handler.py b/src/prefect/engine/result_handlers/s3_result_handler.py --- a/src/prefect/engine/result_handlers/s3_result_handler.py +++ b/src/prefect/engine/result_handlers/s3_result_handler.py @@ -72,7 +72,7 @@ def client(self) -> "boto3.client": Initializes a client if we believe we are in a new thread. We consider ourselves in a new thread if we haven't stored a client yet in the current context. """ - if not prefect.context.get("boto3client") or not self._client: + if not prefect.context.get("boto3client") or not getattr(self, "_client", None): self.initialize_client() prefect.context["boto3client"] = self._client
S3ResultHandler still failing on 0.10.0 ## Description We're still seeing https://github.com/PrefectHQ/prefect/issues/2204 after upgrading to `0.10.0`. We do see the code change from https://github.com/PrefectHQ/prefect/pull/2205 in the stack trace: ``` Unexpected error: AttributeError("'S3ResultHandler' object has no attribute '_client'") Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 925, in get_task_run_state state._result.store_safe_value() File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result/base.py", line 127, in store_safe_value value = self.result_handler.write(self.value) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 114, in write self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 75, in client if not prefect.context.get("boto3client") or not self._client: AttributeError: 'S3ResultHandler' object has no attribute '_client' ``` Just eyeballing it, maybe hasattr() will fix it, i.e. `if not prefect.context.get("boto3client") or not hasattr(self, "_client"):` will fix it. ## Expected Behavior S3ResultHandler should checkpoint task outputs. ## Reproduction See previous issue. ## Environment We're running the Flow via Cloud with S3 storage.
2020-03-31T16:56:46Z
[]
[]
Traceback (most recent call last): File "/opt/conda/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 925, in get_task_run_state state._result.store_safe_value() File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result/base.py", line 127, in store_safe_value value = self.result_handler.write(self.value) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 114, in write self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri) File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 75, in client if not prefect.context.get("boto3client") or not self._client: AttributeError: 'S3ResultHandler' object has no attribute '_client'
610
PrefectHQ/prefect
PrefectHQ__prefect-2337
1babcb7f38d1ff5a8e7eeec06a2ae7bbe7eeb89b
diff --git a/src/prefect/cli/auth.py b/src/prefect/cli/auth.py --- a/src/prefect/cli/auth.py +++ b/src/prefect/cli/auth.py @@ -182,17 +182,17 @@ def switch_tenants(id, slug): @auth.command(hidden=True) @click.option("--name", "-n", required=True, help="A token name.", hidden=True) -@click.option("--role", "-r", required=True, help="A token role.", hidden=True) -def create_token(name, role): +@click.option("--scope", "-s", required=True, help="A token scopre.", hidden=True) +def create_token(name, scope): """ Create a Prefect Cloud API token. - For more info on API tokens visit https://docs.prefect.io/cloud/concepts/api.html + For more info on API tokens visit https://docs.prefect.io/orchestration/concepts/api.html \b Options: --name, -n TEXT A name to give the generated token - --role, -r TEXT A role for the token + --scope, -r TEXT A scope for the token """ check_override_auth_token() @@ -204,7 +204,7 @@ def create_token(name, role): "create_api_token(input: $input)": {"token"} } }, - variables=dict(input=dict(name=name, role=role)), + variables=dict(input=dict(name=name, scope=scope)), ) if not output.get("data", None):
HTTPError attempting to retrieve a runner token from the CLI ## Description Attempting to follow the docs, I tried to create a runner token from the command line after successfully logging in with a user token. I was greeted with an HTTPError (400) ``` (default) prefect auth login -t $(cat token) Login successful! (default) prefect auth create-token -n my-runner-token -r RUNNER Traceback (most recent call last): File "/opt/continuum/anaconda/envs/default/bin/prefect", line 6, in <module> exit(cli()) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/prefect/cli/auth.py", line 201, in create_token output = client.graphql( File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/prefect/client/client.py", line 212, in graphql result = self.post( File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/prefect/client/client.py", line 171, in post response = self._request( File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/prefect/client/client.py", line 314, in _request response.raise_for_status() File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/requests/models.py", line 941, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://api.prefect.io/graphql/alpha ``` ## Expected Behavior I expected to get a token ## Reproduction ``` conda create -n default -c defaults -c conda-forge prefect source activate prefect prefect auth login -t <TOKEN> prefect auth create-token -n my-runner-token -r RUNNER ``` ## Environment Conda environment: ``` # This file may be used to create an environment using: # $ conda create --name <env> --file <this file> # platform: linux-64 _libgcc_mutex=0.1=main appdirs=1.4.3=pyh91ea838_0 asn1crypto=1.3.0=py38_0 blas=1.0=mkl bokeh=2.0.1=py38_0 ca-certificates=2020.1.1=0 certifi=2020.4.5.1=py38_0 cffi=1.14.0=py38h2e261b9_0 chardet=3.0.4=py38_1003 click=7.1.1=py_0 cloudpickle=1.2.2=py_0 croniter=0.3.30=py_0 cryptography=2.8=py38h1ba5d50_0 cytoolz=0.10.1=py38h7b6447c_0 dask=2.14.0=py_0 dask-core=2.14.0=py_0 distributed=2.14.0=py38_0 docker-py=4.2.0=py38_0 docker-pycreds=0.4.0=py_0 freetype=2.9.1=h8a8886c_1 fsspec=0.7.1=py_0 heapdict=1.0.1=py_0 idna=2.9=py_1 intel-openmp=2020.0=166 jinja2=2.11.1=py_0 jpeg=9b=h024ee3a_2 ld_impl_linux-64=2.33.1=h53a641e_7 libedit=3.1.20181209=hc058e9b_0 libffi=3.2.1=hd88cf55_4 libgcc-ng=9.1.0=hdf63c60_0 libgfortran-ng=7.3.0=hdf63c60_0 libpng=1.6.37=hbc83047_0 libstdcxx-ng=9.1.0=hdf63c60_0 libtiff=4.1.0=h2733197_0 locket=0.2.0=py38_1 markupsafe=1.1.1=py38h7b6447c_0 marshmallow=3.5.1=py_0 
marshmallow-oneofschema=2.0.1=py_0 mkl=2020.0=166 mkl-service=2.3.0=py38he904b0f_0 mkl_fft=1.0.15=py38ha843d7b_0 mkl_random=1.1.0=py38h962f231_0 msgpack-python=1.0.0=py38hfd86e86_1 mypy_extensions=0.4.3=py38_0 ncurses=6.2=he6710b0_0 numpy=1.18.1=py38h4f9e942_0 numpy-base=1.18.1=py38hde5b4d6_1 olefile=0.46=py_0 openssl=1.1.1f=h7b6447c_0 packaging=20.3=py_0 pandas=1.0.3=py38h0573a6f_0 partd=1.1.0=py_0 pendulum=2.1.0=py38_1 pillow=7.0.0=py38hb39fc2d_0 pip=20.0.2=py38_1 prefect=0.10.2=py_0 psutil=5.7.0=py38h7b6447c_0 pycparser=2.20=py_0 pyopenssl=19.1.0=py38_0 pyparsing=2.4.6=py_0 pysocks=1.7.1=py38_0 python=3.8.2=hcf32534_0 python-box=4.2.2=py_0 python-dateutil=2.8.1=py_0 python-slugify=3.0.4=py_0 pytz=2019.3=py_0 pytzdata=2019.3=py_0 pyyaml=5.3.1=py38h7b6447c_0 readline=8.0=h7b6447c_0 requests=2.23.0=py38_0 ruamel.yaml=0.16.5=py38h7b6447c_1 ruamel.yaml.clib=0.2.0=py38h7b6447c_0 setuptools=46.1.3=py38_0 six=1.14.0=py38_0 sortedcontainers=2.1.0=py38_0 sqlite=3.31.1=h7b6447c_0 tabulate=0.8.3=py38_0 tblib=1.6.0=py_0 text-unidecode=1.3=py_0 tk=8.6.8=hbc83047_0 toml=0.10.0=pyh91ea838_0 toolz=0.10.0=py_0 tornado=6.0.4=py38h7b6447c_1 typing_extensions=3.7.4.1=py38_0 unidecode=1.1.1=py_0 urllib3=1.25.8=py38_0 websocket-client=0.56.0=py38_0 wheel=0.34.2=py38_0 xz=5.2.4=h14c3975_4 yaml=0.1.7=had09818_2 zict=2.0.0=py_0 zlib=1.2.11=h7b6447c_3 zstd=1.3.7=h0b5b093_0 ``` Prefect diagnostics: ``` { "config_overrides": {}, "env_vars": [], "system_information": { "platform": "Linux-3.10.0-957.21.3.el7.x86_64-x86_64-with-glibc2.10", "prefect_version": "0.10.2", "python_version": "3.8.2" } } ```
I should point out that a runner token retrieved from the UI works fine. Thanks for opening this, @mcg1969! I am able to reproduce; it looks like the input to that mutation changed and this command needs to be adjusted to match. Will make the change.
2020-04-15T16:08:04Z
[]
[]
Traceback (most recent call last): File "/opt/continuum/anaconda/envs/default/bin/prefect", line 6, in <module> exit(cli()) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/prefect/cli/auth.py", line 201, in create_token output = client.graphql( File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/prefect/client/client.py", line 212, in graphql result = self.post( File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/prefect/client/client.py", line 171, in post response = self._request( File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/prefect/client/client.py", line 314, in _request response.raise_for_status() File "/opt/continuum/anaconda/envs/default/lib/python3.8/site-packages/requests/models.py", line 941, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://api.prefect.io/graphql/alpha
621
PrefectHQ/prefect
PrefectHQ__prefect-2502
58126ed79fa90c0a3d682e2074c9c96b0887cfbd
diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py --- a/src/prefect/engine/task_runner.py +++ b/src/prefect/engine/task_runner.py @@ -637,9 +637,11 @@ def check_task_is_cached(self, state: State, inputs: Dict[str, Result]) -> State state = Pending("Cache was invalid; ready to run.") if self.task.cache_for is not None: - candidate_states = prefect.context.caches.get( - self.task.cache_key or self.task.name, [] - ) + candidate_states = [] + if prefect.context.get("caches"): + candidate_states = prefect.context.caches.get( + self.task.cache_key or self.task.name, [] + ) sanitized_inputs = {key: res.value for key, res in inputs.items()} for candidate in candidate_states: if self.task.cache_validator(
FlowRunner manages context cache wrongly ## Description AttributeError and different behavior of output caching between Flow.run and FlowRunner.run. ## Expected Behavior No AttributeError, same behavior and cached result reuse of FlowRunner.run after Flow.run. ## Reproduction ```python from datetime import timedelta import random from prefect import task, Flow from prefect.engine import FlowRunner from prefect.engine.cache_validators import duration_only @task(cache_for=timedelta(seconds=10), cache_validator=duration_only) def rand_inc(r, x): rand = random.randint(0, r) print("RAND", rand) return rand + x with Flow("Cache") as f: a1 = rand_inc(10, 0) # this fails: runner = FlowRunner(f) state1 = runner.run() # this would pass: #f.run() ``` Traceback: ``` Traceback (most recent call last): File "/Users/dafcok/miniconda3/lib/python3.6/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "~/miniconda3/lib/python3.6/site-packages/prefect/engine/task_runner.py", line 631, in check_task_is_cached candidate_states = prefect.context.caches.get( AttributeError: 'Context' object has no attribute 'caches' ``` Moreover, if you `f.run()` once before `runner.run()`, cache validation always logs `Task 'rand_inc': can't use cache because it is now invalid`. ## Environment ``` { "config_overrides": {}, "env_vars": [], "system_information": { "platform": "Darwin-19.0.0-x86_64-i386-64bit", "prefect_version": "0.10.4", "python_version": "3.6.8" } } ```
Looking for some thoughts on this. What's happening here is that the `flow.run` function sets a global `caches` block in context (among other things) https://github.com/PrefectHQ/prefect/blob/6d141372cf89064d24bbafad582a9db26be0cbd5/src/prefect/core/flow.py#L1024-L1025 and then it uses flow runners to run that flow, where each flow runner has access to that `context.caches`. Running the flow directly from the flow runner sits a level deeper, below that context block. This means that if the flow runner is updated to create this `context.caches` in the same way `flow.run` does, the cache will still not persist across runs, since the flow runner is responsible for only a single run. We could add this with the knowledge that it will proceed without error, but in calling something like ``` runner = FlowRunner(f) state1 = runner.run() state2 = runner.run() ``` the second run will not use the cache from the first run. I think one design goal could be that FlowRunner.run() is guaranteed not to modify global state, caches or otherwise. Is that already the case? If yes, passing `context=my_dict` could explicitly modify `my_dict`, which can then be reused for other runs. In the long run, I'm not sure whether in-memory caching should become obsolete after #2394
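A tiny sketch of the defensive lookup the patch in this record applies: treat the cache map as optional so a bare `FlowRunner.run()`, which never populated it, degrades to "no cached candidates" instead of raising. A plain dict stands in for `prefect.context` here, and the task name is taken from the reproduction above.

```python
# Guarded access: a missing "caches" entry yields an empty candidate list
# rather than an AttributeError when the flow was not started via flow.run().
context = {}                                    # stands in for prefect.context
caches = context.get("caches") or {}
candidate_states = caches.get("rand_inc", [])   # task name / cache key
print(candidate_states)                         # [] -> task simply runs uncached
```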
2020-05-06T15:46:49Z
[]
[]
Traceback (most recent call last): File "/Users/dafcok/miniconda3/lib/python3.6/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "~/miniconda3/lib/python3.6/site-packages/prefect/engine/task_runner.py", line 631, in check_task_is_cached candidate_states = prefect.context.caches.get( AttributeError: 'Context' object has no attribute 'caches'
651
PrefectHQ/prefect
PrefectHQ__prefect-2570
d3305a7dd590ad1dee4bb85a18ddbefadfa7531c
diff --git a/src/prefect/agent/agent.py b/src/prefect/agent/agent.py --- a/src/prefect/agent/agent.py +++ b/src/prefect/agent/agent.py @@ -98,9 +98,11 @@ def __init__( no_cloud_logs: bool = False, ) -> None: self.name = name or config.cloud.agent.get("name", "agent") - self.labels = list( - labels or ast.literal_eval(config.cloud.agent.get("labels", "[]")) - ) + + self.labels = labels or config.cloud.agent.get("labels", []) + # quick hack in case config has not been evaluated to a list yet + if isinstance(self.labels, str): + self.labels = ast.literal_eval(self.labels) self.env_vars = env_vars or config.cloud.agent.get("env_vars", dict()) self.max_polls = max_polls self.log_to_cloud = False if no_cloud_logs else True @@ -166,7 +168,7 @@ def _register_agent(self) -> str: - The agent ID as a string """ agent_id = self.client.register_agent( - agent_type=type(self).__name__, name=self.name, labels=self.labels + agent_type=type(self).__name__, name=self.name, labels=self.labels # type: ignore ) self.logger.debug(f"Agent ID: {agent_id}")
Kubernetes Agent Failing to Parse Labels (version 0.11.0) ## Description *A clear description of the bug* It appears the kubernetes agent in version 0.11.0 is failing to read labels (possibly tied to https://github.com/PrefectHQ/prefect/pull/2558) with the following error: ``` (dw_kube) dylanhughes@Dylans-MacBook-Pro-Prefect ~> kubectl get po 1 NAME READY STATUS RESTARTS AGE prefect-agent-74d7947c44-dsxxw 1/2 CrashLoopBackOff 5 5m14s (dw_kube) dylanhughes@Dylans-MacBook-Pro-Prefect ~> kubectl logs prefect-agent-74d7947c44-dsxxw agent 1 Traceback (most recent call last): File "/usr/local/bin/prefect", line 8, in <module> sys.exit(cli()) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/click/decorators.py", line 21, in new_func return f(get_current_context(), *args, **kwargs) File "/usr/local/lib/python3.6/site-packages/prefect/cli/agent.py", line 278, in start agent_address=agent_address, File "/usr/local/lib/python3.6/site-packages/prefect/agent/kubernetes/agent.py", line 67, in __init__ no_cloud_logs=no_cloud_logs, File "/usr/local/lib/python3.6/site-packages/prefect/agent/agent.py", line 102, in __init__ labels or ast.literal_eval(config.cloud.agent.get("labels", "[]")) File "/usr/local/lib/python3.6/ast.py", line 85, in literal_eval return _convert(node_or_string) File "/usr/local/lib/python3.6/ast.py", line 84, in _convert raise ValueError('malformed node or string: ' + repr(node)) ValueError: malformed node or string: <BoxList: ['prefect-data-warehouse']> (dw_kube) dylanhughes@Dylans-MacBook-Pro-Prefect ~> prefect version 0.11.0 ``` ## Reproduction *A minimal example that exhibits the behavior.* While running version 0.11.0 run: ``` prefect agent install kubernetes -t TOKEN --label prefect-data-warehouse --rbac --resource-manager | kubectl apply -f - ``` The deployment will come up but the pod will die with the above error. ## Environment *Any additional information about your environment* Not sure it's relevant given above error *Optionally run `prefect diagnostics` from the command line and paste the information here*
Looks like there's a double `literal_eval` happening. Looking into it.
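A small standalone sketch (hypothetical helper name) of the guard the patch applies: only `literal_eval` the labels when they arrive as a string, and pass through values the config has already parsed, such as the `BoxList` shown in the traceback.

```python
import ast

def normalize_labels(raw_labels):
    # Strings like "['prefect-data-warehouse']" still need evaluating;
    # already-parsed sequences (e.g. a BoxList from the config) are used as-is.
    if isinstance(raw_labels, str):
        raw_labels = ast.literal_eval(raw_labels)
    return list(raw_labels or [])
```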
2020-05-15T15:22:11Z
[]
[]
Traceback (most recent call last): File "/usr/local/bin/prefect", line 8, in <module> sys.exit(cli()) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.6/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/click/decorators.py", line 21, in new_func return f(get_current_context(), *args, **kwargs) File "/usr/local/lib/python3.6/site-packages/prefect/cli/agent.py", line 278, in start agent_address=agent_address, File "/usr/local/lib/python3.6/site-packages/prefect/agent/kubernetes/agent.py", line 67, in __init__ no_cloud_logs=no_cloud_logs, File "/usr/local/lib/python3.6/site-packages/prefect/agent/agent.py", line 102, in __init__ labels or ast.literal_eval(config.cloud.agent.get("labels", "[]")) File "/usr/local/lib/python3.6/ast.py", line 85, in literal_eval return _convert(node_or_string) File "/usr/local/lib/python3.6/ast.py", line 84, in _convert raise ValueError('malformed node or string: ' + repr(node)) ValueError: malformed node or string: <BoxList: ['prefect-data-warehouse']>
662
PrefectHQ/prefect
PrefectHQ__prefect-2594
2032c445521f223bc8569715fcb35f0b339a8210
diff --git a/src/prefect/engine/results/s3_result.py b/src/prefect/engine/results/s3_result.py --- a/src/prefect/engine/results/s3_result.py +++ b/src/prefect/engine/results/s3_result.py @@ -167,7 +167,7 @@ def exists(self, location: str, **kwargs: Any) -> bool: Bucket=self.bucket, Key=location.format(**kwargs) ).load() except botocore.exceptions.ClientError as exc: - if exc.response["Error"]["Code"] == "404": + if exc.response["Error"]["Code"] == "NoSuchKey": return False raise except Exception as exc:
S3Result with target raises error ## Archived from the [Prefect Public Slack Community](https://join.slack.com/t/prefect-public/shared_invite/enQtNzE5OTU3OTQwNzc1LTQ5M2FkZmQzZjI0ODg1ZTBmOTc0ZjVjYWFjMWExZDAyYzBmYjVmMTE1NTQ1Y2IxZTllOTc4MmI3NzYxMDlhYWU) **livni.itay**: Hi - I am working with `S3Result` and receiving a ``` botocore.errorfactory.NoSuchKey: An error occurred (NoSuchKey) when calling the GetObject operation: The specified key does not exist ``` Which upon further research - it can be anything including a permission error. (I tried different buckets with settings) The credentials are stored as AWS_CREDENTIALS in prefect cloud. With the config.toml set to use cloud secrets ``` [cloud] use_local_secrets = false ``` Switching back to `result_handler` argument with `S3Result` subclass *did work,* . And combining `result handler` with `target` does not. Is there something different in the way that credentials are handled between `result` and `result_handler`? The new prefect is really nice :slightly_smiling_face: **chris**: Hi itay - could you share the code you used to initialize the `result_handler` and the `result`? **livni.itay**: `tsx_imb_res = S3Result(bucket="tsx-moc-bcp")` **livni.itay**: ``` @task( max_retries=3, retry_delay=timedelta(seconds=1), # In production this will be change to 20 minutes result_handler=tsx_imb_res, target="{task_name}-{today}", state_handlers=[imb_handler, error_handler] ) ``` **livni.itay**: Works with `target` commented out **chris**: Ah! The `result_handler` kwarg is now deprecated, so you should instead try: ``` ... result=tsx_imb_res, ... ``` **livni.itay**: Right that does not work **livni.itay**: That is the problem **chris**: ahhh interesting! OK so this might actually be a bug with our `exists` logic on the `S3Result` type. Would you mind sharing this example code + the traceback you’re seeing? Sorry about that! **livni.itay**: Actually it looks like I am not using `target` right? ``` [2020-05-16 21:31:37] INFO - prefect.FlowRunner | Beginning Flow run for 'Our first flow' [2020-05-16 21:31:37] INFO - prefect.FlowRunner | Starting flow run. [2020-05-16 21:31:37] INFO - prefect.TaskRunner | Task 'tsx_url': Starting task run... [2020-05-16 21:31:37] INFO - prefect.TaskRunner | Task 'tsx_url': finished task run for task with final state: 'Success' [2020-05-16 21:31:37] INFO - prefect.TaskRunner | Task 'get_tsx_moc_imb': Starting task run... 
[2020-05-16 21:31:38] ERROR - prefect.TaskRunner | Unexpected error: NoSuchKey('An error occurred (NoSuchKey) when calling the GetObject operation: The specified key does not exist.') Traceback (most recent call last): File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 651, in check_target if result.exists(target, **prefect.context): File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/results/s3_result.py", line 167, in exists Bucket=self.bucket, Key=location.format(**kwargs) File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/botocore/client.py", line 316, in _api_call return self._make_api_call(operation_name, kwargs) File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/botocore/client.py", line 626, in _make_api_call raise error_class(parsed_response, operation_name) botocore.errorfactory.NoSuchKey: An error occurred (NoSuchKey) when calling the GetObject operation: The specified key does not exist. [2020-05-16 21:31:38] INFO - prefect.TaskRunner | Task 'get_tsx_moc_imb': finished task run for task with final state: 'Skipped' [2020-05-16 21:31:38] INFO - prefect.FlowRunner | Flow run SUCCESS: all reference tasks succeeded ``` **livni.itay**: `target="{task_name}-{today}",` **chris**: your code looks alright to me actually, including your `target` specification; I think this exception catching logic here is flawed: <https://github.com/PrefectHQ/prefect/blob/master/src/prefect/engine/results/s3_result.py#L169> **chris**: it’s possible this was tested on a different version of `boto3` or something, we’ll need to investigate a little deeper **livni.itay**: Cool. Let me know if you need anything more. **chris**: I’ll use our bot to open the issue and we can track progress there **chris**: <@ULVA73B9P> archive “S3Result with target raises error” Original thread can be found [here](https://prefect-community.slack.com/archives/CL09KU1K7/p1589663959085200?thread_ts=1589663959.085200&cid=CL09KU1K7).
Opening because this is still an active issue. From the thread, relevant package versions are: ``` botocore: 1.15.32 boto3: 1.12.32 ``` Oh interesting, looks like we'll want to check something like: ```python if ex.response['Error']['Code'] == 'NoSuchKey': return False ``` or ```python except client.exceptions.NoSuchKey ``` boto3 has minimal documentation on this.
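An isolated sketch of the error-code check discussed above (function and argument names are illustrative): `GetObject` reports a missing key as the `NoSuchKey` error code on the raised `ClientError`, whereas a `HeadObject` call surfaces it as `404`.

```python
import botocore.exceptions

def s3_key_exists(client, bucket, key):
    try:
        client.get_object(Bucket=bucket, Key=key)
        return True
    except botocore.exceptions.ClientError as exc:
        # get_object reports a missing key as "NoSuchKey"; head_object uses "404".
        if exc.response["Error"]["Code"] in ("NoSuchKey", "404"):
            return False
        raise
```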
2020-05-18T13:27:48Z
[]
[]
Traceback (most recent call last): File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 651, in check_target if result.exists(target, **prefect.context): File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/results/s3_result.py", line 167, in exists Bucket=self.bucket, Key=location.format(**kwargs) File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/botocore/client.py", line 316, in _api_call return self._make_api_call(operation_name, kwargs) File "/home/ilivni/miniconda3/envs/py37moc/lib/python3.7/site-packages/botocore/client.py", line 626, in _make_api_call raise error_class(parsed_response, operation_name) botocore.errorfactory.NoSuchKey: An error occurred (NoSuchKey) when calling the GetObject operation: The specified key does not exist.
664
PrefectHQ/prefect
PrefectHQ__prefect-2608
254cbf7f80b2612447e32bd95184f2e9656513fc
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py --- a/src/prefect/core/flow.py +++ b/src/prefect/core/flow.py @@ -469,9 +469,12 @@ def add_task(self, task: Task) -> Task: self.tasks.add(task) self._cache.clear() - case = prefect.context.get("case", None) - if case is not None: - case.add_task(task, self) + # Parameters must be root tasks + # All other new tasks should be added to the current case (if any) + if not isinstance(task, Parameter): + case = prefect.context.get("case", None) + if case is not None: + case.add_task(task, self) return task
Parameter must be bound before case context ## Current behavior 0.11.0 `case.__enter__` mimics python if blocks, yet assumes that parameters are bound to the flow outside its context. That can cause some head-scratching for users. ```python from prefect import task, Parameter, Flow from prefect.tasks.control_flow import merge, case with Flow("test maybe first param use") as f: x = Parameter("x") p = Parameter("p") with case(p > 10, True): y = x-p y = merge(y, p) state = f.run(parameters=dict(x=0, p=11)) assert state.result[y].result == -11 state = f.run(parameters=dict(x=0, p=9)) assert state.result[y].result == 9 ``` **Errors** with: ``` Traceback (most recent call last): File "pref_case_bind.py", line 8, in <module> y = x-p File "~/miniconda3/lib/python3.6/site-packages/prefect/tasks/control_flow/case.py", line 100, in __exit__ child.set_upstream(cond, flow=self._flow) File "~/miniconda3/lib/python3.6/site-packages/prefect/core/task.py", line 591, in set_upstream self.set_dependencies(flow=flow, upstream_tasks=[task], mapped=mapped) File "~/miniconda3/lib/python3.6/site-packages/prefect/core/task.py", line 566, in set_dependencies mapped=mapped, File "~/miniconda3/lib/python3.6/site-packages/prefect/core/flow.py", line 811, in set_dependencies mapped=is_mapped, File "~/miniconda3/lib/python3.6/site-packages/prefect/core/flow.py", line 482, in add_edge "Parameters must be root tasks and can not have upstream dependencies." ValueError: Parameters must be root tasks and can not have upstream dependencies. ``` **Passes** if: `x = Parameter("x")()` ## Proposed behavior If `child` is a `Parameter` we would `.bind(self_flow)` between L#99 and L#100. I cannot think of any unintended side-effects, because use outside of a case context also binds yet unbound parameters (please confirm though). ## Example See above
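For illustration (not from the issue): one pre-fix workaround is to register the parameters with the flow before entering the `case` block, so `add_task` inside the block never hands a `Parameter` to the case and its exit logic never tries to give the parameter an upstream edge. The `sub` task is a stand-in for the `x - p` expression in the reproduction.

```python
from prefect import Flow, Parameter, task
from prefect.tasks.control_flow import case, merge

@task
def sub(a, b):
    return a - b

with Flow("test maybe first param use") as f:
    x = Parameter("x")
    p = Parameter("p")
    # Bind the parameters up front (similar in effect to the issue's
    # `Parameter("x")()` workaround) so they are already flow tasks
    # when the case block opens.
    f.add_task(x)
    f.add_task(p)
    with case(p > 10, True):
        y = sub(x, p)
    y = merge(y, p)
```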
2020-05-19T18:27:00Z
[]
[]
Traceback (most recent call last): File "pref_case_bind.py", line 8, in <module> y = x-p File "~/miniconda3/lib/python3.6/site-packages/prefect/tasks/control_flow/case.py", line 100, in __exit__ child.set_upstream(cond, flow=self._flow) File "~/miniconda3/lib/python3.6/site-packages/prefect/core/task.py", line 591, in set_upstream self.set_dependencies(flow=flow, upstream_tasks=[task], mapped=mapped) File "~/miniconda3/lib/python3.6/site-packages/prefect/core/task.py", line 566, in set_dependencies mapped=mapped, File "~/miniconda3/lib/python3.6/site-packages/prefect/core/flow.py", line 811, in set_dependencies mapped=is_mapped, File "~/miniconda3/lib/python3.6/site-packages/prefect/core/flow.py", line 482, in add_edge "Parameters must be root tasks and can not have upstream dependencies." ValueError: Parameters must be root tasks and can not have upstream dependencies.
667
PrefectHQ/prefect
PrefectHQ__prefect-2686
4cc0606a0219bfe8b33bbb50507a9f3e3b581823
diff --git a/src/prefect/utilities/gcp.py b/src/prefect/utilities/gcp.py --- a/src/prefect/utilities/gcp.py +++ b/src/prefect/utilities/gcp.py @@ -3,7 +3,6 @@ """ import prefect -from google.cloud import bigquery, storage from google.oauth2.service_account import Credentials @@ -47,6 +46,8 @@ def get_storage_client(credentials: dict = None, project: str = None): Returns: - Client: an initialized and authenticated Google Client """ + from google.cloud import storage + return get_google_client(storage, credentials=credentials, project=project) @@ -63,4 +64,6 @@ def get_bigquery_client(credentials: dict = None, project: str = None): Returns: - Client: an initialized and authenticated Google Client """ + from google.cloud import bigquery + return get_google_client(bigquery, credentials=credentials, project=project)
Google Imports are Tied Together ## Description *A clear description of the bug* I’m using the new `GCSResult` and I’m getting an import error when I don’t also specify `google-cloud-bigquery` as a dependency since they’re imports occur in the same file, I think? ``` Unexpected error: ImportError("cannot import name 'bigquery' from 'google.cloud' (unknown location)") Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 986, in get_task_run_state result = self.result.write(value, filename="output", **prefect.context) File "/usr/local/lib/python3.7/site-packages/prefect/engine/results/gcs_result.py", line 73, in write self.gcs_bucket.blob(new.location).upload_from_string(binary_data) File "/usr/local/lib/python3.7/site-packages/prefect/engine/results/gcs_result.py", line 35, in gcs_bucket from prefect.utilities.gcp import get_storage_client File "/usr/local/lib/python3.7/site-packages/prefect/utilities/gcp.py", line 6, in <module> from google.cloud import bigquery, storage ImportError: cannot import name 'bigquery' from 'google.cloud' (unknown location) ``` https://cloud.prefect.io/prefect/flow-run/6704aa4e-ba9b-40ed-a4f8-386920839a8e?logId=75b1fc01-0ee8-4061-ab8b-5481e6123a79 On a cool note, changing to `python_dependencies=["prefect[google]"]` did work 🎉 ## Expected Behavior *What did you expect to happen instead?* I'd like to be able to specify one import in insolation (in this case `google-cloud-storage`) ## Reproduction *A minimal example that exhibits the behavior.* ``` from prefect import task, Flow from prefect.tasks.notifications.slack_task import SlackTask from prefect.schedules import CronSchedule from prefect.environments.storage import Docker from prefect.engine.results import GCSResult import pendulum import datetime @task(name="Get Week Message", max_retries=5, retry_delay=datetime.timedelta(seconds=5)) def get_week_message(): prefects_birthday = pendulum.date(2018, 1, 17) current_week = prefects_birthday.diff(pendulum.now()).in_weeks() return f"Hello, Jeremiah! It is week {current_week}." send_message = SlackTask( name="Slack Jeremiah", max_retries=5, retry_delay=datetime.timedelta(seconds=5), webhook_secret="SLACK_WEBHOOK", ) schedule = CronSchedule(cron="50 11 * * MON", start_date=pendulum.now(tz="US/Eastern")) storage = Docker( base_image="prefecthq/prefect:latest-python3.7", registry_url=URL, python_dependencies=["google-cloud-storage"], files={ FILE_LOCATION: FILENAME }, env_vars={"GOOGLE_APPLICATION_CREDENTIALS": FILENAME}, ) gcs_result = GCSResult(bucket="what_week_is_it_results") with Flow( name="What Week is It?", schedule=schedule, storage=storage, result=gcs_result ) as flow: week_message = get_week_message() result = send_message(message=week_message) ``` ## Environment *Any additional information about your environment* *Optionally run `prefect diagnostics` from the command line and paste the information here* ``` { "config_overrides": { "cloud": { "auth_token": true, "use_local_secrets": true }, "context": { "secrets": false }, "home_dir": true }, "env_vars": [], "system_information": { "platform": "Darwin-19.4.0-x86_64-i386-64bit", "prefect_version": "0.11.2", "python_version": "3.7.7" } } ```
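A stripped-down sketch of the deferred-import pattern the patch adopts, shown in isolation: moving the `google.cloud` imports inside each helper keeps `google-cloud-bigquery` optional when only `google-cloud-storage` is installed (the `get_google_client` helper is assumed from the same `prefect.utilities.gcp` module).

```python
from prefect.utilities.gcp import get_google_client

def get_storage_client(credentials=None, project=None):
    # Imported lazily so this helper only requires google-cloud-storage.
    from google.cloud import storage
    return get_google_client(storage, credentials=credentials, project=project)

def get_bigquery_client(credentials=None, project=None):
    # Imported lazily so this helper only requires google-cloud-bigquery.
    from google.cloud import bigquery
    return get_google_client(bigquery, credentials=credentials, project=project)
```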
2020-06-01T03:14:15Z
[]
[]
Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 986, in get_task_run_state result = self.result.write(value, filename="output", **prefect.context) File "/usr/local/lib/python3.7/site-packages/prefect/engine/results/gcs_result.py", line 73, in write self.gcs_bucket.blob(new.location).upload_from_string(binary_data) File "/usr/local/lib/python3.7/site-packages/prefect/engine/results/gcs_result.py", line 35, in gcs_bucket from prefect.utilities.gcp import get_storage_client File "/usr/local/lib/python3.7/site-packages/prefect/utilities/gcp.py", line 6, in <module> from google.cloud import bigquery, storage ImportError: cannot import name 'bigquery' from 'google.cloud' (unknown location)
682
PrefectHQ/prefect
PrefectHQ__prefect-2868
960f15e9f59fcbd43a3f61199907f4970a3230e9
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py --- a/src/prefect/core/flow.py +++ b/src/prefect/core/flow.py @@ -6,6 +6,7 @@ import os import tempfile import time +import uuid import warnings from contextlib import contextmanager from pathlib import Path @@ -943,7 +944,14 @@ def _run( # run this flow indefinitely, so long as its schedule has future dates while True: - flow_run_context.update(scheduled_start_time=next_run_time) + # add relevant context keys + # many of these are intended to ensure local runs behave similarly as runs against a backend + flow_run_context.update( + scheduled_start_time=next_run_time, + flow_id=self.name, + flow_run_id=str(uuid.uuid4()), + flow_run_name=str(uuid.uuid4()), + ) if flow_state.is_scheduled(): next_run_time = flow_state.start_time @@ -960,12 +968,17 @@ def _run( # begin a single flow run while not flow_state.is_finished(): runner = runner_cls(flow=self) + task_ctxts = kwargs.pop("task_contexts", {}).copy() + for t in self.tasks: + task_ctxts.setdefault(t, dict()) + task_ctxts[t].update(task_run_id=str(uuid.uuid4())) flow_state = runner.run( parameters=parameters, return_tasks=self.tasks, state=flow_state, task_states=flow_state.result, context=flow_run_context, + task_contexts=task_ctxts, **kwargs, ) diff --git a/src/prefect/engine/flow_runner.py b/src/prefect/engine/flow_runner.py --- a/src/prefect/engine/flow_runner.py +++ b/src/prefect/engine/flow_runner.py @@ -173,8 +173,11 @@ def initialize_run( # type: ignore for task in self.flow.tasks: task_contexts.setdefault(task, {}).update( - task_name=task.name, task_slug=task.slug + task_name=task.name, + task_slug=self.flow.slugs[task], + task_id=self.flow.slugs[task], ) + state, context = super().initialize_run(state=state, context=context) return FlowRunnerInitializeResult( state=state, diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py --- a/src/prefect/engine/task_runner.py +++ b/src/prefect/engine/task_runner.py @@ -165,7 +165,6 @@ def initialize_run( # type: ignore task_run_count=run_count, task_name=self.task.name, task_tags=self.task.tags, - task_slug=self.task.slug, ) context.setdefault("checkpointing", config.flows.checkpointing)
Key error when using task_run_id in a target template ## Description Similar to one of the issues in #2640 where `filename` throws a key error when used in a template, so does `task_run_id`. ``` (py37moc) :~/prefect_guide$ /miniconda3/envs/py37moc/bin/python /home/ilivni/prefect_guide/tst_map.py 0.11.3 [2020-05-27 18:56:17] INFO - prefect.FlowRunner | Beginning Flow run for 'blah' [2020-05-27 18:56:17] INFO - prefect.FlowRunner | Starting flow run. [2020-05-27 18:56:17] INFO - prefect.TaskRunner | Task 'return_list': Starting task run... [2020-05-27 18:56:17] ERROR - prefect.TaskRunner | Unexpected error: KeyError('task_run_id') Traceback (most recent call last): File "/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 660, in check_target if result.exists(target, **prefect.context): File "//miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/results/local_result.py", line 123, in exists return os.path.exists(os.path.join(self.dir, location.format(**kwargs))) KeyError: 'task_run_id' ``` ## Reproduction ``` import prefect print(prefect.__version__) from prefect import task, Flow from prefect.engine.results import LocalResult lcl_res = LocalResult(dir="~/prefect_guide/results/{flow_name}") @task(target="{task_name}-{task_run_id}", ) def return_list(): return [1, 2, 3] @task(target="{task_name}/{map_index}.prefect") def mapped_task(x): return x + 1 with Flow("blah", result=lcl_res) as flow: mapped_task.map(return_list) st = flow.run() flow.visualize(flow_state=st) ``` ## Environment *Any additional information about your environment* *Optionally run `prefect diagnostics` from the command line and paste the information here*
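To make the failure mode concrete (illustrative values, not from the issue): the target template is rendered with `location.format(**prefect.context)`, so any placeholder the local context does not populate raises `KeyError`.

```python
# Keys a purely local run populates vs. a backend-only key such as task_run_id.
ctx = {"task_name": "return_list", "today": "2020-05-27"}

"{task_name}-{today}".format(**ctx)        # fine: "return_list-2020-05-27"
"{task_name}-{task_run_id}".format(**ctx)  # raises KeyError: 'task_run_id'
```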
Hey @gryBox having a task run ID requires a use of a backend—either core's server or Cloud. Since each task run ID corresponds to a unique entry in the database. So I would expect some behavior like this to happen. I do think we _could_ raise a more informative error though. @joshmeek I think there's an opportunity here for us to polish the templating interface; things like `task_run_id` don't exist in Core-alone, but I can imagine people doing local development with Cloud-specific context vars like this and it'd be nice if we can find a way to support that. Same issue with the `{filename}` template with mapping. Hi - Currently not exposing the `{filename}` in core results in an error loop if trying to deploy with docker. 1. Test locally (no `{filename}`) -> Good 2. Deploy with docker storage -> Error need `{filename}` 3. Add `{filename}` -> KeyError 4. Rinse lather repeat @cicdw on the perspective of 'finding a way to support [Cloud-specific context vars in Core only]', do we think making some context that is ephmeral and meaningless (like a random UUID that won't be stored anywhere in the example of `task_run_id`) is anywhere towards the right direction? Alternatively we were also talking about catching KeyErrors and raising our own exception that redirects people to the context docs as a way to deal with all cloud-only context. > making some context that is ephmeral and meaningless (like a random UUID...) Yea I think that would work! To ensure the ID is somewhat meaningful and constant across the full lifecycle of a flow run we could randomly generate them all for each task somewhere in this region: https://github.com/PrefectHQ/prefect/blob/master/src/prefect/core/flow.py#L904-L919 The only edge case here is for mapped tasks - when running against a backend, we generate a unique task run id for each child (happens here: https://github.com/PrefectHQ/prefect/blob/master/src/prefect/core/flow.py#L904-L919) so we'd want to consider what to do when running locally in this situation. > Alternatively we were also talking about catching KeyErrors and raising our own exception that redirects people to the context docs as a way to deal with all cloud-only context. Yea, I def think catching `KeyError`s and pointing to docs is a great idea; however, I do think it's important to ensure some amount of consistency between local runs -> cloud runs, so as a goal we should try to have context key parity where it makes sense. Was this fixed by #2717, or is there still more to do here? @jcrist I think there's still an opportunity here to ensure Cloud <-> Core populate the exact same context keys. Here you can find a list of the keys that Cloud populates that Core alone does not: https://docs.prefect.io/api/latest/utilities/context.html. I'm 90% that most of these can be handled with auto-generated UUIDs somewhere around here: https://github.com/PrefectHQ/prefect/blob/master/src/prefect/core/flow.py#L903
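A sketch of the direction settled on above (and taken by the attached patch): generate ephemeral UUIDs locally so runs without a backend still expose `flow_run_id`/`task_run_id`-style context keys; `flow` stands in for an existing `Flow` object.

```python
import uuid

# Locally generated, throwaway IDs that mirror what Cloud/Server would supply.
flow_run_context = dict(
    flow_run_id=str(uuid.uuid4()),
    flow_run_name=str(uuid.uuid4()),
)
task_contexts = {t: dict(task_run_id=str(uuid.uuid4())) for t in flow.tasks}
```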
2020-06-25T21:06:18Z
[]
[]
Traceback (most recent call last): File "/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/runner.py", line 48, in inner new_state = method(self, state, *args, **kwargs) File "/miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/task_runner.py", line 660, in check_target if result.exists(target, **prefect.context): File "//miniconda3/envs/py37moc/lib/python3.7/site-packages/prefect/engine/results/local_result.py", line 123, in exists return os.path.exists(os.path.join(self.dir, location.format(**kwargs))) KeyError: 'task_run_id'
702
PrefectHQ/prefect
PrefectHQ__prefect-2877
71d40d7a4d3f97d0ca562b80b9033646e1a2c9ae
diff --git a/src/prefect/agent/fargate/agent.py b/src/prefect/agent/fargate/agent.py --- a/src/prefect/agent/fargate/agent.py +++ b/src/prefect/agent/fargate/agent.py @@ -370,7 +370,18 @@ def _parse_kwargs(self, user_kwargs: dict, check_envars: bool = False) -> tuple: self.logger.debug("{} = {}".format(key, item)) container_definitions_kwargs = {} - for key, item in user_kwargs.get("containerDefinitions", [{}])[0].items(): + container_defs = user_kwargs.get("containerDefinitions", [{}]) + try: + container_defs = literal_eval(container_defs) + except (ValueError, SyntaxError): + pass + + if len(container_defs) != 1: + raise ValueError( + "Fargate agent only accepts configuration for a single container definition." + ) + + for key, item in container_defs[0].items(): if key in container_definitions_kwarg_list: try: # Parse kwarg if needed
Fargate container definitions argument parsing failing (CLI) ## Description It seems the string data input to be used by the Fargate Agent is never cast as a dict: ``` prefect agent start fargate --containerDefinitions="[{'logConfiguration': 'options': {'awslogs-group': 'something', 'awslogs-stream-prefix': 'prefect-flow-runs', 'awslogs-create-group': 'true'}}]" ``` ``` Traceback (most recent call last): File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/bin/prefect", line 10, in <module> sys.exit(cli()) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/decorators.py", line 21, in new_func return f(get_current_context(), *args, **kwargs) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/prefect/cli/agent.py", line 268, in start from_qualified_name(retrieved_agent)( File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/prefect/agent/fargate/agent.py", line 142, in __init__ ) = self._parse_kwargs(kwargs, True) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/prefect/agent/fargate/agent.py", line 359, in _parse_kwargs for key, item in user_kwargs.get("containerDefinitions", [{}])[0].items(): AttributeError: 'str' object has no attribute 'items' ``` ## Expected Behavior <!-- What did you expect to happen instead? --> It should behave [as the docs describe](https://docs.prefect.io/orchestration/agents/fargate.html#prefect-cli-using-kwargs) and use the above for the fargate task container definition, example in the docs: ``` prefect agent start fargate cpu=256 memory=512 networkConfiguration="{'awsvpcConfiguration': {'assignPublicIp': 'ENABLED', 'subnets': ['my_subnet_id'], 'securityGroups': []}}" ``` ## Environment ``` poetry run prefect diagnostics { "config_overrides": { "server": { "telemetry": { "enabled": true } } }, "env_vars": [ "PREFECT__CLOUD__AUTH_TOKEN" ], "system_information": { "platform": "macOS-10.15.5-x86_64-i386-64bit", "prefect_version": "0.12.1", "python_version": "3.8.2" } } ``` (I deactivated telemetry in the config, need to look into this too)
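A standalone sketch (hypothetical helper name) of the guard the patch introduces: CLI kwargs arrive as strings, so a `containerDefinitions` value needs to be evaluated back into a list before its first element can be iterated.

```python
from ast import literal_eval

def parse_container_definitions(raw):
    # The CLI passes kwargs through as strings; evaluate those back into
    # Python objects, but leave already-parsed lists untouched.
    if isinstance(raw, str):
        try:
            raw = literal_eval(raw)
        except (ValueError, SyntaxError):
            pass
    if len(raw) != 1:
        raise ValueError("Only a single container definition is supported.")
    return raw[0]
```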
2020-06-26T14:39:43Z
[]
[]
Traceback (most recent call last): File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/bin/prefect", line 10, in <module> sys.exit(cli()) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/click/decorators.py", line 21, in new_func return f(get_current_context(), *args, **kwargs) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/prefect/cli/agent.py", line 268, in start from_qualified_name(retrieved_agent)( File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/prefect/agent/fargate/agent.py", line 142, in __init__ ) = self._parse_kwargs(kwargs, True) File "/Users/nbatalha/Library/Caches/pypoetry/virtualenvs/pipelines-P6yz7rn1-py3.8/lib/python3.8/site-packages/prefect/agent/fargate/agent.py", line 359, in _parse_kwargs for key, item in user_kwargs.get("containerDefinitions", [{}])[0].items(): AttributeError: 'str' object has no attribute 'items'
704
PrefectHQ/prefect
PrefectHQ__prefect-3085
f0a2056af0bcac28cca3554862dc0f2041b88b02
diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py --- a/src/prefect/engine/task_runner.py +++ b/src/prefect/engine/task_runner.py @@ -159,7 +159,12 @@ def initialize_run( # type: ignore task_name=self.task.name, task_tags=self.task.tags, ) - context.setdefault("checkpointing", config.flows.checkpointing) + # Use the config stored in context if possible (should always be present) + try: + checkpointing = context["config"]["flows"]["checkpointing"] + except KeyError: + checkpointing = config.flows.checkpointing + context.setdefault("checkpointing", checkpointing) map_index = context.get("map_index", None) if isinstance(map_index, int) and context.get("task_full_name"):
DaskExecutor doesn't write results ## Description DaskExecutor doesn't write LocalResults with JSONSerializer. In our tests, also other results and with a cluster aren't written. For the Repro, I picked the simplest case. ## Expected Behavior DaskExecutor writes LocalResults. If workers need extra treatment to set their global checkpointing config, I'd consider it an enhancement if configs percolate. ## Reproduction ```python import json import os from tempfile import TemporaryDirectory from typing import Optional from prefect import Flow, task from prefect.engine.executors import Executor, DaskExecutor from prefect.engine.results import LocalResult from prefect.engine.serializers import JSONSerializer from prefect.utilities.configuration import set_temporary_config from prefect.utilities.debug import raise_on_exception def test(e: Optional[Executor]): with TemporaryDirectory() as tmpdir: flow_result = LocalResult(tmpdir, serializer=JSONSerializer(), location="{task_name}.json") with Flow("write_result", result=flow_result) as f: _terminal = task(lambda: 42, checkpoint=True, name="magic")() with set_temporary_config({"flows.checkpointing": True}), \ raise_on_exception(): f.run(executor=e) files = os.listdir(tmpdir) assert files == ["magic.json"], files with open(os.path.join(tmpdir, files[0]), "rb") as file: val = json.load(file) assert val==42 if __name__ == "__main__": print("Local") test(None) print("DaskExecutor") test(DaskExecutor()) ``` *Output:* ```bash Local [2020-08-01 09:48:05] INFO - prefect.FlowRunner | Beginning Flow run for 'write_result' [2020-08-01 09:48:05] INFO - prefect.FlowRunner | Starting flow run. [2020-08-01 09:48:05] INFO - prefect.TaskRunner | Task 'magic': Starting task run... [2020-08-01 09:48:05] INFO - prefect.TaskRunner | Task 'magic': finished task run for task with final state: 'Success' [2020-08-01 09:48:05] INFO - prefect.FlowRunner | Flow run SUCCESS: all reference tasks succeeded DaskExecutor [2020-08-01 09:48:05] INFO - prefect.FlowRunner | Beginning Flow run for 'write_result' [2020-08-01 09:48:05] INFO - prefect.FlowRunner | Starting flow run. [2020-08-01 09:48:07] INFO - prefect.TaskRunner | Task 'magic': Starting task run... [2020-08-01 09:48:07] INFO - prefect.TaskRunner | Task 'magic': finished task run for task with final state: 'Success' [2020-08-01 09:48:08] INFO - prefect.FlowRunner | Flow run SUCCESS: all reference tasks succeeded Traceback (most recent call last): File "repro_local_write.py", line 37, in <module> test(DaskExecutor()) File "repro_local_write.py", line 26, in test assert files == ["magic.json"], files AssertionError: [] ``` ## Environment ```json { "config_overrides": {}, "env_vars": [], "system_information": { "platform": "Darwin-19.5.0-x86_64-i386-64bit", "prefect_version": "0.12.5", "python_version": "3.6.8" } } ``` `dask==2.21.0`
Hi @ahirner I don't think setting temporary config like that will do what you expect due to the workers being created outside of that context manager. Having checkpointing set in your global config.toml (or through an env var) should do the trick: ``` [flows] checkpointing = true ``` Global configuration options work, thanks! I didn't expect clients to have only partial control over checkpointing.
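For experimentation (the variable name assumes Prefect's `PREFECT__SECTION__KEY` override convention): the `config.toml` setting above can also be supplied as an environment variable before the Dask workers are created, since `set_temporary_config` in the client process never reaches them.

```python
import os

# Must be set before DaskExecutor spins up its local cluster so the worker
# processes inherit it; a pre-existing or remote cluster needs the variable
# set on the workers themselves.
os.environ["PREFECT__FLOWS__CHECKPOINTING"] = "true"
```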
2020-08-04T15:38:51Z
[]
[]
Traceback (most recent call last): File "repro_local_write.py", line 37, in <module> test(DaskExecutor()) File "repro_local_write.py", line 26, in test assert files == ["magic.json"], files AssertionError: []
723
PrefectHQ/prefect
PrefectHQ__prefect-473
f5d5a4349fefcac65d856f615040ae2306bcfb22
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py --- a/src/prefect/core/flow.py +++ b/src/prefect/core/flow.py @@ -858,7 +858,12 @@ def run( Returns: - State of the flow after it is run resulting from it's return tasks """ - runner = prefect.engine.flow_runner.FlowRunner(flow=self) # type: ignore + if prefect.config.get("prefect_cloud", False) is True: + runner_cls = prefect.engine.cloud_runners.CloudFlowRunner # type: ignore + else: + runner_cls = prefect.engine.flow_runner.FlowRunner # type: ignore + + runner = runner_cls(flow=self) parameters = parameters or {} unknown_params = [ p for p in parameters if p not in self.parameters(names_only=True) diff --git a/src/prefect/engine/__init__.py b/src/prefect/engine/__init__.py --- a/src/prefect/engine/__init__.py +++ b/src/prefect/engine/__init__.py @@ -3,5 +3,6 @@ import prefect.engine.executors import prefect.engine.state import prefect.engine.signals +import prefect.engine.cloud_runners from prefect.engine.flow_runner import FlowRunner from prefect.engine.task_runner import TaskRunner diff --git a/src/prefect/engine/cloud_runners.py b/src/prefect/engine/cloud_runners.py new file mode 100644 --- /dev/null +++ b/src/prefect/engine/cloud_runners.py @@ -0,0 +1,285 @@ +# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula + +import warnings +from typing import Any, Callable, Dict, Iterable, Optional, Tuple + +import prefect +from prefect import config +from prefect.client import Client +from prefect.client.result_handlers import ResultHandler +from prefect.core import Flow, Task +from prefect.engine import signals +from prefect.engine.runner import ENDRUN +from prefect.engine.state import Failed, State +from prefect.engine.flow_runner import FlowRunner +from prefect.engine.task_runner import TaskRunner + + +class CloudTaskRunner(TaskRunner): + """ + TaskRunners handle the execution of Tasks and determine the State of a Task + before, during and after the Task is run. + + In particular, through the TaskRunner you can specify the states of any upstream dependencies, + any inputs required for this Task to run, and what state the Task should be initialized with. + + Args: + - task (Task): the Task to be run / executed + - result_handler (ResultHandler, optional): the handler to use for + retrieving and storing state results during execution + - state_handlers (Iterable[Callable], optional): A list of state change handlers + that will be called whenever the task changes state, providing an + opportunity to inspect or modify the new state. The handler + will be passed the task runner instance, the old (prior) state, and the new + (current) state, with the following signature: + + ``` + state_handler( + task_runner: TaskRunner, + old_state: State, + new_state: State) -> State + ``` + + If multiple functions are passed, then the `new_state` argument will be the + result of the previous handler. 
+ """ + + def __init__( + self, + task: Task, + result_handler: ResultHandler = None, + state_handlers: Iterable[Callable] = None, + ) -> None: + self.task = task + self.client = Client() + self.result_handler = result_handler + super().__init__( + task=task, result_handler=result_handler, state_handlers=state_handlers + ) + + def _heartbeat(self) -> None: + try: + task_run_id = self.task_run_id + self.client.update_task_run_heartbeat(task_run_id) + except: + warnings.warn("Heartbeat failed for Task '{}'".format(self.task.name)) + + def call_runner_target_handlers(self, old_state: State, new_state: State) -> State: + """ + A special state handler that the TaskRunner uses to call its task's state handlers. + This method is called as part of the base Runner's `handle_state_change()` method. + + Args: + - old_state (State): the old (previous) state + - new_state (State): the new (current) state + + Returns: + - State: the new state + """ + new_state = super().call_runner_target_handlers( + old_state=old_state, new_state=new_state + ) + + task_run_id = prefect.context.get("task_run_id") + version = prefect.context.get("task_run_version") + + try: + res = self.client.set_task_run_state( + task_run_id=task_run_id, + version=version, + state=new_state, + cache_for=self.task.cache_for, + result_handler=self.result_handler, + ) + except Exception as exc: + raise ENDRUN(state=new_state) + + prefect.context.update(task_run_version=version + 1) # type: ignore + + return new_state + + def initialize_run( + self, state: Optional[State], context: Dict[str, Any] + ) -> Tuple[State, Dict[str, Any]]: + """ + Initializes the Task run by initializing state and context appropriately. + + Args: + - state (State): the proposed initial state of the flow run; can be `None` + - context (dict): the context to be updated with relevant information + + Returns: + - tuple: a tuple of the updated state and context objects + """ + flow_run_id = context.get("flow_run_id", None) + try: + task_run_info = self.client.get_task_run_info( + flow_run_id, + context.get("task_id", ""), + map_index=context.get("map_index", None), + result_handler=self.result_handler, + ) + except Exception as exc: + if state is None: + state = Failed( + message="Could not retrieve state from Prefect Cloud", result=exc + ) + raise ENDRUN(state=state) + + # if state is set, keep it; otherwise load from db + state = state or task_run_info.state # type: ignore + context.update( + task_run_version=task_run_info.version, # type: ignore + task_run_id=task_run_info.id, # type: ignore + ) + self.task_run_id = task_run_info.id # type: ignore + + # update inputs, prioritizing kwarg-provided inputs + if hasattr(state, "cached_inputs") and isinstance( + state.cached_inputs, dict # type: ignore + ): + inputs = state.cached_inputs # type: ignore + inputs.update(context.get("inputs", {})) + context.update(inputs=inputs) + + context.update(task_name=self.task.name) + return super().initialize_run(state=state, context=context) + + +class CloudFlowRunner(FlowRunner): + """ + FlowRunners handle the execution of Flows and determine the State of a Flow + before, during and after the Flow is run. + + In particular, through the FlowRunner you can specify which tasks should be + the first tasks to run, which tasks should be returned after the Flow is finished, + and what states each task should be initialized with. + + Args: + - flow (Flow): the `Flow` to be run + - task_runner_cls (TaskRunner, optional): The class used for running + individual Tasks. 
Defaults to [TaskRunner](task_runner.html) + - state_handlers (Iterable[Callable], optional): A list of state change handlers + that will be called whenever the flow changes state, providing an + opportunity to inspect or modify the new state. The handler + will be passed the flow runner instance, the old (prior) state, and the new + (current) state, with the following signature: + + ``` + state_handler( + flow_runner: FlowRunner, + old_state: State, + new_state: State) -> State + ``` + + If multiple functions are passed, then the `new_state` argument will be the + result of the previous handler. + + Note: new FlowRunners are initialized within the call to `Flow.run()` and in general, + this is the endpoint through which FlowRunners will be interacted with most frequently. + + Example: + ```python + @task + def say_hello(): + print('hello') + + with Flow() as f: + say_hello() + + fr = FlowRunner(flow=f) + flow_state = fr.run() + ``` + """ + + def __init__( + self, + flow: Flow, + task_runner_cls: type = None, + state_handlers: Iterable[Callable] = None, + ) -> None: + self.flow = flow + self.task_runner_cls = task_runner_cls or CloudTaskRunner + self.client = Client() + super().__init__( + flow=flow, + task_runner_cls=self.task_runner_cls, + state_handlers=state_handlers, + ) + + def _heartbeat(self) -> None: + try: + flow_run_id = prefect.context.get("flow_run_id") + self.client.update_flow_run_heartbeat(flow_run_id) + except: + warnings.warn("Heartbeat failed for Flow '{}'".format(self.flow.name)) + + def call_runner_target_handlers(self, old_state: State, new_state: State) -> State: + """ + A special state handler that the FlowRunner uses to call its flow's state handlers. + This method is called as part of the base Runner's `handle_state_change()` method. + + Args: + - old_state (State): the old (previous) state + - new_state (State): the new (current) state + + Returns: + - State: the new state + """ + new_state = super().call_runner_target_handlers( + old_state=old_state, new_state=new_state + ) + + flow_run_id = prefect.context.get("flow_run_id", None) + version = prefect.context.get("flow_run_version") + + try: + res = self.client.set_flow_run_state( + flow_run_id=flow_run_id, + version=version, + state=new_state, + result_handler=self.flow.result_handler, + ) + except Exception as exc: + raise ENDRUN(state=new_state) + + prefect.context.update(flow_run_version=version + 1) # type: ignore + + return new_state + + def initialize_run( + self, state: Optional[State], context: Dict[str, Any] + ) -> Tuple[State, Dict[str, Any]]: + """ + Initializes the Flow run by initializing state and context appropriately. 
+ + Args: + - state (State): the proposed initial state of the flow run; can be `None` + - context (dict): the context to be updated with relevant information + + Returns: + - tuple: a tuple of the updated state and context objects + """ + + try: + flow_run_info = self.client.get_flow_run_info( + flow_run_id=prefect.context.get("flow_run_id", ""), + result_handler=self.flow.result_handler, + ) + except Exception as exc: + if state is None: + state = Failed( + message="Could not retrieve state from Prefect Cloud", result=exc + ) + raise ENDRUN(state=state) + + context.update(flow_run_version=flow_run_info.version) # type: ignore + # if state is set, keep it; otherwise load from db + state = state or flow_run_info.state # type: ignore + + # update parameters, prioritizing kwarg-provided params + parameters = flow_run_info.parameters or {} # type: ignore + parameters.update(context.get("parameters", {})) + context.update(parameters=parameters) + + return super().initialize_run(state=state, context=context) diff --git a/src/prefect/engine/flow_runner.py b/src/prefect/engine/flow_runner.py --- a/src/prefect/engine/flow_runner.py +++ b/src/prefect/engine/flow_runner.py @@ -6,7 +6,6 @@ import prefect from prefect import config -from prefect.client import Client from prefect.core import Edge, Flow, Task from prefect.engine import signals from prefect.engine.executors import DEFAULT_EXECUTOR @@ -79,13 +78,8 @@ def __init__( ): self.flow = flow self.task_runner_cls = task_runner_cls or TaskRunner - self.client = Client() super().__init__(state_handlers=state_handlers) - def _heartbeat(self) -> None: - flow_run_id = prefect.context.get("flow_run_id") - self.client.update_flow_run_heartbeat(flow_run_id) - def call_runner_target_handlers(self, old_state: State, new_state: State) -> State: """ A special state handler that the FlowRunner uses to call its flow's state handlers. @@ -101,51 +95,8 @@ def call_runner_target_handlers(self, old_state: State, new_state: State) -> Sta for handler in self.flow.state_handlers: new_state = handler(self.flow, old_state, new_state) - # Set state if in prefect cloud - if config.get("prefect_cloud", None): - flow_run_id = prefect.context.get("flow_run_id", None) - version = prefect.context.get("flow_run_version") - - res = self.client.set_flow_run_state( - flow_run_id=flow_run_id, - version=version, - state=new_state, - result_handler=self.flow.result_handler, - ) - prefect.context.update(flow_run_version=res.version) # type: ignore - return new_state - def initialize_run( - self, state: Optional[State], context: Dict[str, Any] - ) -> Tuple[State, Dict[str, Any]]: - """ - Initializes the Flow run by initializing state and context appropriately. 
- - Args: - - state (State): the proposed initial state of the flow run; can be `None` - - context (dict): the context to be updated with relevant information - - Returns: - - tuple: a tuple of the updated state and context objects - """ - - if config.get("prefect_cloud", None): - flow_run_info = self.client.get_flow_run_info( - flow_run_id=prefect.context.get("flow_run_id", ""), - result_handler=self.flow.result_handler, - ) - context.update(flow_run_version=flow_run_info.version) # type: ignore - # if state is set, keep it; otherwise load from db - state = state or flow_run_info.state # type: ignore - - ## update parameters, prioritizing kwarg-provided params - parameters = flow_run_info.parameters or {} # type: ignore - parameters.update(context.get("parameters", {})) - context.update(parameters=parameters) - - return super().initialize_run(state=state, context=context) - def run( self, state: State = None, @@ -193,7 +144,13 @@ def run( parameters = parameters or {} context.update(parameters=parameters, flow_name=self.flow.name) - state, context = self.initialize_run(state, context) + + # if run fails to initialize, end the run + try: + state, context = self.initialize_run(state, context) + except ENDRUN as exc: + state = exc.state + return state if return_tasks.difference(self.flow.tasks): raise ValueError("Some tasks in return_tasks were not found in the flow.") diff --git a/src/prefect/engine/runner.py b/src/prefect/engine/runner.py --- a/src/prefect/engine/runner.py +++ b/src/prefect/engine/runner.py @@ -71,6 +71,9 @@ def __init__(self, state_handlers: Iterable[Callable] = None): self.state_handlers = state_handlers or [] self.logger = logging.get_logger(type(self).__name__) + def _heartbeat(self) -> None: + pass + def initialize_run( self, state: Optional[State], context: Dict[str, Any] ) -> Tuple[State, Dict[str, Any]]: @@ -122,7 +125,8 @@ def handle_state_change(self, old_state: State, new_state: State) -> State: Raises: - PAUSE: if raised by a handler - - ENDRUN(Failed()): if any of the handlers fail + - ENDRUN: if raised by a handler + - ENDRUN(Failed()): if any of the handlers fail unexpectedly """ raise_on_exception = prefect.context.get("raise_on_exception", False) @@ -135,8 +139,8 @@ def handle_state_change(self, old_state: State, new_state: State) -> State: for handler in self.state_handlers: new_state = handler(self, old_state, new_state) - # raise pauses - except signals.PAUSE: + # raise pauses and ENDRUNs + except (signals.PAUSE, ENDRUN): raise # trap signals diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py --- a/src/prefect/engine/task_runner.py +++ b/src/prefect/engine/task_runner.py @@ -19,7 +19,6 @@ import prefect from prefect import config -from prefect.client import Client from prefect.client.result_handlers import ResultHandler from prefect.core import Edge, Task from prefect.engine import signals @@ -80,14 +79,9 @@ def __init__( state_handlers: Iterable[Callable] = None, ): self.task = task - self.client = Client() self.result_handler = result_handler super().__init__(state_handlers=state_handlers) - def _heartbeat(self) -> None: - task_run_id = self.task_run_id - self.client.update_task_run_heartbeat(task_run_id) - def call_runner_target_handlers(self, old_state: State, new_state: State) -> State: """ A special state handler that the TaskRunner uses to call its task's state handlers. 
@@ -103,20 +97,6 @@ def call_runner_target_handlers(self, old_state: State, new_state: State) -> Sta for handler in self.task.state_handlers: new_state = handler(self.task, old_state, new_state) - # Set state if in prefect cloud - if config.get("prefect_cloud", None): - task_run_id = prefect.context.get("task_run_id") - version = prefect.context.get("task_run_version") - - res = self.client.set_task_run_state( - task_run_id=task_run_id, - version=version, - state=new_state, - cache_for=self.task.cache_for, - result_handler=self.result_handler, - ) - prefect.context.update(task_run_version=res.version) # type: ignore - return new_state def initialize_run( @@ -132,23 +112,6 @@ def initialize_run( Returns: - tuple: a tuple of the updated state and context objects """ - if config.get("prefect_cloud", None): - flow_run_id = context.get("flow_run_id", None) - task_run_info = self.client.get_task_run_info( - flow_run_id, - context.get("task_id", ""), - map_index=context.get("map_index", None), - result_handler=self.result_handler, - ) - - # if state is set, keep it; otherwise load from db - state = state or task_run_info.state # type: ignore - context.update( - task_run_version=task_run_info.version, # type: ignore - task_run_id=task_run_info.id, # type: ignore - ) - self.task_run_id = task_run_info.id # type: ignore - context.update(task_name=self.task.name) return super().initialize_run(state=state, context=context) @@ -198,8 +161,15 @@ def run( context = context or {} executor = executor or DEFAULT_EXECUTOR - context.update(map_index=map_index) - state, context = self.initialize_run(state, context) + context.update(inputs=inputs, map_index=map_index) + + # if run fails to initialize, end the run + try: + state, context = self.initialize_run(state, context) + inputs = context.get("inputs") or {} + except ENDRUN as exc: + state = exc.state + return state # construct task inputs task_inputs = {} # type: Dict[str, Any] diff --git a/src/prefect/utilities/executors.py b/src/prefect/utilities/executors.py --- a/src/prefect/utilities/executors.py +++ b/src/prefect/utilities/executors.py @@ -29,18 +29,19 @@ def inner( self: "prefect.engine.runner.Runner", *args: Any, **kwargs: Any ) -> "prefect.engine.state.State": try: - if prefect.config.get("prefect_cloud", None): - timer = threading.Timer( - prefect.config.cloud.heartbeat_interval, self._heartbeat - ) + timer = threading.Timer( + prefect.config.cloud.heartbeat_interval, self._heartbeat + ) + try: self._heartbeat() - timer.start() + except: + pass + timer.start() return runner_method(self, *args, **kwargs) except Exception as exc: raise exc finally: - if prefect.config.get("prefect_cloud", None): - timer.cancel() + timer.cancel() return inner
Create separate CloudFlowRunner and CloudTaskRunner classes to encapsulate logic We're starting to get to a point of lots of conditionals (`if prefect_cloud:`) and repetitive logic; I think these could be standalone classes set as default by a config. State handler exception raised on client error Output of error: `"Failed(\"Exception raised while calling state handlers.\")\r\n"` Traceback: ``` Traceback (most recent call last): File "<stdin>", line 2, in <module> File "/usr/local/lib/python3.6/site-packages/prefect/environments.py", line 204, in run return runner.run(**(runner_kwargs or {})) File "/usr/local/lib/python3.6/site-packages/prefect/engine/flow_runner.py", line 229, in run raise exc File "/usr/local/lib/python3.6/site-packages/prefect/engine/flow_runner.py", line 210, in run state = self.set_flow_to_running(state) File "/usr/local/lib/python3.6/site-packages/prefect/engine/runner.py", line 60, in inner return self.handle_state_change(old_state=state, new_state=new_state) File "/usr/local/lib/python3.6/site-packages/prefect/engine/runner.py", line 112, in handle_state_change new_state = self.call_runner_target_handlers(old_state, new_state) File "/usr/local/lib/python3.6/site-packages/prefect/engine/flow_runner.py", line 109, in call_runner_target_handlers result_handler=self.flow.result_handler, File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 368, in set_flow_run_state parse_graphql(mutation), state=json.dumps(serialized_state) File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 108, in graphql server=self.graphql_server, File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 83, in post response = self._request(method="POST", path=path, params=params, server=server) File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 170, in _request return request_fn() File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 164, in request_fn response.raise_for_status() File "/usr/local/lib/python3.6/site-packages/requests/models.py", line 940, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: client_url_here ``` cc @cicdw
Following conversation with @cicdw: - CloudTaskRunner needs to query for all upstream states immediately before using them (to ensure they aren't stale) - CloudFlowRunner needs to query for all reference/terminal states immediately before using them (to ensure they aren't stale)
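A minimal, framework-free sketch of the control-flow pattern the patch above introduces — catching an ENDRUN-style signal raised during `initialize_run` and returning the state it carries instead of letting the run continue. The class and exception names here are illustrative, not Prefect's actual API.

```python
class EndRun(Exception):
    """Control-flow signal carrying the state the run should end with."""

    def __init__(self, state):
        super().__init__(state)
        self.state = state


class Runner:
    def initialize_run(self, state):
        # A real runner might load state from a backend here and signal that
        # the run should stop (e.g. it already finished or was cancelled).
        if state == "finished":
            raise EndRun(state)
        return state or "pending"

    def run(self, state=None):
        # If initialization says "end the run", return that state instead of raising.
        try:
            state = self.initialize_run(state)
        except EndRun as exc:
            return exc.state
        return f"running from {state}"


print(Runner().run())            # running from pending
print(Runner().run("finished"))  # finished
```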
2019-01-01T02:42:37Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 2, in <module> File "/usr/local/lib/python3.6/site-packages/prefect/environments.py", line 204, in run return runner.run(**(runner_kwargs or {})) File "/usr/local/lib/python3.6/site-packages/prefect/engine/flow_runner.py", line 229, in run raise exc File "/usr/local/lib/python3.6/site-packages/prefect/engine/flow_runner.py", line 210, in run state = self.set_flow_to_running(state) File "/usr/local/lib/python3.6/site-packages/prefect/engine/runner.py", line 60, in inner return self.handle_state_change(old_state=state, new_state=new_state) File "/usr/local/lib/python3.6/site-packages/prefect/engine/runner.py", line 112, in handle_state_change new_state = self.call_runner_target_handlers(old_state, new_state) File "/usr/local/lib/python3.6/site-packages/prefect/engine/flow_runner.py", line 109, in call_runner_target_handlers result_handler=self.flow.result_handler, File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 368, in set_flow_run_state parse_graphql(mutation), state=json.dumps(serialized_state) File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 108, in graphql server=self.graphql_server, File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 83, in post response = self._request(method="POST", path=path, params=params, server=server) File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 170, in _request return request_fn() File "/usr/local/lib/python3.6/site-packages/prefect/client/client.py", line 164, in request_fn response.raise_for_status() File "/usr/local/lib/python3.6/site-packages/requests/models.py", line 940, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: client_url_here
756
PrefectHQ/prefect
PrefectHQ__prefect-863
9291bef70c4dcf1b3491813174268d3454786802
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py --- a/src/prefect/core/flow.py +++ b/src/prefect/core/flow.py @@ -3,6 +3,7 @@ import functools import inspect import json +import os import tempfile import time import uuid @@ -1105,8 +1106,12 @@ def get_color(task: Task, map_index: int = None) -> str: except Exception: pass - with tempfile.NamedTemporaryFile() as tmp: - graph.render(tmp.name, view=True) + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.close() + try: + graph.render(tmp.name, view=True) + finally: + os.unlink(tmp.name) return graph
flow.visualize() fails on windows A simple flow.visualize() command fails when running on Windows. Looks like it's due to the way that windows creates tempfiles (see this thread: https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file) Traceback (most recent call last): File "C:\Dev\source\python\prefecttesting\dostuff.py", line 34, in <module> flow.visualize() File "C:\Dev\source\python\prefecttesting\venv\lib\site-packages\prefect\core\flow.py", line 1106, in visualize graph.render(tmp.name, view=True) File "C:\Dev\source\python\prefecttesting\venv\lib\site-packages\graphviz\files.py", line 183, in render filepath = self.save(filename, directory) File "C:\Dev\source\python\prefecttesting\venv\lib\site-packages\graphviz\files.py", line 155, in save with io.open(filepath, 'w', encoding=self.encoding) as fd: PermissionError: [Errno 13] Permission denied: 'c:\\dev\\temp\\tmpn3cczj0a'
Thanks for the issue and the helpful pointer @mblye ! It looks like `graphviz` is attempting to open the temporary file a _second_ time here: https://github.com/PrefectHQ/prefect/blob/master/src/prefect/core/flow.py#L1105-L1106 and Windows doesn't like that. It should be possible to work around this by creating a temporary _directory_ and then using an arbitrary filename in the call to `graph.render` 👍
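A minimal sketch of the workaround the patch applies: create the temporary file with `delete=False`, close the handle so Windows allows the path to be reopened by name, and remove the file explicitly afterwards. The write step below stands in for `graph.render`.

```python
import os
import tempfile

# NamedTemporaryFile keeps the handle open by default; on Windows, a second
# open of the same path (which is what graphviz's render() does) then fails
# with a PermissionError.
with tempfile.NamedTemporaryFile(delete=False, suffix=".gv") as tmp:
    tmp.close()  # release the handle so other code may reopen the path by name
    try:
        # stand-in for graph.render(tmp.name, view=True)
        with open(tmp.name, "w", encoding="utf-8") as fd:
            fd.write("digraph { a -> b }")
    finally:
        os.unlink(tmp.name)  # delete=False means we must clean up ourselves
```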
2019-03-27T19:22:59Z
[]
[]
Traceback (most recent call last): File "C:\Dev\source\python\prefecttesting\dostuff.py", line 34, in <module> flow.visualize() File "C:\Dev\source\python\prefecttesting\venv\lib\site-packages\prefect\core\flow.py", line 1106, in visualize graph.render(tmp.name, view=True) File "C:\Dev\source\python\prefecttesting\venv\lib\site-packages\graphviz\files.py", line 183, in render filepath = self.save(filename, directory) File "C:\Dev\source\python\prefecttesting\venv\lib\site-packages\graphviz\files.py", line 155, in save with io.open(filepath, 'w', encoding=self.encoding) as fd: PermissionError: [Errno 13] Permission denied: 'c:\\dev\\temp\\tmpn3cczj0a'
824
PrefectHQ/prefect
PrefectHQ__prefect-973
4d09b0832f675847def55050bb1c7f0e651e93ef
diff --git a/src/prefect/environments/storage/docker.py b/src/prefect/environments/storage/docker.py --- a/src/prefect/environments/storage/docker.py +++ b/src/prefect/environments/storage/docker.py @@ -7,6 +7,7 @@ import tempfile import textwrap import uuid +from slugify import slugify from typing import Any, Callable, Dict, Iterable, List import docker @@ -125,7 +126,7 @@ def add_flow(self, flow: "prefect.core.flow.Flow") -> str: flow.name ) ) - flow_path = "/root/.prefect/{}.prefect".format(flow.name.replace(" ", "")) + flow_path = "/root/.prefect/{}.prefect".format(slugify(flow.name)) self.flows[flow.name] = flow_path self._flows[flow.name] = flow # needed prior to build return flow_path @@ -289,11 +290,12 @@ def create_dockerfile_object(self, directory: str = None) -> None: # Write all flows to file and load into the image copy_flows = "" for flow_name, flow_location in self.flows.items(): - flow_path = os.path.join(directory, "{}.flow".format(flow_name)) + clean_name = slugify(flow_name) + flow_path = os.path.join(directory, "{}.flow".format(clean_name)) with open(flow_path, "wb") as f: cloudpickle.dump(self._flows[flow_name], f) copy_flows += "COPY {source} {dest}\n".format( - source="{}.flow".format(flow_name), dest=flow_location + source="{}.flow".format(clean_name), dest=flow_location ) # Write a healthcheck script into the image
Flow storage copying fails with spaces in flow.name ``` Step 1/9 : FROM python:3.6 ---> 2bb3204ab1d1 Step 2/9 : RUN pip install pip --upgrade ---> Using cache ---> 4d8b101e2933 Step 3/9 : RUN pip install wheel ---> Using cache ---> 7389d69ba240 Step 4/9 : RUN mkdir /root/.prefect/ ---> Using cache ---> 10619fcde458 Step 5/9 : COPY Test Flow.flow /root/.prefect/TestFlow.prefect Traceback (most recent call last): File "managed_agent_testing.py", line 15, in <module> f.deploy(project_name="Test Project") File "/Users/josh/Desktop/code/prefect/src/prefect/core/flow.py", line 1361, in deploy set_schedule_inactive=set_schedule_inactive, File "/Users/josh/Desktop/code/prefect/src/prefect/client/client.py", line 357, in deploy serializedFlow=flow.serialize(build=build), File "/Users/josh/Desktop/code/prefect/src/prefect/core/flow.py", line 1116, in serialize storage = self.storage.build() # type: Optional[Storage] File "/Users/josh/Desktop/code/prefect/src/prefect/environments/storage/docker.py", line 170, in build image_name, image_tag = self.build_image(push=push) File "/Users/josh/Desktop/code/prefect/src/prefect/environments/storage/docker.py", line 219, in build_image "Your flow failed to deserialize in the container; please ensure that all necessary files and dependencies have been included." prefect.utilities.exceptions.SerializationError: Your flow failed to deserialize in the container; please ensure that all necessary files and dependencies have been included. ``` Code to reproduce: ``` from prefect import task, Flow from prefect.environments.storage import Docker @task def my_task(): print("ASDF") with Flow( "Test Flow", storage=Docker(...) ) as f: t1 = my_task() f.deploy(project_name="Test Project") ``` `COPY Test Flow.flow /root/.prefect/TestFlow.prefect` the whitespace between Test and Flow.flow is the issue. We need to sanitize this by replacing it with a special character
Since we already require `slugify`, we could use that as a quick off-the-shelf solution: ```python import slugify slugify.slugify("Test Flow") ## 'test-flow' ``` Check emojis too...
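A small sketch of the sanitisation step, assuming the `python-slugify` package (already a dependency, per the comment above) is installed; the flow names are made up.

```python
from slugify import slugify

# Spaces and other characters that are unsafe in file names / Dockerfile COPY
# lines are collapsed into hyphens, so "Test Flow" becomes "test-flow".
for flow_name in ["Test Flow", "ETL pipeline v2"]:
    clean_name = slugify(flow_name)
    print(f"COPY {clean_name}.flow /root/.prefect/{clean_name}.prefect")
```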
2019-04-24T00:00:55Z
[]
[]
Traceback (most recent call last): File "managed_agent_testing.py", line 15, in <module> f.deploy(project_name="Test Project") File "/Users/josh/Desktop/code/prefect/src/prefect/core/flow.py", line 1361, in deploy set_schedule_inactive=set_schedule_inactive, File "/Users/josh/Desktop/code/prefect/src/prefect/client/client.py", line 357, in deploy serializedFlow=flow.serialize(build=build), File "/Users/josh/Desktop/code/prefect/src/prefect/core/flow.py", line 1116, in serialize storage = self.storage.build() # type: Optional[Storage] File "/Users/josh/Desktop/code/prefect/src/prefect/environments/storage/docker.py", line 170, in build image_name, image_tag = self.build_image(push=push) File "/Users/josh/Desktop/code/prefect/src/prefect/environments/storage/docker.py", line 219, in build_image "Your flow failed to deserialize in the container; please ensure that all necessary files and dependencies have been included." prefect.utilities.exceptions.SerializationError: Your flow failed to deserialize in the container; please ensure that all necessary files and dependencies have been included.
844
PrefectHQ/prefect
PrefectHQ__prefect-978
d5317a9bcdf1cd73e10f0f5b6948260ceb84cfc4
diff --git a/src/prefect/environments/execution/cloud/environment.py b/src/prefect/environments/execution/cloud/environment.py --- a/src/prefect/environments/execution/cloud/environment.py +++ b/src/prefect/environments/execution/cloud/environment.py @@ -5,6 +5,7 @@ from os import path from typing import Any, List +import cloudpickle import docker import yaml @@ -94,14 +95,13 @@ def run_flow(self) -> None: cluster.adapt(minimum=1, maximum=1) # Load serialized flow from file and run it with a DaskExecutor - schema = prefect.serialization.flow.FlowSchema() with open( prefect.context.get( "flow_file_path", "/root/.prefect/flow_env.prefect" ), - "r", + "rb", ) as f: - flow = schema.load(json.load(f)) + flow = cloudpickle.load(f) executor = DaskExecutor(address=cluster.scheduler_address) FlowRunner(flow=flow).run(executor=executor)
Decoding error when running a stored flow ``` Traceback (most recent call last): File "<string>", line 1, in <module> File "/usr/local/lib/python3.6/site-packages/prefect/environments/execution/cloud/environment.py", line 104, in run_flow flow = schema.load(json.load(f)) File "/usr/local/lib/python3.6/json/__init__.py", line 296, in load return loads(fp.read(), File "/usr/local/lib/python3.6/codecs.py", line 321, in decode (result, consumed) = self._buffer_decode(data, self.errors, final) UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 0: invalid start byte ``` Still investigating the issue
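A minimal round-trip sketch of the fix: cloudpickle payloads are binary, so the file must be opened in `"wb"`/`"rb"` mode rather than `"r"`, which is what produced the `UnicodeDecodeError` above. Assumes `cloudpickle` is installed; the file name is illustrative.

```python
import os
import tempfile

import cloudpickle


def add_one(x):
    return x + 1


with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "flow_env.prefect")
    with open(path, "wb") as f:          # binary write
        cloudpickle.dump(add_one, f)
    with open(path, "rb") as f:          # binary read; "r" would attempt utf-8 decoding
        restored = cloudpickle.load(f)
    print(restored(41))  # 42
```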
2019-04-24T12:54:06Z
[]
[]
Traceback (most recent call last): File "<string>", line 1, in <module> File "/usr/local/lib/python3.6/site-packages/prefect/environments/execution/cloud/environment.py", line 104, in run_flow flow = schema.load(json.load(f)) File "/usr/local/lib/python3.6/json/__init__.py", line 296, in load return loads(fp.read(), File "/usr/local/lib/python3.6/codecs.py", line 321, in decode (result, consumed) = self._buffer_decode(data, self.errors, final) UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 0: invalid start byte
845
Qiskit/qiskit
Qiskit__qiskit-10126
574da7ee5cfb58cf3d7eb5ef726d15166c5e247a
diff --git a/qiskit/quantum_info/synthesis/qsd.py b/qiskit/quantum_info/synthesis/qsd.py --- a/qiskit/quantum_info/synthesis/qsd.py +++ b/qiskit/quantum_info/synthesis/qsd.py @@ -81,7 +81,7 @@ def qs_decomposition( circ = decomposer_1q(mat) elif dim == 4: if decomposer_2q is None: - if opt_a2: + if opt_a2 and _depth > 0: from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import def decomp_2q(mat): @@ -118,7 +118,7 @@ def decomp_2q(mat): right_circ = _demultiplex(u1, u2, opt_a1=opt_a1, opt_a2=opt_a2, _depth=_depth) circ.append(right_circ.to_instruction(), qr) - if opt_a2 and _depth == 0: + if opt_a2 and _depth == 0 and dim > 4: return _apply_a2(circ) return circ @@ -236,6 +236,8 @@ def _apply_a2(circ): for i, instruction in enumerate(ccirc.data): if instruction.operation.name == "qsd2q": ind2q.append(i) + if not ind2q: + return ccirc # rolling over diagonals ind2 = None # lint for ind1, ind2 in zip(ind2q[0:-1:], ind2q[1::]):
Quantum shannon decomposition failing for some inputs ### Environment - **Qiskit Terra version**: 0.42.1 - **Python version**: 3.10.8 ### What is happening? qs_decomposition in [qsd.py](https://github.com/Qiskit/qiskit-terra/blob/main/qiskit/quantum_info/synthesis/qsd.py) throws an error for some examples, seemingly due to a bug in the code ### How can we reproduce the issue? ``` from qiskit.quantum_info.synthesis.qsd import qs_decomposition qs_decomposition(np.array([[0,1],[1,0]])) ``` Output: ``` Traceback (most recent call last): Cell In[14], line 3 qs_decomposition(np.array([[0,1],[1,0]])) File /opt/conda/lib/python3.10/site-packages/qiskit/quantum_info/synthesis/qsd.py:122 in qs_decomposition return _apply_a2(circ) File /opt/conda/lib/python3.10/site-packages/qiskit/quantum_info/synthesis/qsd.py:253 in _apply_a2 qc3 = two_qubit_decompose.two_qubit_cnot_decompose(mat2) UnboundLocalError: local variable 'mat2' referenced before assignment Use %tb to get the full traceback. ``` Alternatively, ``` qs_decomposition(qiskit.quantum_info.random_unitary(4).to_matrix()) ``` Giving the same error ### What should happen? We should still be able to produce a decomposed circuit from these examples ### Any suggestions? This seems to occur when line 233 of qsd.py in the function ‘_apply_a2()’ does not transpile to include any of the ‘qsd2q’ gate type for an instance. To fix this add something such as the following before the loop on line 242: ``` if not ind2q: return ccirc ``` Additionally should add a test for this kind of case to the [test](https://github.com/Qiskit/qiskit-terra/blob/main/test/python/quantum_info/test_synthesis.py)
I can correct this with the fix indicated above, as long as the reasoning holds. Yeah, I believe your reasoning is correct here, thanks - I don't think there's anything to do if there's nothing to decompose. @ewinston can check me, though, and I'll assign him to the PR if you're able to make it. Let us know if not, though, and one of us will. The `_apply_a2` function actually wasn't supposed to be applied for `dim == 2`. I can submit a PR for that fix. Did you ever notice this for dimension > 2?
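For reference, a self-contained version of the reproduction above (the original snippet omits the `numpy` import); on a Terra release containing the fix this should return a circuit rather than raising. The import path is the one used in the issue and may move in later versions.

```python
import numpy as np

from qiskit.quantum_info.synthesis.qsd import qs_decomposition

# 1-qubit Pauli-X as a unitary; previously this reached _apply_a2 with no
# 'qsd2q' blocks to roll over and crashed with UnboundLocalError.
circ = qs_decomposition(np.array([[0, 1], [1, 0]], dtype=complex))
print(circ)
```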
2023-05-17T15:30:11Z
[]
[]
Traceback (most recent call last): Cell In[14], line 3 qs_decomposition(np.array([[0,1],[1,0]])) File /opt/conda/lib/python3.10/site-packages/qiskit/quantum_info/synthesis/qsd.py:122 in qs_decomposition return _apply_a2(circ) File /opt/conda/lib/python3.10/site-packages/qiskit/quantum_info/synthesis/qsd.py:253 in _apply_a2 qc3 = two_qubit_decompose.two_qubit_cnot_decompose(mat2) UnboundLocalError: local variable 'mat2' referenced before assignment
860
Qiskit/qiskit
Qiskit__qiskit-1020
017e4566bbc91cdecc9181ef0dc46e8656d2e3ac
diff --git a/qiskit/unroll/_dagunroller.py b/qiskit/unroll/_dagunroller.py --- a/qiskit/unroll/_dagunroller.py +++ b/qiskit/unroll/_dagunroller.py @@ -78,11 +78,12 @@ def expand_gates(self, basis=None): gatedefs.append(Gate(children)) # Walk through the DAG and examine each node builtins = ["U", "CX", "measure", "reset", "barrier"] + simulator_builtins = ['snapshot', 'save', 'load', 'noise'] topological_sorted_list = list(nx.topological_sort(self.dag_circuit.multi_graph)) for node in topological_sorted_list: current_node = self.dag_circuit.multi_graph.node[node] if current_node["type"] == "op" and \ - current_node["name"] not in builtins + basis and \ + current_node["name"] not in builtins + basis + simulator_builtins and \ not self.dag_circuit.gates[current_node["name"]]["opaque"]: subcircuit, wires = self._build_subcircuit(gatedefs, basis,
Using simulator instructions crashes the latex drawer <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Informations - **Qiskit Terra version**: latest master - **Python version**: 3.7 - **Operating system**: linux ### What is the current behavior? Attempting to use the latex drawer to render a circuit with simulator instructions stack traces in the dagunroller. For example: ``` Traceback (most recent call last): File "test_qiskit.py", line 67, in <module> visualization.generate_latex_source(qc, filename='out.tex') File "/tmp/qiskit/qiskit-terra/qiskit/tools/visualization/_circuit_visualization.py", line 354, in generate_latex_source json_circuit = transpile(dag_circuit, basis_gates=basis, format='json') File "/tmp/qiskit/qiskit-terra/qiskit/transpiler/_transpiler.py", line 346, in transpile dag = dag_unroller.expand_gates() File "/tmp/qiskit/qiskit-terra/qiskit/unroll/_dagunroller.py", line 86, in expand_gates not self.dag_circuit.gates[current_node["name"]]["opaque"]: KeyError: 'snapshot' ``` It looks like it's trying to treat the snapshot instruction as a gate (which it's not) and that's causing things to crash. ### Steps to reproduce the problem I've been running: ``` import qiskit.extensions.simulator from qiskit import * from qiskit.tools import visualization q = QuantumRegister(2) c = ClassicalRegister(1) qc = QuantumCircuit(q, c) qc.x(q[0]) qc.snapshot(slot=3) qc.x(q[1]) qc.h(q[0]) qc.barrier() qc.measure(q[0], c[0]) visualization.generate_latex_source(qc, filename='out.tex') ``` Also replacing snapshot() with save(), load(), and noise() ### What is the expected behavior? This should draw a circuit (the barriers won't be drawn for the simulator instructions, that's what I was working on adding when I encountered this) and not stack trace. ### Suggested solutions Fix the crash.
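A condensed, framework-free sketch of the check the fix adjusts in `expand_gates`: simulator directives such as `snapshot` are not user-defined gates and must be excluded from expansion alongside the hardware builtins. Function and variable names here are illustrative, not the unroller's actual API.

```python
BUILTINS = ["U", "CX", "measure", "reset", "barrier"]
SIMULATOR_BUILTINS = ["snapshot", "save", "load", "noise"]


def needs_expansion(name, basis, gate_defs):
    """Return True only for user-defined, non-opaque gates outside the basis."""
    if name in BUILTINS + basis + SIMULATOR_BUILTINS:
        return False
    # Looking up a simulator directive here is what raised KeyError: 'snapshot'.
    return not gate_defs[name]["opaque"]


gate_defs = {"my_gate": {"opaque": False}}
print(needs_expansion("snapshot", ["u3", "cx"], gate_defs))  # False, no KeyError
print(needs_expansion("my_gate", ["u3", "cx"], gate_defs))   # True
```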
2018-10-03T18:28:06Z
[]
[]
Traceback (most recent call last): File "test_qiskit.py", line 67, in <module> visualization.generate_latex_source(qc, filename='out.tex') File "/tmp/qiskit/qiskit-terra/qiskit/tools/visualization/_circuit_visualization.py", line 354, in generate_latex_source json_circuit = transpile(dag_circuit, basis_gates=basis, format='json') File "/tmp/qiskit/qiskit-terra/qiskit/transpiler/_transpiler.py", line 346, in transpile dag = dag_unroller.expand_gates() File "/tmp/qiskit/qiskit-terra/qiskit/unroll/_dagunroller.py", line 86, in expand_gates not self.dag_circuit.gates[current_node["name"]]["opaque"]: KeyError: 'snapshot'
869
Qiskit/qiskit
Qiskit__qiskit-10495
42a0ee84df3e15834b174bdb0295142b987b261f
diff --git a/qiskit/circuit/commutation_checker.py b/qiskit/circuit/commutation_checker.py --- a/qiskit/circuit/commutation_checker.py +++ b/qiskit/circuit/commutation_checker.py @@ -65,10 +65,20 @@ def _hashable_parameters(self, params): return ("fallback", str(params)) def commute( - self, op1: Operation, qargs1: List, cargs1: List, op2: Operation, qargs2: List, cargs2: List - ): + self, + op1: Operation, + qargs1: List, + cargs1: List, + op2: Operation, + qargs2: List, + cargs2: List, + max_num_qubits: int = 3, + ) -> bool: """ - Checks if two Operations commute. + Checks if two Operations commute. The return value of `True` means that the operations + truly commute, and the return value of `False` means that either the operations do not + commute or that the commutation check was skipped (for example, when the operations + have conditions or have too many qubits). Args: op1: first operation. @@ -77,10 +87,14 @@ def commute( op2: second operation. qargs2: second operation's qubits. cargs2: second operation's clbits. + max_num_qubits: the maximum number of qubits to consider, the check may be skipped if + the number of qubits for either operation exceeds this amount. Returns: bool: whether two operations commute. """ + # pylint: disable=too-many-return-statements + # We don't support commutation of conditional gates for now due to bugs in # CommutativeCancellation. See gh-8553. if ( @@ -105,6 +119,10 @@ def commute( if not (intersection_q or intersection_c): return True + # Skip the check if the number of qubits for either operation is too large + if len(qargs1) > max_num_qubits or len(qargs2) > max_num_qubits: + return False + # These lines are adapted from commutation_analysis, which is more restrictive than the # check from dag_dependency when considering nodes with "_directive". It would be nice to # think which optimizations from dag_dependency can indeed be used.
`commutation analysis` leads to `numpy.core._exceptions._ArrayMemoryError` with large `mct` ### Environment - **Qiskit Terra version**: 0.43.1 meta package, terra 0.24.1 - **Python version**: 3.10 - **Operating system**: docker continuumio/miniconda3 ### What is happening? When transpiling (level 2) a circuit with a large multi-cx gate (see doc: [here](https://qiskit.org/documentation/stubs/qiskit.circuit.QuantumCircuit.mct.html)) with a large number of qubits (e.g. `28`) the `commutation analysis` pass crashes. ### How can we reproduce the issue? Run this Python script: ```python from qiskit import QuantumCircuit, transpile qc = QuantumCircuit(29) qc.mct(list(range(28)), qc.num_qubits - 1) transpile(qc, optimization_level=2) ``` Produces this output and error: ```bash Traceback (most recent call last): File "myfile.py", line 7, in <module> transpile(qc, optimization_level=2) File "...qiskit/compiler/transpiler.py", line 380, in transpile _serial_transpile_circuit( File "...qiskit/compiler/transpiler.py", line 462, in _serial_transpile_circuit result = pass_manager.run(circuit, callback=callback, output_name=output_name) File "...qiskit/transpiler/passmanager.py", line 537, in run return super().run(circuits, output_name, callback) File "...qiskit/transpiler/passmanager.py", line 231, in run return self._run_single_circuit(circuits, output_name, callback) File "...qiskit/transpiler/passmanager.py", line 292, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "...qiskit/transpiler/runningpassmanager.py", line 125, in run dag = self._do_pass(pass_, dag, passset.options) File "...qiskit/transpiler/runningpassmanager.py", line 169, in _do_pass dag = self._do_pass(required_pass, dag, options) File "...qiskit/transpiler/runningpassmanager.py", line 173, in _do_pass dag = self._run_this_pass(pass_, dag) File "...qiskit/transpiler/runningpassmanager.py", line 227, in _run_this_pass pass_.run(FencedDAGCircuit(dag)) File "...qiskit/transpiler/passes/optimization/commutation_analysis.py", line 75, in run does_commute = self.comm_checker.commute( File "...qiskit/circuit/commutation_checker.py", line 136, in commute operator_2 = Operator(op2, input_dims=(2,) * len(qarg2), output_dims=(2,) * len(qarg2)) File "...qiskit/quantum_info/operators/operator.py", line 85, in __init__ self._data = self._init_instruction(data).data File "...qiskit/quantum_info/operators/operator.py", line 610, in _init_instruction op = Operator(np.eye(dimension)) File "...numpy/lib/twodim_base.py", line 215, in eye m = zeros((N, M), dtype=dtype, order=order) numpy.core._exceptions._ArrayMemoryError: Unable to allocate 2.00 EiB for an array with shape (536870912, 536870912) and data type float64 ``` ### What should happen? I would expect the optimizer to skip the pass if the gate is too large to optimize and leave it unoptimized. ### Any suggestions? I would skip the optimization pass when the number of qubits is too large (precise threshold to be determined based on the RAM of the current machine).
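A rough, framework-free sketch of the kind of guard the fix adds: before falling back to a dense-matrix commutation check, bail out when either operation touches more than `max_num_qubits` qubits, since the dense check needs a 2^n x 2^n matrix per operation. Only the simplest overlap case is handled here; the names and threshold are illustrative, not Qiskit's actual `CommutationChecker` API.

```python
import numpy as np


def gates_commute(mat1, qargs1, mat2, qargs2, max_num_qubits=3):
    """Return True only when the two operations provably commute.

    Oversized operations are reported as "not known to commute" (False)
    instead of allocating a 2**n x 2**n matrix and exhausting memory.
    """
    if len(qargs1) > max_num_qubits or len(qargs2) > max_num_qubits:
        return False
    if set(qargs1).isdisjoint(qargs2):
        return True  # disjoint qubits always commute
    if qargs1 == qargs2:
        return bool(np.allclose(mat1 @ mat2, mat2 @ mat1))
    return False  # partial-overlap handling omitted in this sketch


x = np.array([[0, 1], [1, 0]], dtype=complex)
z = np.diag([1, -1]).astype(complex)
print(gates_commute(x, [0], z, [0]))                    # False: X and Z anti-commute
print(gates_commute(x, [0], x, [0]))                    # True
print(gates_commute(None, list(range(28)), None, [27])) # False: skipped, too many qubits
```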
2023-07-25T09:22:38Z
[]
[]
Traceback (most recent call last): File "myfile.py", line 7, in <module> transpile(qc, optimization_level=2) File "...qiskit/compiler/transpiler.py", line 380, in transpile _serial_transpile_circuit( File "...qiskit/compiler/transpiler.py", line 462, in _serial_transpile_circuit result = pass_manager.run(circuit, callback=callback, output_name=outpu t_name) File "...qiskit/transpiler/passmanager.py", line 537, in run return super().run(circuits, output_name, callback)
913
Qiskit/qiskit
Qiskit__qiskit-10521
c8552f6b51a36aa432b21b0d31b88e212a104ca7
diff --git a/qiskit/circuit/quantumcircuit.py b/qiskit/circuit/quantumcircuit.py --- a/qiskit/circuit/quantumcircuit.py +++ b/qiskit/circuit/quantumcircuit.py @@ -2180,7 +2180,7 @@ def copy(self, name: str | None = None) -> "QuantumCircuit": """Copy the circuit. Args: - name (str): name to be given to the copied circuit. If None, then the name stays the same + name (str): name to be given to the copied circuit. If None, then the name stays the same. Returns: QuantumCircuit: a deepcopy of the current circuit, with the specified name @@ -2222,6 +2222,10 @@ def copy_empty_like(self, name: str | None = None) -> "QuantumCircuit": Returns: QuantumCircuit: An empty copy of self. """ + if not (name is None or isinstance(name, str)): + raise TypeError( + f"invalid name for a circuit: '{name}'. The name must be a string or 'None'." + ) cpy = copy.copy(self) # copy registers correctly, in copy.copy they are only copied via reference cpy.qregs = self.qregs.copy()
`QuantumCircuit.copy(name=?)` allows for any object to be used as name ### Environment - **Qiskit Terra version**: 0.43.1 meta package, terra 0.24.1 - **Python version**: 3.10 - **Operating system**: docker continuumio/miniconda3 ### What is happening? Creating a circuit via copy allows to give an invalid name (not a string), that is then checked only when the circuit is transpiled. In this case the `copy()` api raises no error when passing a circuit as name (see API: [copy](https://qiskit.org/documentation/stubs/qiskit.circuit.QuantumCircuit.copy.html)) ### How can we reproduce the issue? Run this python script: ```python from qiskit import QuantumCircuit from qiskit.compiler import transpile qc_output = QuantumCircuit(2, 2) input_circuit = QuantumCircuit(2, 2) input_circuit.measure([0, 1], [0, 1]) qc_output = qc_output.compose(input_circuit, qubits=range(2)) final_circuit = qc_output.copy(qc_output) final_circuit.measure([0, 1], [0, 1]) print(final_circuit.draw()) print("Circuit name: ", final_circuit.name) transpile(final_circuit, optimization_level=0) ``` Produces this output and error: ```bash ┌─┐ ┌─┐ q_0: ┤M├───┤M├─── └╥┘┌─┐└╥┘┌─┐ q_1: ─╫─┤M├─╫─┤M├ ║ └╥┘ ║ └╥┘ c: 2/═╩══╩══╩══╩═ 0 1 0 1 Circuit name: ┌─┐ q_0: ┤M├─── └╥┘┌─┐ q_1: ─╫─┤M├ ║ └╥┘ c: 2/═╩══╩═ 0 1 Traceback (most recent call last): File "myfile.py", line 13, in <module> transpile(final_circuit, optimization_level=0) File ".../qiskit/compiler/transpiler.py", line 380, in transpile _serial_transpile_circuit( File ".../qiskit/compiler/transpiler.py", line 462, in _serial_transpile_circuit result = pass_manager.run(circuit, callback=callback, output_name=outpu t_name) File ".../qiskit/transpiler/passmanager.py", line 537, in run return super().run(circuits, output_name, callback) File ".../qiskit/transpiler/passmanager.py", line 231, in run return self._run_single_circuit(circuits, output_name, callback) File ".../qiskit/transpiler/passmanager.py", line 292, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, call back=callback) File ".../qiskit/transpiler/runningpassmanager.py", line 127, in run circuit = dag_to_circuit(dag, copy_operations=False) File ".../qiskit/converters/dag_to_circuit.py", line 58, in dag_to_circuit circuit = QuantumCircuit( File ".../qiskit/circuit/quantumcircuit.py", line 260, in __init__ raise CircuitError( qiskit.circuit.exceptions.CircuitError: 'The circuit name should be a strin g (or None to auto-generate a name).' ``` ### What should happen? I would expect the `copy` method to check the name of the circuit, and raise an error if it is not a string, whereas now it is allowed without any check (see [here](https://github.com/Qiskit/qiskit-terra/blob/802a735ebea547d0d96339c2de4a10f04b0ab8a6/qiskit/circuit/quantumcircuit.py#L2231)). Even this is allowed: ```python ... class AnyObject(object): def __init__(self, field): self.field = field new_circuit = qc_output.copy(name=AnyObject("new_circuit")) print(new_circuit.name) ``` Outputting: ``` ... <__main__.AnyObject object at 0x7f7c98809ab0> ``` ### Any suggestions? I think the `copy` method should check the name of the circuit, and raise an error if it is not a string, so that the error is more precise on the line of the `copy()` call rather than the `transpile()`.
This is somewhat a regular part of Python programming; the language doesn't enforce type checking. You'll _always_ be able to put badly typed objects into Python classes, since the language fundamentally doesn't have access control. That said, in this particular case, we _do_ do the manual check in `QuantumCircuit.__init__`, it's not very costly to do, and doesn't have greater performance implications, so it's reasonable that we could do the check in `QuantumCircuit.copy` as well. @jakelishman @MattePalte I would like to solve this issue and contribute to Qiskit. Can you please assign me? Thank you. Running tox -elint-incr on my repository gives the following error: `ERROR: sympy is imported via qiskit.circuit.quantumcircuit` But sympy is not being imported. **Why is this error showing up?** And by the way, I have fixed this issue and am in the testing phase. Will make a pull request once the above error gets solved. EDIT - I think it is showing up because qiskit/circuit/parameter.py is importing sympy @Abhiraj-Shrotriya For various reasons, Sympy is optionally imported. Tox flags that as an error, but this is not the case in the CI tests. If this is the only error you are facing, you should pass the CI tests. Updating my branch to the latest version of Qiskit:main breaks my Pull Request. The CI tests (which of course take time) need to rerun. Some of these re-tests fail which passed in the original run. Will not updating my branch affect the merging process? You don't need to update your branch to `main` unless there are merge conflicts. We'll handle that automatically during the final merge window.
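A minimal sketch of the guard the fix adds, mirroring the check that `QuantumCircuit.__init__` already performs, so the error surfaces at `copy()` time rather than deep inside `transpile()`. The helper name is made up for illustration.

```python
def validate_circuit_name(name):
    """Accept only `str` or `None` as a circuit name, mirroring the added check."""
    if not (name is None or isinstance(name, str)):
        raise TypeError(
            f"invalid name for a circuit: '{name}'. The name must be a string or 'None'."
        )
    return name


validate_circuit_name("my_circuit")   # fine
validate_circuit_name(None)           # fine
try:
    validate_circuit_name(object())   # e.g. passing a QuantumCircuit as the name
except TypeError as err:
    print(err)
```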
2023-07-27T19:56:41Z
[]
[]
Traceback (most recent call last): File "myfile.py", line 13, in <module> transpile(final_circuit, optimization_level=0) File ".../qiskit/compiler/transpiler.py", line 380, in transpile _serial_transpile_circuit( File ".../qiskit/compiler/transpiler.py", line 462, in _serial_transpile_circuit result = pass_manager.run(circuit, callback=callback, output_name=outpu t_name) File ".../qiskit/transpiler/passmanager.py", line 537, in run return super().run(circuits, output_name, callback)
916
Qiskit/qiskit
Qiskit__qiskit-10537
4722c50a59157a5be638c8b30a2b77a0b127e4ae
diff --git a/qiskit/qpy/binary_io/circuits.py b/qiskit/qpy/binary_io/circuits.py --- a/qiskit/qpy/binary_io/circuits.py +++ b/qiskit/qpy/binary_io/circuits.py @@ -970,10 +970,13 @@ def write_circuit(file_obj, circuit, metadata_serializer=None): new_custom_operations = list(custom_operations.keys()) while new_custom_operations: operations_to_serialize = new_custom_operations.copy() + new_custom_operations = [] for name in operations_to_serialize: operation = custom_operations[name] - new_custom_operations = _write_custom_operation( - custom_operations_buffer, name, operation, custom_operations + new_custom_operations.extend( + _write_custom_operation( + custom_operations_buffer, name, operation, custom_operations + ) ) file_obj.write(struct.pack(formats.CUSTOM_CIRCUIT_DEF_HEADER_PACK, len(custom_operations)))
QPY invalid payload generation via `compose()` or `QuantumCircuit.control()` ### Environment - **Qiskit Terra version**: 0.23.2 - **Python version**: 3.10 - **Operating system**: Linux ### What is happening? Running `compose()` with `inplace=True` from an input generated with `QuantumCircuit.control()` leads to a circuit that when qpy serialized is not load-able. This points to an internal state that doesn't match the actual data of the instruction object. I expect the mismatch is caused by the number of arguments or qubits the gate reported in qc which is incorrect The specific failure in this case is: ``` Traceback (most recent call last): File "/tmp/test_qpy_roundtrip.py", line 17, in <module> new_qc = load(fd)[0] File "/tmp/foo/lib/python3.10/site-packages/qiskit/qpy/interface.py", line 269, in load loader( File "/tmp/foo/lib/python3.10/site-packages/qiskit/qpy/binary_io/circuits.py", line 905, in read_circuit _read_instruction(file_obj, circ, out_registers, custom_operations, version, vectors) File "/tmp/foo/lib/python3.10/site-packages/qiskit/qpy/binary_io/circuits.py", line 161, in _read_instruction struct.unpack( struct.error: unpack requires a buffer of 33 bytes ``` ### How can we reproduce the issue? ```python import io from qiskit.circuit.random import random_circuit import numpy as np from qiskit import QuantumCircuit from qiskit.qpy import dump, load qc0 = random_circuit(2, 2, seed=1).decompose(reps=1) qc1 = random_circuit(2, 2, seed=1).decompose(reps=1) qc = QuantumCircuit(3) qc.compose(qc0.control(1), [0,1,2], inplace=True) qc.compose(qc1.control(1), [0,1,2], inplace=True) with io.BytesIO() as fd: dump(qc, fd) fd.seek(0) new_qc = load(fd)[0] assert qc == new_qc ``` ### What should happen? This should not error during the `load()` call ### Any suggestions? I believe something about the compose call is corrupting the internal state of the circuit which is leading to a QPY payload that has a mismatch between a size
For anyone experiencing this you can work around this failure by changing: ```python qc.compose(qc0.control(1), [0,1,2], inplace=True) qc.compose(qc1.control(1), [0,1,2], inplace=True) ``` to ```python qc.append(qc0.control(1), [0,1,2]) qc.append(qc1.control(1), [0,1,2]) ``` I think something's a bit odd about `QuantumCircuit.control`: it constructs a gate, from the circuit, controls that, and adds the resulting gate to a circuit. That means that when you compose it onto a circuit, it's _still_ a wrapped custom instruction rather than having been properly inlined. If I change `QuantumCircuit.control` to just return `circuit_to_gate(self).control(...).definition`, the QPY stuff works fine and that code makes more sense. That said, it's not necessarily the correct fix for here (unless there's an actual bug in `QuantumCircuit.control`), because the current form _should_ still be producing a valid circuit that roundtrips through QPY. Hmm, that seems odd qpy shouldn't care about it being wrapped in a custom instruction. I wonder if it's the same bug as https://github.com/Qiskit/qiskit-terra/issues/8941 where the controlled gates are ending up with the same names and that's causing issues. I'm suspicious that there's a mistake in the recursive handling, when there's multiple custom instructions that all contain other custom instructions. I'm fairly confident the issue happens during the QPY dump, not the read. I instrumented `write_circuit` with a `print(circuit.name)`, and it shows an asymmetry between how it handles the two gates - it shows that it touches `qc.data[1].operation._definition` and `qc.data[1].operation.base_gate.definition`, but only one of those two things for `qc.data[0]`. Hi all, thanks for looking into this issue ! The workaround proposed above (with `inplace=False`) will create two empty circuits though right ? From what I could understand, when `qpy` dumps the base circuit (here: https://github.com/Qiskit/qiskit-terra/blob/16f6adb310719619f5cc07d314a95f12d6ea07c4/qiskit/qpy/binary_io/circuits.py#L649) something might be going wrong with the format. So when reading the `.qpy` file, the `reader` manages to read the first base circuit but fails at the second. Thanks again ! The key to the workaround I suggested in https://github.com/Qiskit/qiskit-terra/issues/9746#issuecomment-1458534286 is that it's using the `append()` method instead of `compose()` and that is always done in place. So taking the OP code it returns a circuit that looks like: ``` ┌───────────────┐┌───────────────┐ q_0: ┤0 ├┤0 ├ │ ││ │ q_1: ┤1 c_circuit-88 ├┤1 c_circuit-91 ├ │ ││ │ q_2: ┤2 ├┤2 ├ └───────────────┘└───────────────┘ ``` which decomposed looks like: ``` q_0: ───────■──────────────■─────── ┌──────┴──────┐┌──────┴──────┐ q_1: ┤0 ├┤0 ├ │ circuit-88 ││ circuit-91 │ q_2: ┤1 ├┤1 ├ └─────────────┘└─────────────┘ ``` and another layer deeper is: ``` q_0: ──■───■──────────■───■─────────■───■──────────■───■──────────■───■───────» ┌─┴─┐ │P(-π/4) │ │ │ │ ┌─┴─┐ │P(-π/4) │ │ » q_1: ┤ X ├─■──────────■───┼─────────■───┼────────┤ X ├─■──────────■───┼───────» └─┬─┘ ┌─┴─┐ │P(π/4) ┌─┴─┐ │P(-π/4) └─┬─┘ ┌─┴─┐ │P(π/4) » q_2: ──■────────────┤ X ├─■───────┤ X ├─■──────────■────────────┤ X ├─■───────» └───┘ └───┘ └───┘ » « «q_0: ──■───■──────── « │ │ «q_1: ──■───┼──────── « ┌─┴─┐ │P(-π/4) «q_2: ┤ X ├─■──────── « └───┘ ``` The workaround uses `QuantumCircuit.append` not `QuantumCircuit.compose` - `append` is in place as well. 
The difference is that `append` adds things as a single instruction, whereas `compose` "inlines" the given circuit into the existing one. It so happens that `QuantumCircuit.control` does some pretty weird stuff internally, so you probably wouldn't spot the difference in this case, but for most things, it's important for abstract optimisations in the transpiler (overuse of `compose` is akin to prematurely inlining functions in a classical language). I don't believe there's anything wrong with the QPY format itself, but I am suspicious that the recursive handling of custom gates (of which this is an example) might be skipping one of the necessary circuits when there's several compound custom gates in succession. Oh I didn't notice the `compose` to `append` switch sorry ! Indeed that's a good fix ! Interestingly, as of 0.25.0, QPY now throws an error during the re-load, which might suggest where the underlying bug was: ``` --------------------------------------------------------------------------- error Traceback (most recent call last) <ipython-input-1-2524f28d100e> in <module> 15 dump(qc, fd) 16 fd.seek(0) ---> 17 new_qc = load(fd)[0] 18 ~/code/qiskit/terra/qiskit/qpy/interface.py in load(file_obj, metadata_deserializer) 267 for _ in range(data.num_programs): 268 programs.append( --> 269 loader( 270 file_obj, 271 data.qpy_version, ~/code/qiskit/terra/qiskit/qpy/binary_io/circuits.py in read_circuit(file_obj, version, metadata_deserializer) 1087 custom_operations = _read_custom_operations(file_obj, version, vectors) 1088 for _instruction in range(num_instructions): -> 1089 _read_instruction(file_obj, circ, out_registers, custom_operations, version, vectors) 1090 1091 # Read calibrations ~/code/qiskit/terra/qiskit/qpy/binary_io/circuits.py in _read_instruction(file_obj, circuit, registers, custom_operations, version, vectors) 176 else: 177 instruction = formats.CIRCUIT_INSTRUCTION_V2._make( --> 178 struct.unpack( 179 formats.CIRCUIT_INSTRUCTION_V2_PACK, 180 file_obj.read(formats.CIRCUIT_INSTRUCTION_V2_SIZE), error: unpack requires a buffer of 33 bytes ``` (or alternatively I broke something with #10392) Oh no wait, sorry, I _totally_ forgot what the top comment says - the error's the exact same. That's what I get for trying to come back to an issue after a couple of months and not taking the time to re-read everything properly.
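The underlying QPY bug was a worklist that got overwritten instead of extended while recursing through nested custom operations, so one gate's children could be dropped from the payload. A framework-free sketch of the corrected traversal follows; the function name and the toy hierarchy (borrowed from the drawings above) are illustrative.

```python
def collect_custom_definitions(top_level, children_of):
    """Breadth-first collection of every nested custom definition."""
    seen = dict.fromkeys(top_level)   # preserves discovery order
    new_ops = list(top_level)
    while new_ops:
        to_serialize, new_ops = new_ops, []
        for name in to_serialize:
            # The fix: extend the next worklist with *all* children found in
            # this pass, rather than overwriting it per operation.
            for child in children_of.get(name, []):
                if child not in seen:
                    seen[child] = None
                    new_ops.append(child)
    return list(seen)


# Two controlled custom gates, each wrapping its own inner definition.
children = {"c_circuit-88": ["circuit-88"], "c_circuit-91": ["circuit-91"]}
print(collect_custom_definitions(["c_circuit-88", "c_circuit-91"], children))
# ['c_circuit-88', 'c_circuit-91', 'circuit-88', 'circuit-91']
```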
2023-07-31T17:26:31Z
[]
[]
Traceback (most recent call last): File "/tmp/test_qpy_roundtrip.py", line 17, in <module> new_qc = load(fd)[0] File "/tmp/foo/lib/python3.10/site-packages/qiskit/qpy/interface.py", line 269, in load loader( File "/tmp/foo/lib/python3.10/site-packages/qiskit/qpy/binary_io/circuits.py", line 905, in read_circuit _read_instruction(file_obj, circ, out_registers, custom_operations, version, vectors) File "/tmp/foo/lib/python3.10/site-packages/qiskit/qpy/binary_io/circuits.py", line 161, in _read_instruction struct.unpack( struct.error: unpack requires a buffer of 33 bytes
919
Qiskit/qiskit
Qiskit__qiskit-1118
02200d2cdbbc5057062c35f9002463db7795cdf0
diff --git a/qiskit/backends/aer/statevector_simulator.py b/qiskit/backends/aer/statevector_simulator.py --- a/qiskit/backends/aer/statevector_simulator.py +++ b/qiskit/backends/aer/statevector_simulator.py @@ -56,8 +56,6 @@ def _run_job(self, job_id, qobj): QobjInstruction(name='snapshot', params=[final_state_key]) ) result = super()._run_job(job_id, qobj) - # Replace backend name with current backend - result.backend_name = self.name # Extract final state snapshot and move to 'statevector' data field for experiment_result in result.results.values(): snapshots = experiment_result.snapshots diff --git a/qiskit/backends/aer/statevector_simulator_py.py b/qiskit/backends/aer/statevector_simulator_py.py --- a/qiskit/backends/aer/statevector_simulator_py.py +++ b/qiskit/backends/aer/statevector_simulator_py.py @@ -70,8 +70,6 @@ def _run_job(self, job_id, qobj): QobjInstruction(name='snapshot', params=[final_state_key]) ) result = super()._run_job(job_id, qobj) - # Replace backend name with current backend - result.backend_name = self.name # Extract final state snapshot and move to 'statevector' data field for experiment_result in result.results.values(): snapshots = experiment_result.snapshots
Can not combine the Result object from the same backend (statevector) <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Informations - **Qiskit Terra version**: the master branch - **Python version**: 3.6.5 - **Operating system**: macOS 10.13 ### What is the current behavior? raise error ``` Traceback (most recent call last): File "/Users/rchen/Developer/Quantum/qiskit-terra/qiskit/result/_result.py", line 125, in __add__ copy_of_self += other File "/Users/rchen/Developer/Quantum/qiskit-terra/qiskit/result/_result.py", line 108, in __iadd__ raise QISKitError('Result objects from different backends cannot be combined.') qiskit._qiskiterror.QISKitError: 'Result objects from different backends cannot be combined.' ``` ### Steps to reproduce the problem Code ```python from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister import qiskit as qk import numpy as np num_qubits = 2 q = QuantumRegister(num_qubits, name='q') c = ClassicalRegister(num_qubits, name='c') circuits = QuantumCircuit(q, c) param_idx = 0 for qubit in range(num_qubits): circuits.u3(0.0, 0.0, 0.0, q[qubit]) circuits.u1(3.0, q[qubit]) # circuits.measure(q, c) my_backend = qk.Aer.get_backend('statevector_simulator') qobj = qk.compile(circuits=circuits, backend=my_backend) job = my_backend.run(qobj) result_a = job.result() qobj = qk.compile(circuits=circuits, backend=my_backend) job = my_backend.run(qobj) result_b = job.result() result = result_a + result_b ``` ### What is the expected behavior? Result objects are combined without error ### Suggested solutions None Note: If I change the backend to `qasm_simulator`, there is no error.
So I dug into this, the underlying issue is that name in most places is not actually a property/string but instead actually a method. So when result is doing the comparison of the backend_name property it's getting a bound method (which doesn't match for different objects) instead of the string it was expecting. We can fix this for the statevector simulator case by changing the result code to call `result.backend_name()` instead of `result.backend_name`. However there are cases where backend_name is a string and doing this will break those. Ideally I'd like to see everything be a string/property since it's static and calling a function seems unnecessary, but it looks like the assumption that it's a function is in a bunch of other places throughout the code. I'll unravel the ball of yarn and figure out a way to preserve our interface compatibility while making it behave consistently for all the backends. So as a quick workaround I can check both? Like comparing `result_a.backend_name() == result_b.backend_name() or result_a.backend_name == result_b.backend_name`
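A small sketch of the workaround suggested above: normalise `backend_name` whether it is stored as a plain string or exposed as a bound method, so results can be compared before combining. The class below is only a stand-in for a `Result` object.

```python
def backend_name_of(result):
    """Return the backend name for either the attribute or the method variant."""
    name = result.backend_name
    return name() if callable(name) else name


class FakeResult:
    def __init__(self, name, as_method):
        # Some simulators expose backend_name as a callable, others as a string.
        self.backend_name = (lambda: name) if as_method else name


a = FakeResult("statevector_simulator", as_method=True)
b = FakeResult("statevector_simulator", as_method=False)
print(backend_name_of(a) == backend_name_of(b))  # True, safe to combine
```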
2018-10-17T18:52:16Z
[]
[]
Traceback (most recent call last): File "/Users/rchen/Developer/Quantum/qiskit-terra/qiskit/result/_result.py", line 125, in __add__ copy_of_self += other File "/Users/rchen/Developer/Quantum/qiskit-terra/qiskit/result/_result.py", line 108, in __iadd__ raise QISKitError('Result objects from different backends cannot be combined.') qiskit._qiskiterror.QISKitError: 'Result objects from different backends cannot be combined.'
942
Qiskit/qiskit
Qiskit__qiskit-1215
9d603f11a350ee77e5cd3fa02c8e61f40ab44440
diff --git a/qiskit/backends/aer/aerjob.py b/qiskit/backends/aer/aerjob.py --- a/qiskit/backends/aer/aerjob.py +++ b/qiskit/backends/aer/aerjob.py @@ -114,8 +114,12 @@ def status(self): elif self._future.done(): _status = JobStatus.DONE if self._future.exception() is None else JobStatus.ERROR else: - raise JobError('Unexpected behavior of {0}'.format( - self.__class__.__name__)) + # Note: There is an undocumented Future state: PENDING, that seems to show up when + # the job is enqueued, waiting for someone to pick it up. We need to deal with this + # state but there's no public API for it, so we are assuming that if the job is not + # in any of the previous states, is PENDING, ergo INITIALIZING for us. + _status = JobStatus.INITIALIZING + return _status def backend_name(self):
test_compiler breaks AerJob status check <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Informations - **Qiskit Terra version**: master - **Python version**: - **Operating system**: ### What is the current behavior? When running the tests, the following does not work when run *after* the `test_compiler` module: ```python backend = Aer.get_backend('qasm_simulator') job_sim = execute(qc, backend) job_sim.status() ``` ``` Traceback (most recent call last): File "/Users/paul/Desktop/Github_repos/qiskit-core/test/python/test_cpp.py", line 34, in test_aer_status job_sim.status() File "/Users/paul/Desktop/Github_repos/qiskit-core/qiskit/backends/aer/aerjob.py", line 37, in _wrapper return func(self, *args, **kwargs) File "/Users/paul/Desktop/Github_repos/qiskit-core/qiskit/backends/aer/aerjob.py", line 118, in status self.__class__.__name__)) qiskit.backends.joberror.JobError: 'Unexpected behavior of AerJob' ``` However, if run *before* that module, it works fine. This is true both when running locally and on Travis. This is blocking #975. ### Steps to reproduce the problem ### What is the expected behavior? ### Suggested solutions
I'm taking over this one
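A framework-free sketch of the status mapping after the fix: a `concurrent.futures.Future` that has been submitted but not yet picked up by a worker is neither running, cancelled, nor done, and that undocumented PENDING case is now reported as an initializing status instead of raising. The status strings are illustrative, not Qiskit's `JobStatus` enum.

```python
from concurrent import futures


def job_status(future):
    """Map a concurrent.futures.Future to a coarse job status string."""
    if future.cancelled():
        return "CANCELLED"
    if future.running():
        return "RUNNING"
    if future.done():
        return "DONE" if future.exception() is None else "ERROR"
    # Not cancelled, not running, not done: the future is still enqueued.
    return "INITIALIZING"


with futures.ThreadPoolExecutor(max_workers=1) as pool:
    fut = pool.submit(sum, range(10))
    fut.result()              # wait for completion
    print(job_status(fut))    # DONE
```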
2018-11-05T10:38:23Z
[]
[]
Traceback (most recent call last): File "/Users/paul/Desktop/Github_repos/qiskit-core/test/python/test_cpp.py", line 34, in test_aer_status job_sim.status() File "/Users/paul/Desktop/Github_repos/qiskit-core/qiskit/backends/aer/aerjob.py", line 37, in _wrapper return func(self, *args, **kwargs) File "/Users/paul/Desktop/Github_repos/qiskit-core/qiskit/backends/aer/aerjob.py", line 118, in status self.__class__.__name__)) qiskit.backends.joberror.JobError: 'Unexpected behavior of AerJob'
958
Qiskit/qiskit
Qiskit__qiskit-1284
3fb837f1afd6be505c3139d5f226936d7ff9a1fc
diff --git a/examples/python/teleport.py b/examples/python/teleport.py --- a/examples/python/teleport.py +++ b/examples/python/teleport.py @@ -47,6 +47,7 @@ qc.measure(q[1], c1[0]) # Apply a correction +qc.barrier(q) qc.z(q[2]).c_if(c0, 1) qc.x(q[2]).c_if(c1, 1) qc.measure(q[2], c2[0]) @@ -57,17 +58,32 @@ ############################################################### # First version: not mapped -qobj = compile(qc, backend=backend, coupling_map=None, shots=1024) +initial_layout = {("q", 0): ("q", 0), ("q", 1): ("q", 1), + ("q", 2): ("q", 2)} +qobj = compile(qc, backend=backend, coupling_map=None, shots=1024, initial_layout=initial_layout) job = backend.run(qobj) +qobj_exp = qobj.experiments[0] +print(qobj_exp.header.qubit_labels) +print(qobj_exp.header.compiled_circuit_qasm) +print(qobj_exp.header.clbit_labels) +for i in qobj_exp.instructions: + print(i) + result = job.result() print(result) print(result.get_counts(qc)) # Second version: mapped to 2x8 array coupling graph -qobj = compile(qc, backend=backend, coupling_map=coupling_map, shots=1024) +qobj = compile(qc, backend=backend, coupling_map=coupling_map, shots=1024,initial_layout=initial_layout) +qobj_exp = qobj.experiments[0] +print(qobj_exp.header.qubit_labels) +qobj_exp.header.compiled_circuit_qasm = "" +print(qobj_exp.header.compiled_circuit_qasm) +print(qobj_exp.header.clbit_labels) +for i in qobj_exp.instructions: + print(i) job = backend.run(qobj) result = job.result() - print(result) print(result.get_counts(qc)) diff --git a/qiskit/_quantumcircuit.py b/qiskit/_quantumcircuit.py --- a/qiskit/_quantumcircuit.py +++ b/qiskit/_quantumcircuit.py @@ -11,7 +11,10 @@ Quantum circuit object. """ import itertools +import warnings from collections import OrderedDict +from copy import deepcopy + from qiskit.qasm import _qasm from qiskit.unrollers import _unroller @@ -107,9 +110,9 @@ def __init__(self, *regs, name=None): self.data = [] # This is a map of registers bound to this circuit, by name. 
- self.qregs = OrderedDict() - self.cregs = OrderedDict() - self.add(*regs) + self.qregs = [] + self.cregs = [] + self.add_register(*regs) @classmethod def _increment_instances(cls): @@ -138,10 +141,10 @@ def has_register(self, register): """ has_reg = False if (isinstance(register, QuantumRegister) and - register in self.qregs.values()): + register in self.qregs): has_reg = True elif (isinstance(register, ClassicalRegister) and - register in self.cregs.values()): + register in self.cregs): has_reg = True return has_reg @@ -160,8 +163,15 @@ def combine(self, rhs): self._check_compatible_regs(rhs) # Make new circuit with combined registers - combined_qregs = {**self.qregs, **rhs.qregs}.values() - combined_cregs = {**self.cregs, **rhs.cregs}.values() + combined_qregs = deepcopy(self.qregs) + combined_cregs = deepcopy(self.cregs) + + for element in rhs.qregs: + if element not in self.qregs: + combined_qregs.append(element) + for element in rhs.cregs: + if element not in self.cregs: + combined_cregs.append(element) circuit = QuantumCircuit(*combined_qregs, *combined_cregs) for gate in itertools.chain(self.data, rhs.data): gate.reapply(circuit) @@ -182,8 +192,12 @@ def extend(self, rhs): self._check_compatible_regs(rhs) # Add new registers - self.qregs.update(rhs.qregs) - self.cregs.update(rhs.cregs) + for element in rhs.qregs: + if element not in self.qregs: + self.qregs.append(element) + for element in rhs.cregs: + if element not in self.cregs: + self.cregs.append(element) # Add new gates for gate in rhs.data: @@ -211,19 +225,27 @@ def _attach(self, instruction): self.data.append(instruction) return instruction - def add(self, *regs): + def add_register(self, *regs): """Add registers.""" for register in regs: - if register.name in self.qregs or register.name in self.cregs: + if register in self.qregs or register in self.cregs: raise QISKitError("register name \"%s\" already exists" % register.name) if isinstance(register, QuantumRegister): - self.qregs[register.name] = register + self.qregs.append(register) elif isinstance(register, ClassicalRegister): - self.cregs[register.name] = register + self.cregs.append(register) else: raise QISKitError("expected a register") + def add(self, *regs): + """Add registers.""" + + warnings.warn('The add() function is deprecated and will be ' + 'removed in a future release. 
Instead use ' + 'QuantumCircuit.add_register().', DeprecationWarning) + self.add_register(*regs) + def _check_qreg(self, register): """Raise exception if r is not in this circuit or not qreg.""" if not isinstance(register, QuantumRegister): @@ -263,12 +285,14 @@ def _check_dups(self, qubits): def _check_compatible_regs(self, rhs): """Raise exception if the circuits are defined on incompatible registers""" - lhs_regs = {**self.qregs, **self.cregs} - rhs_regs = {**rhs.qregs, **rhs.cregs} - common_registers = lhs_regs.keys() & rhs_regs.keys() - for name in common_registers: - if lhs_regs[name] != rhs_regs[name]: - raise QISKitError("circuits are not compatible") + + list1 = self.qregs + self.cregs + list2 = rhs.qregs + rhs.cregs + for element1 in list1: + for element2 in list2: + if element2.name == element1.name: + if element1 != element2: + raise QISKitError("circuits are not compatible") def _gate_string(self, name): """Return a QASM string for the named gate.""" @@ -292,9 +316,9 @@ def qasm(self): for gate_name in self.definitions: if self.definitions[gate_name]["print"]: string_temp += self._gate_string(gate_name) - for register in self.qregs.values(): + for register in self.qregs: string_temp += register.qasm() + "\n" - for register in self.cregs.values(): + for register in self.cregs: string_temp += register.qasm() + "\n" for instruction in self.data: string_temp += instruction.qasm() + "\n" diff --git a/qiskit/_register.py b/qiskit/_register.py --- a/qiskit/_register.py +++ b/qiskit/_register.py @@ -84,3 +84,24 @@ def __iter__(self): form `tuple (Register, int)`. """ return zip([self]*self.size, range(self.size)) + + def __eq__(self, other): + """Two Registers are the same if they are of the same type + (i.e. quantum/classical), and have the same name and size. + + Args: + other (Register): other Register + + Returns: + bool: are self and other equal. + """ + res = False + if type(self) is type(other) and \ + self.name == other.name and \ + self.size == other.size: + res = True + return res + + def __hash__(self): + """Make object hashable, based on the name and size to hash.""" + return hash(str(type(self)) + self.name + str(self.size)) diff --git a/qiskit/dagcircuit/_dagcircuit.py b/qiskit/dagcircuit/_dagcircuit.py --- a/qiskit/dagcircuit/_dagcircuit.py +++ b/qiskit/dagcircuit/_dagcircuit.py @@ -50,13 +50,13 @@ def __init__(self): # Map from a wire's name (reg,idx) to a Bool that is True if the # wire is a classical bit and False if the wire is a qubit. 
- self.wire_type = {} + self.wire_type = OrderedDict() # Map from wire names (reg,idx) to input nodes of the graph - self.input_map = {} + self.input_map = OrderedDict() # Map from wire names (reg,idx) to output nodes of the graph - self.output_map = {} + self.output_map = OrderedDict() # Running count of the total number of nodes self.node_counter = 0 @@ -83,7 +83,7 @@ def __init__(self): self.cregs = OrderedDict() # Map of user defined gates to ast nodes defining them - self.gates = {} + self.gates = OrderedDict() # Output precision for printing floats self.prec = 10 @@ -1356,9 +1356,9 @@ def fromQuantumCircuit(circuit, expand_gates=True): """ dagcircuit = DAGCircuit() dagcircuit.name = circuit.name - for register in circuit.qregs.values(): + for register in circuit.qregs: dagcircuit.add_qreg(register) - for register in circuit.cregs.values(): + for register in circuit.cregs: dagcircuit.add_creg(register) # Add user gate definitions for name, data in circuit.definitions.items(): diff --git a/qiskit/extensions/simulator/load.py b/qiskit/extensions/simulator/load.py --- a/qiskit/extensions/simulator/load.py +++ b/qiskit/extensions/simulator/load.py @@ -47,7 +47,7 @@ def load(self, slot): """ tuples = [] if isinstance(self, QuantumCircuit): - for register in self.qregs.values(): + for register in self.qregs: tuples.append(register) if not tuples: raise ExtensionError("no qubits for load") diff --git a/qiskit/extensions/simulator/noise.py b/qiskit/extensions/simulator/noise.py --- a/qiskit/extensions/simulator/noise.py +++ b/qiskit/extensions/simulator/noise.py @@ -46,7 +46,7 @@ def noise(self, switch): """ tuples = [] if isinstance(self, QuantumCircuit): - for register in self.qregs.values(): + for register in self.qregs: tuples.append(register) if not tuples: raise ExtensionError("no qubits for noise") diff --git a/qiskit/extensions/simulator/save.py b/qiskit/extensions/simulator/save.py --- a/qiskit/extensions/simulator/save.py +++ b/qiskit/extensions/simulator/save.py @@ -47,7 +47,7 @@ def save(self, slot): """ tuples = [] if isinstance(self, QuantumCircuit): - for register in self.qregs.values(): + for register in self.qregs: tuples.append(register) if not tuples: raise ExtensionError("no qubits for save") diff --git a/qiskit/extensions/simulator/snapshot.py b/qiskit/extensions/simulator/snapshot.py --- a/qiskit/extensions/simulator/snapshot.py +++ b/qiskit/extensions/simulator/snapshot.py @@ -47,7 +47,7 @@ def snapshot(self, slot): """ tuples = [] if isinstance(self, QuantumCircuit): - for register in self.qregs.values(): + for register in self.qregs: tuples.append(register) if not tuples: raise ExtensionError("no qubits for snapshot") diff --git a/qiskit/extensions/standard/barrier.py b/qiskit/extensions/standard/barrier.py --- a/qiskit/extensions/standard/barrier.py +++ b/qiskit/extensions/standard/barrier.py @@ -38,7 +38,7 @@ def barrier(self, *qargs): qubits = [] if not qargs: # None - for qreg in self.qregs.values(): + for qreg in self.qregs: for j in range(qreg.size): qubits.append((qreg, j)) diff --git a/qiskit/quantum_info/__init__.py b/qiskit/quantum_info/__init__.py --- a/qiskit/quantum_info/__init__.py +++ b/qiskit/quantum_info/__init__.py @@ -10,3 +10,4 @@ from .operators.pauli import Pauli, pauli_group from .states._states import basis_state, random_state, projector from .states._measures import state_fidelity +from .operators._measures import process_fidelity diff --git a/qiskit/quantum_info/operators/_measures.py b/qiskit/quantum_info/operators/_measures.py new file mode 
100644 --- /dev/null +++ b/qiskit/quantum_info/operators/_measures.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +# Copyright 2017, IBM. +# +# This source code is licensed under the Apache License, Version 2.0 found in +# the LICENSE.txt file in the root directory of this source tree. + +# pylint: disable=invalid-name,anomalous-backslash-in-string + +""" +A collection of useful quantum information functions for operators. + +""" + +import numpy as np + + +def process_fidelity(channel1, channel2): + """Return the process fidelity between two quantum channels. + + Currently the input must be a unitary (until we decide on the channel) + For a unitary channels the process fidelity is given by + F_p(U, U) = abs(Tr[ U^dagger U ])^2/d^2 + + Args: + channel1 (array_like): a quantum unitary operator. + channel2 (array_like): a quantum unitary operator. + + Returns: + array_like: The state fidelity F(state1, state2). + """ + # convert input to numpy arrays + s1 = np.array(channel1) + s2 = np.array(channel2) + + # fidelity of two unitary vectors + overlap = np.trace(np.dot(s1.conj().transpose(), s2)) + f_p = abs(overlap)**2 / (len(s1)**2) + return f_p diff --git a/qiskit/tools/_compiler.py b/qiskit/tools/_compiler.py --- a/qiskit/tools/_compiler.py +++ b/qiskit/tools/_compiler.py @@ -10,7 +10,6 @@ import uuid import logging - from qiskit import transpiler from qiskit.transpiler._passmanager import PassManager from qiskit.qobj import Qobj, QobjConfig, QobjExperiment, QobjItem, QobjHeader diff --git a/qiskit/unrollers/_circuitbackend.py b/qiskit/unrollers/_circuitbackend.py --- a/qiskit/unrollers/_circuitbackend.py +++ b/qiskit/unrollers/_circuitbackend.py @@ -56,14 +56,14 @@ def new_qreg(self, qreg): qreg = QuantumRegister object """ - self.circuit.add(qreg) + self.circuit.add_register(qreg) def new_creg(self, creg): """Create a new classical register. creg = ClassicalRegister object """ - self.circuit.add(creg) + self.circuit.add_register(creg) def define_gate(self, name, gatedata): """Define a new quantum gate. @@ -77,26 +77,35 @@ def define_gate(self, name, gatedata): def _map_qubit(self, qubit): """Map qubit tuple (regname, index) to (QuantumRegister, index).""" + qregs = self.circuit.qregs - if qubit[0] not in qregs: + regname = qubit[0] + qregs_names = [element.name for element in qregs] + if regname not in qregs_names: raise _backenderror.BackendError( "qreg %s does not exist" % qubit[0]) - return (qregs[qubit[0]], qubit[1]) + index = qregs_names.index(regname) + return (qregs[index], qubit[1]) def _map_bit(self, bit): """Map bit tuple (regname, index) to (ClassicalRegister, index).""" cregs = self.circuit.cregs - if bit[0] not in cregs: + regname = bit[0] + cregs_names = [element.name for element in cregs] + if regname not in cregs_names: raise _backenderror.BackendError( "creg %s does not exist" % bit[0]) - return (cregs[bit[0]], bit[1]) + index = cregs_names.index(regname) + return (cregs[index], bit[1]) def _map_creg(self, creg): """Map creg name to ClassicalRegister.""" cregs = self.circuit.cregs - if creg not in cregs: + cregs_names = [element.name for element in cregs] + if creg not in cregs_names: raise _backenderror.BackendError("creg %s does not exist" % creg) - return cregs[creg] + index = cregs_names.index(creg) + return cregs[index] def u(self, arg, qubit, nested_scope=None): """Fundamental single qubit gate. 
diff --git a/qiskit/unrollers/_jsonbackend.py b/qiskit/unrollers/_jsonbackend.py --- a/qiskit/unrollers/_jsonbackend.py +++ b/qiskit/unrollers/_jsonbackend.py @@ -34,6 +34,8 @@ ] } """ +from collections import OrderedDict + from qiskit.unrollers._backenderror import BackendError from qiskit.unrollers._unrollerbackend import UnrollerBackend @@ -60,8 +62,8 @@ def __init__(self, basis=None): self._number_of_cbits = 0 self._qubit_order = [] self._cbit_order = [] - self._qubit_order_internal = {} - self._cbit_order_internal = {} + self._qubit_order_internal = OrderedDict() + self._cbit_order_internal = OrderedDict() self.creg = None self.cval = None diff --git a/qiskit/unrollers/_unroller.py b/qiskit/unrollers/_unroller.py --- a/qiskit/unrollers/_unroller.py +++ b/qiskit/unrollers/_unroller.py @@ -8,6 +8,7 @@ """ OPENQASM interpreter. """ +from collections import OrderedDict from qiskit._quantumregister import QuantumRegister from qiskit._classicalregister import ClassicalRegister from ._unrollererror import UnrollerError @@ -32,11 +33,11 @@ def __init__(self, ast, backend=None, precision=15, filename=None): # OPENQASM version number self.version = 0.0 # Dict of qreg names and sizes - self.qregs = {} + self.qregs = OrderedDict() # Dict of creg names and sizes - self.cregs = {} + self.cregs = OrderedDict() # Dict of gates names and properties - self.gates = {} + self.gates = OrderedDict() # List of dictionaries mapping local parameter ids to expression Nodes self.arg_stack = [{}] # List of dictionaries mapping local bit ids to global ids (name, idx)
The `test_online_qasm_simulator_two_registers` is failing in master

### What is the current behavior?
The test is failing in the `master` branch with the following error:
```
======================================================================
FAIL: test_online_qasm_simulator_two_registers (python.ibmq.test_ibmq_qasm_simulator.TestIbmqQasmSimulator)
Test online_qasm_simulator_two_registers.
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/home/travis/build/Qiskit/qiskit-terra/test/python/common.py", line 373, in _wrapper
    return decorated_func(self, *args, **kwargs)
  File "/home/travis/build/Qiskit/qiskit-terra/test/python/ibmq/test_ibmq_qasm_simulator.py", line 109, in test_online_qasm_simulator_two_registers
    self.assertEqual(result1, {'00 01': 1024})
AssertionError: {'01 00': 1024} != {'00 01': 1024}
- {'01 00': 1024}
?    ---
+ {'00 01': 1024}
?    +++
```

### Steps to reproduce the problem
The failure is intermittent and seems related to running the whole test suite; when the test is run in isolation it does not seem to fail.

### What is the expected behavior?
The test should pass regardless of other tests.

### Suggested solutions
It will be temporarily marked as an expected failure, but we should dig into what happens.
It finally failed for me locally in the tests. However, I ran the test 1000 times by itself and could not reproduce it
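The patch for this instance swaps plain dicts and sets for `OrderedDict`s and list-based register iteration. A minimal, illustrative sketch (not Qiskit code) of why that matters for an intermittent `'00 01'` vs `'01 00'` failure: the key layout of the counts follows whatever order the register map yields its entries in, so that order has to be pinned to insertion order.

```python
from collections import OrderedDict

added_order = [("q0", 2), ("q1", 2)]      # registers in the order the user added them

wire_map = OrderedDict(added_order)        # iteration order is guaranteed to match
assert list(wire_map) == ["q0", "q1"]

# With an unordered container (e.g. a set of names, or a pre-3.7 plain dict, whose
# ordering is not guaranteed by the language), downstream code doing
# `for reg in wire_map:` could emit the classical registers in either order,
# flipping the counts key between '00 01' and '01 00' across runs.
```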
2018-11-17T17:51:49Z
[]
[]
Traceback (most recent call last): File "/home/travis/build/Qiskit/qiskit-terra/test/python/common.py", line 373, in _wrapper return decorated_func(self, *args, **kwargs) File "/home/travis/build/Qiskit/qiskit-terra/test/python/ibmq/test_ibmq_qasm_simulator.py", line 109, in test_online_qasm_simulator_two_registers self.assertEqual(result1, {'00 01': 1024}) AssertionError: {'01 00': 1024} != {'00 01': 1024}
971
Qiskit/qiskit
Qiskit__qiskit-1295
77dc51b93e7312bbff8f5acf7d8242232bd6624f
diff --git a/qiskit/backends/ibmq/credentials/_configrc.py b/qiskit/backends/ibmq/credentials/_configrc.py --- a/qiskit/backends/ibmq/credentials/_configrc.py +++ b/qiskit/backends/ibmq/credentials/_configrc.py @@ -9,6 +9,7 @@ Utilities for reading and writing credentials from and to configuration files. """ +import warnings import os from ast import literal_eval from collections import OrderedDict @@ -116,15 +117,17 @@ def store_credentials(credentials, overwrite=False, filename=None): location is used (`HOME/.qiskit/qiskitrc`). Raises: - QISKitError: If credentials already exists and overwrite=False; or if - the account_name could not be assigned. + QISKitError: if the account_name could not be assigned. """ # Read the current providers stored in the configuration file. filename = filename or DEFAULT_QISKITRC_FILE stored_credentials = read_credentials_from_qiskitrc(filename) + # Check if duplicated credentials are already stored. By convention, + # we assume (hub, group, project) is always unique. if credentials.unique_id() in stored_credentials and not overwrite: - raise QISKitError('Credentials already present and overwrite=False') + warnings.warn('Credentials already present. Set overwrite=True to overwrite.') + return # Append and write the credentials to file. stored_credentials[credentials.unique_id()] = credentials diff --git a/qiskit/backends/ibmq/credentials/credentials.py b/qiskit/backends/ibmq/credentials/credentials.py --- a/qiskit/backends/ibmq/credentials/credentials.py +++ b/qiskit/backends/ibmq/credentials/credentials.py @@ -22,7 +22,7 @@ class Credentials(object): """IBM Q account credentials. Note that, by convention, two credentials that have the same hub, group - and token (regardless of other attributes) are considered equivalent. + and project (regardless of other attributes) are considered equivalent. The `unique_id()` returns the unique identifier. """ diff --git a/qiskit/backends/ibmq/ibmqprovider.py b/qiskit/backends/ibmq/ibmqprovider.py --- a/qiskit/backends/ibmq/ibmqprovider.py +++ b/qiskit/backends/ibmq/ibmqprovider.py @@ -116,7 +116,7 @@ def enable_account(self, token, url=QE_URL, **kwargs): self._append_account(credentials) - def save_account(self, token, url=QE_URL, **kwargs): + def save_account(self, token, url=QE_URL, overwrite=False, **kwargs): """Save the account to disk for future use. Login into Quantum Experience or IBMQ using the provided credentials, @@ -127,20 +127,13 @@ def save_account(self, token, url=QE_URL, **kwargs): token (str): Quantum Experience or IBM Q API token. url (str): URL for Quantum Experience or IBM Q (for IBM Q, including the hub, group and project in the URL). + overwrite (bool): overwrite existing credentials. **kwargs (dict): * proxies (dict): Proxy configuration for the API. * verify (bool): If False, ignores SSL certificates errors """ credentials = Credentials(token, url, **kwargs) - - # Check if duplicated credentials are already stored. By convention, - # we assume (hub, group, project) is always unique. - stored_credentials = read_credentials_from_qiskitrc() - - if credentials.unique_id() in stored_credentials.keys(): - warnings.warn('Credentials are already stored.') - else: - store_credentials(credentials) + store_credentials(credentials, overwrite=overwrite) def active_accounts(self): """List all accounts currently in the session.
credentials failed for qiskit ver 0.6.1

### Informations
- **Qiskit Terra version**: 0.6.1
- **Python version**: 3.7.0
- **Operating system**: MAC OSX 10.13.6

### What is the current behavior?
After I acquired a fresh token from https://quantumexperience.ng.bluemix.net/qx/account/advanced, IBMQ.load_accounts() fails.

### Steps to reproduce the problem
```
from qiskit import IBMQ
myToken='b6abe11442c9a...'
IBMQ.save_account(myToken)
IBMQ.load_accounts()
```
Results with
```
Traceback (most recent call last):
  File "/anaconda3/lib/python3.7/site-packages/qiskit/backends/ibmq/ibmqsingleprovider.py", line 71, in _authenticate
    credentials.verify)
  File "/anaconda3/lib/python3.7/site-packages/IBMQuantumExperience/IBMQuantumExperience.py", line 478, in __init__
    self.req = _Request(token, config=config, verify=verify)
  File "/anaconda3/lib/python3.7/site-packages/IBMQuantumExperience/IBMQuantumExperience.py", line 253, in __init__
    ntlm_credentials=self.ntlm_credentials)
  File "/anaconda3/lib/python3.7/site-packages/IBMQuantumExperience/IBMQuantumExperience.py", line 95, in __init__
    self.obtain_token(config=self.config)
  File "/anaconda3/lib/python3.7/site-packages/IBMQuantumExperience/IBMQuantumExperience.py", line 159, in obtain_token
    raise CredentialsError('error during login: %s' % error_message)
IBMQuantumExperience.IBMQuantumExperience.CredentialsError: error during login: Wrong user or password, check your credentials.
```

### What is the expected behavior?
IBMQ.load_accounts() should accept the new token. All worked well with version 0.5.

### Suggested solutions
Can you try enable_account or regenerating the token? Your code should work. If you type `IBMQ.stored_accounts()`, do you see the account?

@pacomf I can confirm this has happened to me today as well.

I can't reproduce the bug; I regenerated my APIToken and it works fine using qiskit terra... is it still happening? Can you send me more details?

It happened for about 5 hours on the weekend. However, @nonhermitian could run at the same time, and then it started working again.

Mmmm, maybe an issue with the API... we will investigate it.

I will add that, when it happened to me, I could log into some accounts and not others.

Hi @jaygambetta, your tip helped. IBMQ.stored_accounts() has returned some old token, not the new one. Looks like IBMQ.save_account(myToken) is unable to replace the token if it exists - I leave it to you to decide if it is a bug or a feature. My hack around it is to execute IBMQ.delete_accounts() first to clear my old token. So this sequence always works:
```
IBMQ.delete_accounts()
myToken='b6abe11442c9a...'
IBMQ.save_account(myToken)
IBMQ.load_accounts()
```
I can move on, closing this ticket. Thanks for the help, Jan

Let's leave this open and investigate whether there's a bug with `IBMQ.save_account()` re-writing old tokens. @diego-plan9 can you please have a look?

Yes - thanks @balewski for the information, which is spot on - currently, `IBMQ.save_account()` will just print a warning and do nothing else if old credentials are present:
https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/backends/ibmq/ibmqprovider.py#L140-L143

> Looks like IBMQ.save_account(myToken) is unable to replace the token if it exists - I leave it to you to decide if it is a bug or a feature.

Actually ... I can't decide if it is a bug or a feature either! :thinking: In the original draft implementation, the `.save_account()` (`.add_account()` by that time) method was [raising an Exception](https://github.com/Qiskit/qiskit-terra/blob/746245e29c5cadc44dc37851b19a4150b4e86cd8/qiskit/backends/ibmq/ibmqprovider.py#L111) in the case of trying to store a duplicate account. This was later changed to a warning; I'm unsure if by design and as a hard requisite from Jupyter-user needs, or also related to the slight tuning of the method's functionality (ie. not authenticating during the call, just storing to disk).

So I'm actually de-assigning myself, as probably the rest of the team has a fresher view of the design decisions related to #1000. I think we have several options:

* consider that not overwriting and raising a warning is indeed the desired behavior: the main drawback is that the warning might be easy to miss (and was probably the source of confusion in this issue).
* tune the method a bit in order to accept an `overwrite=True` optional parameter or a similar approach: the `credentials` module already has the needed parts in place; the main drawback would be that we touch the public API a bit.
* be a bit more restrictive and promote the warning back to an exception: it might affect users running the method twice who are already used to it not raising (ie. maybe notebook users).

One way or the other, I think we need to make sure that the flow for updating an existing stored token is a bit smoother than the delete-save workaround proposed by @balewski, as it seems a relatively common use case.

From an external user perspective: it happens rather often that ibmq_16_melbourne or sometimes even ibmqx4 does not accept the job and throws some 'general error', despite your web page saying both machines are operational.
Then it is natural to (wrongly?) guess that perhaps my token is invalid. Then I'd ask for a new token and try to use it - hoping it will help. For such a train of thought the natural solution is to assume the user knows what they want: if the user wants to replace the token by calling save_account(), just replace it. You can issue a warning that there was an old (still valid) token, but why not just replace the token each time the user calls IBMQ.save_account(myToken)? Would this have any negative effect on your end? Thanks, Jan

I think save_account should not raise an exception. Overwriting is not bad behavior - similar to overwriting a key in a dict or something. It should just work. @ajavadia is there an update?

Hi, there is some inconsistency between the device status you show here: https://quantumexperience.ng.bluemix.net/qx/account/advanced and actual availability. At this moment, both ibmqx4 and ibmq_16_melbourne are reported to work. However, when I try to submit my circuit using Qiskit ver 0.6.1 I get the error below for either:
```
Got a 400 code response to https://quantumexperience.ng.bluemix.net/api/Jobs?access_token=VCgYWnMUUBaYeT5gSmGO14cX93Foo4rccsLUVvIjf3bwYEZNjxlDcRmPArS2wZ25: {"error":{"status":400,"message":"Generic error","code":"GENERIC_ERROR"}}
```
Note, my token is correct, because I can submit the circuit to your simulator:
```
'backend': 'ibmq_qasm_simulator', 'jobId2': '1814808', 'startTime': '2018-11-09 17:53:28'}
```
Can you have a look? Thanks, Jan
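The patch for this instance (shown above) follows the `overwrite` option discussed in the thread: `store_credentials` now warns and returns when credentials are already present, and `save_account` gains an `overwrite` flag. A short usage sketch of the resulting flow; the token string is a placeholder:

```python
from qiskit import IBMQ

IBMQ.save_account('MY_API_TOKEN')                   # warns if credentials are already stored
IBMQ.save_account('MY_API_TOKEN', overwrite=True)   # explicitly replaces the stored token
IBMQ.load_accounts()
```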
2018-11-19T08:27:15Z
[]
[]
Traceback (most recent call last): File "/anaconda3/lib/python3.7/site-packages/qiskit/backends/ibmq/ibmqsingleprovider.py", line 71, in _authenticate credentials.verify) File "/anaconda3/lib/python3.7/site-packages/IBMQuantumExperience/IBMQuantumExperience.py", line 478, in __init__ self.req = _Request(token, config=config, verify=verify) File "/anaconda3/lib/python3.7/site-packages/IBMQuantumExperience/IBMQuantumExperience.py", line 253, in __init__ ntlm_credentials=self.ntlm_credentials) File "/anaconda3/lib/python3.7/site-packages/IBMQuantumExperience/IBMQuantumExperience.py", line 95, in __init__ self.obtain_token(config=self.config) File "/anaconda3/lib/python3.7/site-packages/IBMQuantumExperience/IBMQuantumExperience.py", line 159, in obtain_token raise CredentialsError('error during login: %s' % error_message) IBMQuantumExperience.IBMQuantumExperience.CredentialsError: error during login: Wrong user or password, check your credentials.
975
Qiskit/qiskit
Qiskit__qiskit-1436
259c10580d22122e739ed466d306dcd5adb2027f
diff --git a/qiskit/qobj/_qobj.py b/qiskit/qobj/_qobj.py
--- a/qiskit/qobj/_qobj.py
+++ b/qiskit/qobj/_qobj.py
@@ -50,8 +50,6 @@ def _expand_item(cls, obj):
             return [cls._expand_item(item) for item in obj]
         if isinstance(obj, dict):
             return {key: cls._expand_item(value) for key, value in obj.items()}
-        if isinstance(obj, QobjItem):
-            return obj.as_dict()
         if isinstance(obj, numpy.integer):
             return int(obj)
         if isinstance(obj, numpy.float):
@@ -61,9 +59,11 @@ def _expand_item(cls, obj):
         if isinstance(obj, sympy.Basic):
             return float(obj.evalf())
         if isinstance(obj, numpy.ndarray):
-            return obj.tolist()
+            return cls._expand_item(obj.tolist())
         if isinstance(obj, complex):
             return [obj.real, obj.imag]
+        if hasattr(obj, 'as_dict'):
+            return obj.as_dict()
         return obj
 
     @classmethod
Crash when setting initial_state with a complex vector for the simulator

### Informations
- **Qiskit Terra version**: the master branch (Dec. 4th)
- **Python version**: 3.7.1
- **Operating system**: macOS 10.13

### What is the current behavior?
Encounter a JSON encoding error.

### Steps to reproduce the problem
Running with the following qasm and setting:
```
OPENQASM 2.0;
include "qelib1.inc";
qreg q[1];
u1(3.14159265358979) q[0];

{'shots': 1, 'config': {'initial_state': array([0.93130364-0.02274014j, 0.2641254 +0.2497883j ])}}
```
error message:
```
Traceback (most recent call last):
  File "/Users/rchen/Developer/Quantum/temp/aqua/test/test_operator.py", line 136, in test_create_from_matrix
    non_matrix_mode = op.eval('paulis', circuit, backend, run_config=run_config)[0]
  File "/Users/rchen/Developer/Quantum/temp/aqua/qiskit_aqua/operator.py", line 779, in eval
    has_shared_circuits=has_shared_circuits)
  File "/Users/rchen/Developer/Quantum/temp/aqua/qiskit_aqua/utils/run_circuits.py", line 151, in compile_and_run_circuits
    return _reuse_shared_circuits(circuits, backend, backend_config, compile_config, run_config, qjob_config)
  File "/Users/rchen/Developer/Quantum/temp/aqua/qiskit_aqua/utils/run_circuits.py", line 110, in _reuse_shared_circuits
    show_circuit_summary=show_circuit_summary)
  File "/Users/rchen/Developer/Quantum/temp/aqua/qiskit_aqua/utils/run_circuits.py", line 239, in compile_and_run_circuits
    results.append(job.result(**qjob_config))
  File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/aerjob.py", line 37, in _wrapper
    return func(self, *args, **kwargs)
  File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/aerjob.py", line 92, in result
    return self._future.result(timeout=timeout)
  File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 432, in result
    return self.__get_result()
  File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
    raise self._exception
  File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/thread.py", line 57, in run
    result = self.fn(*self.args, **self.kwargs)
  File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/statevector_simulator.py", line 71, in _run_job
    result = super()._run_job(job_id, qobj)
  File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/qasm_simulator.py", line 97, in _run_job
    result = run(qobj_dict, self._configuration.exe)
  File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/qasm_simulator.py", line 195, in run
    cin = json.dumps(qobj).encode()
  File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 231, in dumps
    return _default_encoder.encode(obj)
  File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/encoder.py", line 199, in encode
    chunks = self.iterencode(o, _one_shot=True)
  File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/encoder.py", line 257, in iterencode
    return _iterencode(o, 0)
  File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/encoder.py", line 179, in default
    raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type complex is not JSON serializable
```

### What is the expected behavior?
No crash.

### Suggested solutions
Terra should parse the complex vector to [[real, imag], [real, imag]]. (I tried with the above format; it works.)
@diego-plan9 can you please look into this? Passing a `config` kwarg to `execute()` seems to not serialize correctly...

I think I need more info - but if you are modifying a `Qobj` instance directly, the data (in this case, I assume it is appending the `config`) should be as close to the specs as possible, which would mean that the proper way for that information to be stored would indeed be as a bare list of pairs. @chunfuchen, can you provide more information about how you are using the custom configuration?

@diego-plan9 here is the example script
```python
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
import qiskit as qk
import numpy as np

np.random.seed(0)

def generate_circuits(num_qubits, parameters, depth=10):
    q = QuantumRegister(num_qubits, name='q')
    c = ClassicalRegister(num_qubits, name='c')
    circuit = QuantumCircuit(q, c)
    param_idx = 0
    for qubit in range(num_qubits):
        circuit.u3(parameters[param_idx], 0.0, 0.0, q[qubit])
        circuit.u1(parameters[param_idx+1], q[qubit])
        param_idx += 2
    for block in range(depth):
        circuit.barrier(q)
        for qubit in range(num_qubits):
            circuit.u3(parameters[param_idx], 0.0, 0.0, q[qubit])
            circuit.u1(parameters[param_idx+1], q[qubit])
            param_idx += 2
    # circuit.barrier(q)
    # circuit.measure(q, c)
    return circuit

num_circuits = 10
num_qubits = 5
depth = 2

# work
config = {'config': {'initial_state': [[0.93130364, -0.02274014], [0.2641254, 0.2497883]]}}
# does not work
# config = {'config': {'initial_state': np.asarray([0.93130364-0.02274014j, 0.2641254 +0.2497883j ])}}

num_parameters = num_qubits * (depth + 1) * 2
circuits = [generate_circuits(num_qubits, np.random.rand(num_parameters), depth) for _ in range(num_circuits)]

my_backend = qk.Aer.get_backend('statevector_simulator')
qobj = qk.compile(circuits=circuits, backend=my_backend, seed=0, config=config)
qjob = my_backend.run(qobj)
result = qjob.result()
print(result.get_statevector(circuits[0]))
```

Thanks @chunfuchen! I think there are several forces at play here:

* the `config` is indeed appended to the `Qobj` "directly" in `circuits_to_qobj`. If we still consider `Qobj` to be a rather dummy and bare container, it would make sense that it is stored in the right format (ie. complex as tuple) once it reaches that point.
* whether we can limit all the possible inputs for `config` and reach some kind of universal conversion - which I think we realistically can't. Since it is a field that is loosely defined, and per-backend type, we might be able to perform a "preprocessing" of sorts in the backends, where they might know what configurations are valid for their needs.
* backwards-compatibility: I'm not sure what the expected behaviour was pre-0.7! If possible, it would be nice to preserve it.

So I'm kind of rebounding the question and the decision to @ajavadia, and in general, the compilers project - we have several options, from specifying that the `config` should be passed in qobj-like format (the second option mentioned by @chunfuchen), to only performing the conversion on a subset of configurations that we know are supported, to fully delegating to the backends. Any ideas?

I would say that it should be possible to take a state vector returned by `results.get_statevector()` and feed it into the circuit in `initial_state` with no conversion by the user. Since the former is a NumPy array, the latter should accept that as an input.

@diego-plan9 I found a related bug to this in `QobjItem`: the line
```python
if isinstance(obj, numpy.ndarray):
    return obj.tolist()
```
should be
```python
if isinstance(obj, numpy.ndarray):
    return cls._expand_item(obj.tolist())
```
so that a complex array is parsed recursively; otherwise it will only convert the array to a list, but not serialize the inner complex numbers correctly.
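A self-contained sketch of the conversion the fix enables (an illustrative helper, not the actual `QobjItem._expand_item`): expanding a complex NumPy array element-wise into `[real, imag]` pairs is what lets `json.dumps` succeed.

```python
import json

import numpy as np


def expand(obj):
    """Recursively rewrite ndarrays/lists and complex scalars into JSON-safe types."""
    if isinstance(obj, np.ndarray):
        return expand(obj.tolist())          # recurse so the inner complex values are handled
    if isinstance(obj, list):
        return [expand(item) for item in obj]
    if isinstance(obj, complex):
        return [obj.real, obj.imag]
    return obj


state = np.array([0.93130364 - 0.02274014j, 0.2641254 + 0.2497883j])
print(json.dumps(expand(state)))   # [[0.93130364, -0.02274014], [0.2641254, 0.2497883]]
```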
2018-12-05T20:51:20Z
[]
[]
Traceback (most recent call last): File "/Users/rchen/Developer/Quantum/temp/aqua/test/test_operator.py", line 136, in test_create_from_matrix non_matrix_mode = op.eval('paulis', circuit, backend, run_config=run_config)[0] File "/Users/rchen/Developer/Quantum/temp/aqua/qiskit_aqua/operator.py", line 779, in eval has_shared_circuits=has_shared_circuits) File "/Users/rchen/Developer/Quantum/temp/aqua/qiskit_aqua/utils/run_circuits.py", line 151, in compile_and_run_circuits return _reuse_shared_circuits(circuits, backend, backend_config, compile_config, run_config, qjob_config) File "/Users/rchen/Developer/Quantum/temp/aqua/qiskit_aqua/utils/run_circuits.py", line 110, in _reuse_shared_circuits show_circuit_summary=show_circuit_summary) File "/Users/rchen/Developer/Quantum/temp/aqua/qiskit_aqua/utils/run_circuits.py", line 239, in compile_and_run_circuits results.append(job.result(**qjob_config)) File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/aerjob.py", line 37, in _wrapper return func(self, *args, **kwargs) File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/aerjob.py", line 92, in result return self._future.result(timeout=timeout) File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 432, in result return self.__get_result() File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result raise self._exception File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/statevector_simulator.py", line 71, in _run_job result = super()._run_job(job_id, qobj) File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/qasm_simulator.py", line 97, in _run_job result = run(qobj_dict, self._configuration.exe) File "/Users/rchen/Developer/Quantum/qiskit-terra-chenrich/qiskit/backends/aer/qasm_simulator.py", line 195, in run cin = json.dumps(qobj).encode() File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 231, in dumps return _default_encoder.encode(obj) File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/encoder.py", line 179, in default raise TypeError(f'Object of type {o.__class__.__name__} ' TypeError: Object of type complex is not JSON serializable
997
Qiskit/qiskit
Qiskit__qiskit-1748
186f3bf4fdd2bdcd9d18b2e059aea76209ddda0b
diff --git a/qiskit/tools/visualization/_text.py b/qiskit/tools/visualization/_text.py
--- a/qiskit/tools/visualization/_text.py
+++ b/qiskit/tools/visualization/_text.py
@@ -215,11 +215,13 @@ def __init__(self, label, input_length, order):
 class BoxOnQuWireBot(MultiBox, BoxOnQuWire):
     """ Draws the bottom part of a box that affects more than one quantum wire"""
 
-    def __init__(self, label, input_length):
+    def __init__(self, label, input_length, bot_connect='─'):
         super().__init__(label)
         self.top_format = "│ %s │"
+        self.top_pad = " "
+        self.bot_connect = bot_connect
 
-        self.mid_content = self.bot_connect = self.top_connect = ""
+        self.mid_content = self.top_connect = ""
 
         if input_length <= 2:
             self.top_connect = label
@@ -755,9 +757,9 @@ def build_layers(self):
                 layer.set_qubit(instruction['qargs'][0],
                                 BoxOnQuWire(TextDrawing.label_for_box(instruction)))
 
-            elif len(instruction['qubits']) >= 2 and not instruction['cargs']:
+            elif len(instruction['qargs']) >= 2 and not instruction['cargs']:
                 # multiple qubit gate
-                layer.set_qu_multibox(instruction['qubits'], TextDrawing.label_for_box(instruction))
+                layer.set_qu_multibox(instruction['qargs'], TextDrawing.label_for_box(instruction))
 
             else:
                 raise VisualizationError(
@@ -876,7 +878,8 @@ def connect_with(self, wire_char, label=None):
             affected_bits[0].connect(wire_char, ['bot'])
             for affected_bit in affected_bits[1:-1]:
                 affected_bit.connect(wire_char, ['bot', 'top'])
-            affected_bits[-1].connect(wire_char, ['top'], label)
+            if not isinstance(affected_bits[-1], MultiBox):
+                affected_bits[-1].connect(wire_char, ['top'], label)
 
             if label:
                 for affected_bit in affected_bits:
Using the rzz gate yields an error on circuit drawing

### Informations
- **Qiskit Terra version**: qiskit version 0.7.0
- **Python version**: python 3.6.6
- **Operating system**: Red Hat Enterprise Server 7.4

### Current behavior
At circuit draw I get the following error:
```bash
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/local/lib/python3.6/site-packages/qiskit/circuit/quantumcircuit.py", line 83, in __str__
    return str(self.draw(output='text'))
  File "/usr/local/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 413, in __str__
    return self.single_string()
  File "/usr/local/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 438, in single_string
    return "\n".join(self.lines())
  File "/usr/local/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 472, in lines
    layers = self.build_layers()
  File "/usr/local/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 758, in build_layers
    elif len(instruction['qubits']) >= 2 and not instruction['cargs']:
KeyError: 'qubits'
```

### Steps to reproduce the problem
```python
from qiskit import *

q = QuantumRegister(2)
qc = QuantumCircuit(q)
qc.rzz(0, q[0], q[1])
print(qc)
```

### What is the expected behavior?
It should draw the circuit with no problem, but here it gives a KeyError on "qubits".

### Suggested solutions
Maybe it is expecting "qargs" instead of "qubits"?
2019-02-02T02:13:11Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.6/site-packages/qiskit/circuit/quantumcircuit.py", line 83, in __str__ return str(self.draw(output='text')) File "/usr/local/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 413, in __str__ return self.single_string() File "/usr/local/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 438, in single_string return "\n".join(self.lines()) File "/usr/local/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 472, in lines layers = self.build_layers() File "/usr/local/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 758, in build_layers elif len(instruction['qubits']) >= 2 and not instruction['cargs']: KeyError: 'qubits'
1,037
Qiskit/qiskit
Qiskit__qiskit-1765
ff091d1ceff3454e793d919b0c19e13a601c908f
diff --git a/qiskit/circuit/gate.py b/qiskit/circuit/gate.py --- a/qiskit/circuit/gate.py +++ b/qiskit/circuit/gate.py @@ -21,7 +21,7 @@ def __init__(self, name, params, qargs, circuit=None): name = instruction name string params = list of real parameters (will be converted to symbolic) qargs = list of pairs (QuantumRegister, index) - circuit = QuantumCircuit containing this gate + circuit = QuantumCircuit or CompositeGate containing this gate """ self._is_multi_qubit = False self._qubit_coupling = [qarg[1] for qarg in qargs] diff --git a/qiskit/extensions/simulator/snapshot.py b/qiskit/extensions/simulator/snapshot.py --- a/qiskit/extensions/simulator/snapshot.py +++ b/qiskit/extensions/simulator/snapshot.py @@ -9,6 +9,7 @@ Simulator command to snapshot internal simulator representation. """ from qiskit import QuantumCircuit +from qiskit.circuit import CompositeGate from qiskit import QuantumRegister from qiskit.circuit import Instruction from qiskit.extensions.exceptions import ExtensionError @@ -65,5 +66,6 @@ def snapshot(self, label, snap_type='statevector'): return self._attach(Snapshot(label, snap_type, qubits, self)) -# Add to QuantumCircuit class +# Add to QuantumCircuit and CompositeGate classes QuantumCircuit.snapshot = snapshot +CompositeGate.snapshot = snapshot diff --git a/qiskit/extensions/standard/barrier.py b/qiskit/extensions/standard/barrier.py --- a/qiskit/extensions/standard/barrier.py +++ b/qiskit/extensions/standard/barrier.py @@ -9,6 +9,7 @@ Barrier instruction. """ from qiskit.circuit import QuantumCircuit +from qiskit.circuit import CompositeGate from qiskit.circuit import QuantumRegister from qiskit.circuit import Instruction @@ -57,3 +58,4 @@ def barrier(self, *qargs): QuantumCircuit.barrier = barrier +CompositeGate.barrier = barrier diff --git a/qiskit/extensions/standard/ccx.py b/qiskit/extensions/standard/ccx.py --- a/qiskit/extensions/standard/ccx.py +++ b/qiskit/extensions/standard/ccx.py @@ -8,6 +8,7 @@ """ Toffoli gate. Controlled-Controlled-X. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -79,3 +80,4 @@ def ccx(self, ctl1, ctl2, tgt): QuantumCircuit.ccx = ccx +CompositeGate.ccx = ccx diff --git a/qiskit/extensions/standard/ch.py b/qiskit/extensions/standard/ch.py --- a/qiskit/extensions/standard/ch.py +++ b/qiskit/extensions/standard/ch.py @@ -10,6 +10,7 @@ """ controlled-H gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -84,3 +85,4 @@ def ch(self, ctl, tgt): QuantumCircuit.ch = ch +CompositeGate.ch = ch diff --git a/qiskit/extensions/standard/crz.py b/qiskit/extensions/standard/crz.py --- a/qiskit/extensions/standard/crz.py +++ b/qiskit/extensions/standard/crz.py @@ -8,6 +8,7 @@ """ controlled-rz gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -65,3 +66,4 @@ def crz(self, theta, ctl, tgt): QuantumCircuit.crz = crz +CompositeGate.crz = crz diff --git a/qiskit/extensions/standard/cswap.py b/qiskit/extensions/standard/cswap.py --- a/qiskit/extensions/standard/cswap.py +++ b/qiskit/extensions/standard/cswap.py @@ -8,6 +8,7 @@ """ Fredkin gate. Controlled-SWAP. 
""" +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -64,3 +65,4 @@ def cswap(self, ctl, tgt1, tgt2): QuantumCircuit.cswap = cswap +CompositeGate.cswap = cswap diff --git a/qiskit/extensions/standard/cu1.py b/qiskit/extensions/standard/cu1.py --- a/qiskit/extensions/standard/cu1.py +++ b/qiskit/extensions/standard/cu1.py @@ -8,6 +8,7 @@ """ controlled-u1 gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -67,3 +68,4 @@ def cu1(self, theta, ctl, tgt): QuantumCircuit.cu1 = cu1 +CompositeGate.cu1 = cu1 diff --git a/qiskit/extensions/standard/cu3.py b/qiskit/extensions/standard/cu3.py --- a/qiskit/extensions/standard/cu3.py +++ b/qiskit/extensions/standard/cu3.py @@ -8,6 +8,7 @@ """ controlled-u3 gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -72,3 +73,4 @@ def cu3(self, theta, phi, lam, ctl, tgt): QuantumCircuit.cu3 = cu3 +CompositeGate.cu3 = cu3 diff --git a/qiskit/extensions/standard/cx.py b/qiskit/extensions/standard/cx.py --- a/qiskit/extensions/standard/cx.py +++ b/qiskit/extensions/standard/cx.py @@ -10,6 +10,7 @@ """ controlled-NOT gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -58,3 +59,4 @@ def cx(self, ctl, tgt): QuantumCircuit.cx = cx +CompositeGate.cx = cx diff --git a/qiskit/extensions/standard/cxbase.py b/qiskit/extensions/standard/cxbase.py --- a/qiskit/extensions/standard/cxbase.py +++ b/qiskit/extensions/standard/cxbase.py @@ -8,6 +8,7 @@ """ Fundamental controlled-NOT gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit.decorators import _op_expand @@ -39,3 +40,4 @@ def cx_base(self, ctl, tgt): QuantumCircuit.cx_base = cx_base +CompositeGate.cx_base = cx_base diff --git a/qiskit/extensions/standard/cy.py b/qiskit/extensions/standard/cy.py --- a/qiskit/extensions/standard/cy.py +++ b/qiskit/extensions/standard/cy.py @@ -10,6 +10,7 @@ """ controlled-Y gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -62,3 +63,4 @@ def cy(self, ctl, tgt): QuantumCircuit.cy = cy +CompositeGate.cy = cy diff --git a/qiskit/extensions/standard/cz.py b/qiskit/extensions/standard/cz.py --- a/qiskit/extensions/standard/cz.py +++ b/qiskit/extensions/standard/cz.py @@ -10,6 +10,7 @@ """ controlled-Phase gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -61,3 +62,4 @@ def cz(self, ctl, tgt): QuantumCircuit.cz = cz +CompositeGate.cz = cz diff --git a/qiskit/extensions/standard/h.py b/qiskit/extensions/standard/h.py --- a/qiskit/extensions/standard/h.py +++ b/qiskit/extensions/standard/h.py @@ -10,6 +10,7 @@ """ Hadamard gate. 
""" +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -57,3 +58,4 @@ def h(self, q): QuantumCircuit.h = h +CompositeGate.h = h diff --git a/qiskit/extensions/standard/iden.py b/qiskit/extensions/standard/iden.py --- a/qiskit/extensions/standard/iden.py +++ b/qiskit/extensions/standard/iden.py @@ -10,6 +10,7 @@ """ Identity gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -53,3 +54,4 @@ def iden(self, q): QuantumCircuit.iden = iden +CompositeGate.iden = iden diff --git a/qiskit/extensions/standard/rx.py b/qiskit/extensions/standard/rx.py --- a/qiskit/extensions/standard/rx.py +++ b/qiskit/extensions/standard/rx.py @@ -10,6 +10,7 @@ """ Rotation around the x-axis. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -62,3 +63,4 @@ def rx(self, theta, q): QuantumCircuit.rx = rx +CompositeGate.rx = rx diff --git a/qiskit/extensions/standard/ry.py b/qiskit/extensions/standard/ry.py --- a/qiskit/extensions/standard/ry.py +++ b/qiskit/extensions/standard/ry.py @@ -10,6 +10,7 @@ """ Rotation around the y-axis. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -61,3 +62,4 @@ def ry(self, theta, q): QuantumCircuit.ry = ry +CompositeGate.ry = ry diff --git a/qiskit/extensions/standard/rz.py b/qiskit/extensions/standard/rz.py --- a/qiskit/extensions/standard/rz.py +++ b/qiskit/extensions/standard/rz.py @@ -10,6 +10,7 @@ """ Rotation around the z-axis. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -61,3 +62,4 @@ def rz(self, phi, q): QuantumCircuit.rz = rz +CompositeGate.rz = rz diff --git a/qiskit/extensions/standard/rzz.py b/qiskit/extensions/standard/rzz.py --- a/qiskit/extensions/standard/rzz.py +++ b/qiskit/extensions/standard/rzz.py @@ -8,6 +8,7 @@ """ two-qubit ZZ-rotation gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -60,5 +61,6 @@ def rzz(self, theta, qubit1, qubit2): return self._attach(RZZGate(theta, qubit1, qubit2, self)) -# Add to QuantumCircuit class +# Add to QuantumCircuit and CompositeGate classes QuantumCircuit.rzz = rzz +CompositeGate.rzz = rzz diff --git a/qiskit/extensions/standard/s.py b/qiskit/extensions/standard/s.py --- a/qiskit/extensions/standard/s.py +++ b/qiskit/extensions/standard/s.py @@ -10,6 +10,7 @@ """ S=diag(1,i) Clifford phase gate or its inverse. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -99,3 +100,5 @@ def sdg(self, q): QuantumCircuit.s = s QuantumCircuit.sdg = sdg +CompositeGate.s = s +CompositeGate.sdg = sdg diff --git a/qiskit/extensions/standard/swap.py b/qiskit/extensions/standard/swap.py --- a/qiskit/extensions/standard/swap.py +++ b/qiskit/extensions/standard/swap.py @@ -10,6 +10,7 @@ """ SWAP gate. 
""" +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -60,3 +61,4 @@ def swap(self, qubit1, qubit2): QuantumCircuit.swap = swap +CompositeGate.swap = swap diff --git a/qiskit/extensions/standard/t.py b/qiskit/extensions/standard/t.py --- a/qiskit/extensions/standard/t.py +++ b/qiskit/extensions/standard/t.py @@ -10,6 +10,7 @@ """ T=sqrt(S) phase gate or its inverse. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -99,3 +100,5 @@ def tdg(self, q): QuantumCircuit.t = t QuantumCircuit.tdg = tdg +CompositeGate.t = t +CompositeGate.tdg = tdg diff --git a/qiskit/extensions/standard/u0.py b/qiskit/extensions/standard/u0.py --- a/qiskit/extensions/standard/u0.py +++ b/qiskit/extensions/standard/u0.py @@ -10,6 +10,7 @@ """ Single qubit gate cycle idle. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -53,3 +54,4 @@ def u0(self, m, q): QuantumCircuit.u0 = u0 +CompositeGate.u0 = u0 diff --git a/qiskit/extensions/standard/u1.py b/qiskit/extensions/standard/u1.py --- a/qiskit/extensions/standard/u1.py +++ b/qiskit/extensions/standard/u1.py @@ -10,6 +10,7 @@ """ Diagonal single qubit gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -55,3 +56,4 @@ def u1(self, theta, q): QuantumCircuit.u1 = u1 +CompositeGate.u1 = u1 diff --git a/qiskit/extensions/standard/u2.py b/qiskit/extensions/standard/u2.py --- a/qiskit/extensions/standard/u2.py +++ b/qiskit/extensions/standard/u2.py @@ -10,6 +10,7 @@ """ One-pulse single-qubit gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -61,3 +62,4 @@ def u2(self, phi, lam, q): QuantumCircuit.u2 = u2 +CompositeGate.u2 = u2 diff --git a/qiskit/extensions/standard/u3.py b/qiskit/extensions/standard/u3.py --- a/qiskit/extensions/standard/u3.py +++ b/qiskit/extensions/standard/u3.py @@ -10,6 +10,7 @@ """ Two-pulse single-qubit gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -62,3 +63,4 @@ def u3(self, theta, phi, lam, q): QuantumCircuit.u3 = u3 +CompositeGate.u3 = u3 diff --git a/qiskit/extensions/standard/ubase.py b/qiskit/extensions/standard/ubase.py --- a/qiskit/extensions/standard/ubase.py +++ b/qiskit/extensions/standard/ubase.py @@ -10,6 +10,7 @@ """ Element of SU(2). """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit.decorators import _op_expand @@ -46,3 +47,4 @@ def u_base(self, theta, phi, lam, q): QuantumCircuit.u_base = u_base +CompositeGate.u_base = u_base diff --git a/qiskit/extensions/standard/x.py b/qiskit/extensions/standard/x.py --- a/qiskit/extensions/standard/x.py +++ b/qiskit/extensions/standard/x.py @@ -10,6 +10,7 @@ """ Pauli X (bit-flip) gate. 
""" +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -59,3 +60,4 @@ def x(self, q): QuantumCircuit.x = x +CompositeGate.x = x diff --git a/qiskit/extensions/standard/y.py b/qiskit/extensions/standard/y.py --- a/qiskit/extensions/standard/y.py +++ b/qiskit/extensions/standard/y.py @@ -10,6 +10,7 @@ """ Pauli Y (bit-phase-flip) gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -54,3 +55,4 @@ def y(self, q): QuantumCircuit.y = y +CompositeGate.y = y diff --git a/qiskit/extensions/standard/z.py b/qiskit/extensions/standard/z.py --- a/qiskit/extensions/standard/z.py +++ b/qiskit/extensions/standard/z.py @@ -10,6 +10,7 @@ """ Pauli Z (phase-flip) gate. """ +from qiskit.circuit import CompositeGate from qiskit.circuit import Gate from qiskit.circuit import QuantumCircuit from qiskit.circuit import QuantumRegister @@ -54,3 +55,4 @@ def z(self, q): QuantumCircuit.z = z +CompositeGate.z = z
CompositeGate became unusable - what is the replacement?

### Informations
- **Qiskit Terra version**: 0.7
- **Python version**: 3.6.5
- **Operating system**: Linux

### What is the current behavior?
Commit 7485ed924126b0861ef94d35eccef2d3532d70bf removed the `CompositeGate.X = X` from the `qiskit/extensions/simulator/*.py` files. Because of this, CompositeGate is not usable as before. [This code](https://gist.github.com/nelimee/79f54a75371d65a0c00d59af1cebf874) fails at execution with the error
```
Traceback (most recent call last):
  File "<input>", line 1, in <module>
  File "<input>", line 74, in crzz
  File "<input>", line 64, in __init__
AttributeError: 'CRZZGate' object has no attribute 'cu1'
```

### Steps to reproduce the problem
Download [the code](https://gist.github.com/nelimee/79f54a75371d65a0c00d59af1cebf874) and execute it with a version of Qiskit that contains the modifications of 7485ed924126b0861ef94d35eccef2d3532d70bf.

### What is the expected behavior?
The CompositeGate should work as before, i.e. be appended to the quantum circuit.

### Suggested solutions
1. Revert the part of 7485ed924126b0861ef94d35eccef2d3532d70bf that removed the lines `CompositeGate.X = X`.
2. **or** Update the main Changelog with this non-documented removal and provide/document an alternative way to create user-defined custom gates.
Thanks for the input; we are thinking about how to fix this. @ajavadia and I were discussing it today. I think the best idea is not to try to revert, but to note in the changelog that this is broken and fix it with a method that scales better.
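The merged patch above takes essentially the first suggested route: every standard-gate helper bound to `QuantumCircuit` is bound to `CompositeGate` again. A rough user-side sketch of the pattern this restores, loosely following the issue's gist; the class name mirrors the report, but the constructor arguments are an assumption about the 0.7-era `CompositeGate` API rather than the exact gist code:

```python
from qiskit.circuit import CompositeGate


class CRZZGate(CompositeGate):
    """Hypothetical composite gate built from helpers re-attached to CompositeGate."""

    def __init__(self, theta, ctl, tgt, circ=None):
        # Assumed signature: (name, params, qargs, circuit); check the 0.7 sources.
        super().__init__("crzz", [theta], [ctl, tgt], circ)
        self.cu1(theta, ctl, tgt)   # works again once CompositeGate.cu1 exists
        self.cx(ctl, tgt)
```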
2019-02-05T16:03:38Z
[]
[]
Traceback (most recent call last): File "<input>", line 1, in <module> File "<input>", line 74, in crzz File "<input>", line 64, in __init__ AttributeError: 'CRZZGate' object has no attribute 'cu1'
1,039
Qiskit/qiskit
Qiskit__qiskit-1849
91149f910e5530dc01dace328cb6cba0bce950cd
diff --git a/qiskit/tools/visualization/_text.py b/qiskit/tools/visualization/_text.py
--- a/qiskit/tools/visualization/_text.py
+++ b/qiskit/tools/visualization/_text.py
@@ -784,8 +784,11 @@ def build_layers(self):
         Raises:
             VisualizationError: When the drawing is, for some reason, impossible to be drawn.
         """
+        wire_names = self.wire_names(with_initial_value=True)
+        if not wire_names:
+            return []
 
-        layers = [InputWire.fillup_layer(self.wire_names(with_initial_value=True))]
+        layers = [InputWire.fillup_layer(wire_names)]
 
         for instruction_layer in self.instructions:
             layer = Layer(self.qregs, self.cregs)
Text circuit drawer raises ValueError if an empty circuit is given

### Information
- **Qiskit Terra version**: 0.7.3
- **Python version**: 3.6.8
- **Operating system**: macOS HighSierra

### What is the current behavior?
If I try to draw an empty circuit with the text drawer, it raises ValueError.

### Steps to reproduce the problem
```
# sample.py
from qiskit import QuantumCircuit
qc = QuantumCircuit()
print(qc)
```
```
$ python sample.py
Traceback (most recent call last):
  File "c.py", line 3, in <module>
    print(qc)
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/circuit/quantumcircuit.py", line 83, in __str__
    return str(self.draw(output='text'))
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 413, in __str__
    return self.single_string()
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 438, in single_string
    return "\n".join(self.lines())
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 472, in lines
    layers = self.build_layers()
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 672, in build_layers
    layers.append(InputWire.fillup_layer(self.wire_names(with_initial_value=True)))
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 394, in fillup_layer
    longest = max([len(name) for name in names])
ValueError: max() arg is an empty sequence
```

### What is the expected behavior?
No ValueError.

### Suggested solutions
Check whether `names` is empty or not.
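The patch above makes `build_layers` bail out with an empty layer list when the circuit has no wires. A tiny standalone illustration of the pitfall and two equivalent guards (illustrative snippet, not the Qiskit source):

```python
names = []                                   # an empty circuit has no wire names

# max() over an empty sequence raises ValueError...
# longest = max([len(name) for name in names])

# ...so either bail out early, as the patch does:
if not names:
    layers = []
# ...or give max() an explicit default:
longest = max((len(name) for name in names), default=0)
```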
2019-02-22T19:20:00Z
[]
[]
Traceback (most recent call last): File "c.py", line 3, in <module> print(qc) File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/circuit/quantumcircuit.py", line 83, in __str__ return str(self.draw(output='text')) File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 413, in __str__ return self.single_string() File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 438, in single_string return "\n".join(self.lines()) File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 472, in lines layers = self.build_layers() File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 672, in build_layers layers.append(InputWire.fillup_layer(self.wire_names(with_initial_value=True))) File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_text.py", line 394, in fillup_layer longest = max([len(name) for name in names]) ValueError: max() arg is an empty sequence
1,051
Qiskit/qiskit
Qiskit__qiskit-1866
fb44b4ad18969a89a03e78c9ca4944750edbacb4
diff --git a/qiskit/tools/visualization/_matplotlib.py b/qiskit/tools/visualization/_matplotlib.py
--- a/qiskit/tools/visualization/_matplotlib.py
+++ b/qiskit/tools/visualization/_matplotlib.py
@@ -462,7 +462,8 @@ def _draw_ops(self, verbose=False):
         _wide_gate = 'u2 u3 cu2 cu3'.split()
         _barriers = {'coord': [], 'group': []}
         next_ops = self._ops.copy()
-        next_ops.pop(0)
+        if next_ops:
+            next_ops.pop(0)
         this_anc = 0
 
         #
@@ -682,8 +683,12 @@ def _draw_ops(self, verbose=False):
         #
         # adjust window size and draw horizontal lines
         #
-        max_anc = max([q_anchors[ii].get_index() for ii in self._qreg_dict])
-        n_fold = (max_anc - 1) // self._style.fold
+        anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict]
+        if anchors:
+            max_anc = max(anchors)
+        else:
+            max_anc = 0
+        n_fold = max(0, max_anc - 1) // self._style.fold
         # window size
         if max_anc > self._style.fold > 0:
             self._cond['xmax'] = self._style.fold + 1
Matplotlib circuit drawer raises IndexError if there is no gate in QuantumCircuit

### Information
- **Qiskit Terra version**: 0.7.3
- **Python version**: 3.6.8
- **Operating system**: macOS HighSierra

### What is the current behavior?
If I try to draw a quantum circuit without any gate, the mpl drawer raises IndexError. The text drawer does not have this issue.

### Steps to reproduce the problem
```
# sample.py
from qiskit import QuantumCircuit, QuantumRegister
qr = QuantumRegister(1)
qc = QuantumCircuit(qr)
print(qc)
qc.draw(filename='output.pdf', output='mpl')
```
```
$ python sample.py
q0_0: |0>
Traceback (most recent call last):
  File "c.py", line 5, in <module>
    qc.draw(filename='output.pdf', output='mpl')
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/circuit/quantumcircuit.py", line 353, in draw
    reverse_bits=reverse_bits)
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_circuit_visualization.py", line 237, in circuit_drawer
    reverse_bits=reverse_bits)
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_circuit_visualization.py", line 577, in _matplotlib_circuit_drawer
    return qcd.draw(filename)
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_matplotlib.py", line 343, in draw
    self._draw_ops(verbose)
  File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_matplotlib.py", line 466, in _draw_ops
    next_ops.pop(0)
IndexError: pop from empty list
```

### What is the expected behavior?
No IndexError.

### Suggested solutions
Check whether `next_ops` is empty or not.
@nkanazawa1989 Can you check it?
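The fix mirrors the text-drawer guard from the previous instance: short-circuit the empty case before calling `pop` or `max`. A brief illustrative snippet (not the Qiskit source; the fold value is a stand-in):

```python
ops = []                                  # a circuit with no gates yields no ops
if ops:                                   # pop(0) on an empty list raises IndexError
    ops.pop(0)

anchors = []                              # nothing drawn, so no anchor indices either
max_anc = max(anchors) if anchors else 0  # avoid ValueError from max() on empty input
n_fold = max(0, max_anc - 1) // 25        # 25 stands in for self._style.fold
```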
2019-02-25T21:01:55Z
[]
[]
Traceback (most recent call last): File "c.py", line 5, in <module> qc.draw(filename='output.pdf', output='mpl') File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/circuit/quantumcircuit.py", line 353, in draw reverse_bits=reverse_bits) File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_circuit_visualization.py", line 237, in circuit_drawer reverse_bits=reverse_bits) File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_circuit_visualization.py", line 577, in _matplotlib_circuit_drawer return qcd.draw(filename) File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_matplotlib.py", line 343, in draw self._draw_ops(verbose) File "/Users/ima/envs/vqe2/lib/python3.6/site-packages/qiskit/tools/visualization/_matplotlib.py", line 466, in _draw_ops next_ops.pop(0) IndexError: pop from empty list
1,055
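The patch above for the empty-circuit drawer case boils down to guarding two spots that assume at least one instruction exists. A minimal standalone sketch of that pattern (plain Python, not the actual drawer code; `fold` is an assumed style value used only for illustration):

```python
ops = []            # an empty circuit has no instructions to draw

next_ops = ops.copy()
if next_ops:        # guard: pop(0) on an empty list raises IndexError
    next_ops.pop(0)

anchors = [idx for idx, _ in enumerate(ops)]   # per-qubit anchor indices; empty here
max_anc = max(anchors) if anchors else 0       # guard: max() of an empty sequence raises ValueError
fold = 25                                      # assumed fold width, for illustration only
n_fold = max(0, max_anc - 1) // fold
print(max_anc, n_fold)                         # 0 0 -> the drawing window degrades gracefully
```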
Qiskit/qiskit
Qiskit__qiskit-1944
b56cdf32e67438879faddf91d56eae04724e928b
diff --git a/qiskit/dagcircuit/_dagcircuit.py b/qiskit/dagcircuit/_dagcircuit.py --- a/qiskit/dagcircuit/_dagcircuit.py +++ b/qiskit/dagcircuit/_dagcircuit.py @@ -1336,7 +1336,7 @@ def multigraph_layers(self): next_layer = [] def collect_runs(self, namelist): - """Return a set of runs of "op" nodes with the given names. + """Return a set of non-conditional runs of "op" nodes with the given names. For example, "... h q[0]; cx q[0],q[1]; cx q[0],q[1]; h q[1]; .." would produce the tuple of cx nodes as an element of the set returned @@ -1357,7 +1357,7 @@ def collect_runs(self, namelist): for node in tops_node: nd = self.multi_graph.node[node] if nd["type"] == "op" and nd["name"] in namelist \ - and not nodes_seen[node]: + and nd["condition"] is None and not nodes_seen[node]: group = [node] nodes_seen[node] = True s = list(self.multi_graph.successors(node))
Mapper error when I try to execute QuantumCircuit as an object of a user defined class <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Informations - **Qiskit version**: 0.7.0 - **Python version**: 3.6.7 - **Operating system**: Ubuntu 18.04 ### What is the current behavior? /home/varun/.local/lib/python3.6/site-packages/marshmallow/schema.py:364: ChangedInMarshmallow3Warning: strict=False is not recommended. In marshmallow 3.0, schemas will always be strict. See https://marshmallow.readthedocs.io/en/latest/upgrading.html#schemas-are-always-strict ChangedInMarshmallow3Warning Please provide the cooeficeints for the intital state in the format a+bj Coefficient of state zero1+0j Coefficient of state one0+0j Provide the square of normalisation denominator1 [1.+0.j 0.+0.j] The best backend is ibmq_16_melbourne Traceback (most recent call last): File "class_counts.py", line 183, in <module> main() File "class_counts.py", line 163, in main sim = obj.simulate(p,b) File "class_counts.py", line 111, in simulate dec_state = self.ibmq(b) File "class_counts.py", line 97, in ibmq job_exp = execute(self.qc, backend=backend, shots=shots, max_credits=max_credits) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/tools/compiler.py", line 108, in execute skip_transpiler, seed_mapper, pass_manager, memory) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/tools/compiler.py", line 61, in compile seed_mapper, pass_manager) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/transpiler/_transpiler.py", line 67, in transpile 'pass_manager': pass_manager}) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/tools/parallel.py", line 93, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/home/varun/.local/lib/python3.6/site-packages/qiskit/transpiler/_transpiler.py", line 109, in _transpilation pass_manager=pass_manager) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/transpiler/_transpiler.py", line 199, in transpile_dag dag = Optimize1qGates().run(dag) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/transpiler/passes/optimize_1q_gates.py", line 53, in run raise MapperError("internal error") qiskit.mapper._mappererror.MapperError: 'internal error' ### Steps to reproduce the problem The QuantumRegister, ClassicalRegister and QuantumCircuit are intialised when a object is created for the user-defined class I have made. The code ran without errors on the local machine and on the hpc. But when I tried to run it on the ibm Qx_16 it showed this error. ### What is the expected behavior? I expect to get values of get_counts() for 20 different values for a given parameter. For each value a new object is created and the entire circuit is simulated. ### Suggested solutions The problem could be because I create a new object every iteration of the loop. I made sure to delete the object after each iteration though,
Hi @isolatedinformation, could you provide some more information about the circuit you were trying to execute when this error occurred? My circuit consists of 9 qubits and 9 cbits to record their outcomes. Basically, I was trying to simulate a noisy channel and perform error correction to retrieve the initial message. The noisiness of the channel was tuned by a parameter \gamma. For different values of \gamma, I wanted the statistics of the measurement outcome and plotted them. For instance, this attached image shows the statistics I got for 20 different values of gamma when run on the HPC. ![zero](https://user-images.githubusercontent.com/27089492/52951977-3731a200-33a9-11e9-9c80-dc164d88c2b0.png) I created a class to make sure the entire circuit was re-initialised for every iteration of \gamma, since each iteration is independent of the others. The objects of the class helped achieve this objective. If it's still not clear, I can email my code to you. I think this is the same issue as #1871
2019-03-10T09:20:58Z
[]
[]
Traceback (most recent call last): File "class_counts.py", line 183, in <module> main() File "class_counts.py", line 163, in main sim = obj.simulate(p,b) File "class_counts.py", line 111, in simulate dec_state = self.ibmq(b) File "class_counts.py", line 97, in ibmq job_exp = execute(self.qc, backend=backend, shots=shots, max_credits=max_credits) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/tools/compiler.py", line 108, in execute skip_transpiler, seed_mapper, pass_manager, memory) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/tools/compiler.py", line 61, in compile seed_mapper, pass_manager) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/transpiler/_transpiler.py", line 67, in transpile 'pass_manager': pass_manager}) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/tools/parallel.py", line 93, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/home/varun/.local/lib/python3.6/site-packages/qiskit/transpiler/_transpiler.py", line 109, in _transpilation pass_manager=pass_manager) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/transpiler/_transpiler.py", line 199, in transpile_dag dag = Optimize1qGates().run(dag) File "/home/varun/.local/lib/python3.6/site-packages/qiskit/transpiler/passes/optimize_1q_gates.py", line 53, in run raise MapperError("internal error") qiskit.mapper._mappererror.MapperError: 'internal error'
1,064
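The one-line patch above excludes conditional nodes from the runs that `Optimize1qGates` later tries to merge. A rough, self-contained sketch of why the condition check matters when grouping consecutive gates into runs (plain dicts stand in for DAG op nodes; none of this is the real DAG API):

```python
# A non-None "condition" marks a gate attached via c_if.
ops = [
    {"name": "u1", "condition": None},
    {"name": "u1", "condition": None},
    {"name": "u1", "condition": ("c", 0)},   # conditional gate must not join a run
    {"name": "u1", "condition": None},
]

runs, current = [], []
for node in ops:
    if node["name"] == "u1" and node["condition"] is None:
        current.append(node)
    else:
        if current:
            runs.append(current)
        current = []
if current:
    runs.append(current)

print([len(run) for run in runs])  # [2, 1] -> the conditional u1 breaks the run in two
```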
Qiskit/qiskit
Qiskit__qiskit-1959
d25e58dde25bd6783815828ce72b35935b764eb3
diff --git a/qiskit/qobj/models.py b/qiskit/qobj/models.py --- a/qiskit/qobj/models.py +++ b/qiskit/qobj/models.py @@ -10,7 +10,8 @@ from marshmallow.validate import Length, Range, Regexp from qiskit.validation.base import BaseModel, BaseSchema, bind_schema -from qiskit.validation.fields import Integer, List, Nested, Raw, String +from qiskit.validation.fields import (Integer, List, Nested, String, + InstructionParameter) class QobjConditionalSchema(BaseSchema): @@ -31,7 +32,7 @@ class QobjInstructionSchema(BaseSchema): # Optional properties. qubits = List(Integer(validate=Range(min=0)), validate=Length(min=1)) - params = List(Raw()) + params = List(InstructionParameter()) memory = List(Integer(validate=Range(min=0)), validate=Length(min=1)) conditional = Nested(QobjConditionalSchema) diff --git a/qiskit/validation/fields/__init__.py b/qiskit/validation/fields/__init__.py --- a/qiskit/validation/fields/__init__.py +++ b/qiskit/validation/fields/__init__.py @@ -12,8 +12,8 @@ 1. Distinguish a new type, like the ``Complex`` number in this module. 2. Use a new Marshmallow field not used in ``qiskit`` yet. -Marshamallow fields does not allow model validation so you need to create a new -field, make it subclass of the Marshamallow field *and* ``ModelTypeValidator``, +Marshmallow fields does not allow model validation so you need to create a new +field, make it subclass of the Marshmallow field *and* ``ModelTypeValidator``, and redefine ``valid_types`` to be the list of valid types. Usually, **the same types this field deserializes to**. For instance:: @@ -24,45 +24,16 @@ class Boolean(marshmallow.fields.Boolean, ModelTypeValidator): See ``ModelTypeValidator`` for more subclassing options. """ + from datetime import date, datetime from marshmallow import fields as _fields -from marshmallow.utils import is_collection from qiskit.validation import ModelTypeValidator from qiskit.validation.fields.polymorphic import ByAttribute, ByType, TryFrom from qiskit.validation.fields.containers import Nested, List - -class Complex(ModelTypeValidator): - """Field for complex numbers. - - Field for parsing complex numbers: - * deserializes to Python's `complex`. - * serializes to a tuple of 2 decimals `(real, imaginary)` - """ - - valid_types = (complex, ) - - default_error_messages = { - 'invalid': '{input} cannot be parsed as a complex number.', - 'format': '"{input}" cannot be formatted as complex number.', - } - - def _serialize(self, value, attr, obj): - try: - return [value.real, value.imag] - except AttributeError: - self.fail('format', input=value) - - def _deserialize(self, value, attr, data): - if not is_collection(value) or len(value) != 2: - self.fail('invalid', input=value) - - try: - return complex(*value) - except (ValueError, TypeError): - self.fail('invalid', input=value) +from .custom import Complex, InstructionParameter class String(_fields.String, ModelTypeValidator): diff --git a/qiskit/validation/fields/custom.py b/qiskit/validation/fields/custom.py new file mode 100644 --- /dev/null +++ b/qiskit/validation/fields/custom.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019, IBM. +# +# This source code is licensed under the Apache License, Version 2.0 found in +# the LICENSE.txt file in the root directory of this source tree. 
+ +"""Fields custom to Terra to be used with Qiskit validated classes.""" + +import numpy +import sympy + +from marshmallow.utils import is_collection + +from qiskit.validation import ModelTypeValidator + + +class Complex(ModelTypeValidator): + """Field for complex numbers. + + Field for parsing complex numbers: + * deserializes to Python's `complex`. + * serializes to a tuple of 2 decimals `(real, imaginary)` + """ + + valid_types = (complex, ) + + default_error_messages = { + 'invalid': '{input} cannot be parsed as a complex number.', + 'format': '"{input}" cannot be formatted as complex number.', + } + + def _serialize(self, value, attr, obj): + try: + return [value.real, value.imag] + except AttributeError: + self.fail('format', input=value) + + def _deserialize(self, value, attr, data): + if not is_collection(value) or len(value) != 2: + self.fail('invalid', input=value) + + try: + return complex(*value) + except (ValueError, TypeError): + self.fail('invalid', input=value) + + +class InstructionParameter(ModelTypeValidator): + """Field for objects used in instruction parameters. + + This field provides support for parsing objects of types that uses by + qobj.experiments.instructions.parameters: + * basic Python types: complex, int, float, str + * ``numpy``: integer, float + * ``sympy``: Symbol, Basic + + Note that by using this field, serialization-deserialization round-tripping + becomes not possible, as certain types serialize to the same Python basic + type (for example, numpy.float and regular float). If possible, it is + recommended that more specific and defined fields are used instead. + """ + valid_types = (complex, int, float, str, + numpy.integer, numpy.float, sympy.Basic, sympy.Symbol) + + def _serialize(self, value, attr, obj): + # pylint: disable=too-many-return-statements + if isinstance(value, (float, int, str)): + return value + if isinstance(value, complex): + return [value.real, value.imag] + if isinstance(value, numpy.integer): + return int(value) + if isinstance(value, numpy.float): + return float(value) + if isinstance(value, sympy.Symbol): + return str(value) + if isinstance(value, sympy.Basic): + if value.is_imaginary: + return [float(sympy.re(value)), float(sympy.im(value))] + else: + return float(value.evalf()) + + return self.fail('invalid', input=value) + + def _deserialize(self, value, attr, data): + if is_collection(value) and len(value) != 2: + return complex(*value) + + return value
ghz example is failing in qobj <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: - **Python version**: - **Operating system**: ### What is the current behavior? ### Steps to reproduce the problem ### What is the expected behavior? ``` >>> python examples/python/ghz.py ``` ``` Traceback (most recent call last): File "examples/python/ghz.py", line 59, in <module> result = job.result() File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/ibmqjob.py", line 186, in result job_response = self._wait_for_result(timeout=timeout, wait=wait) File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/ibmqjob.py", line 190, in _wait_for_result self._wait_for_submission(timeout) File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/ibmqjob.py", line 391, in _wait_for_submission raise self._future_captured_exception File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/ibmqjob.py", line 330, in _submit_callback submit_info = self._api.run_job(self._qobj_payload, backend_name=backend_name) File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/api/ibmqconnector.py", line 144, in run_job job = self.req.post(url, data=json.dumps(data)) File "/anaconda3/lib/python3.6/json/__init__.py", line 231, in dumps return _default_encoder.encode(obj) File "/anaconda3/lib/python3.6/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/anaconda3/lib/python3.6/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/anaconda3/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'Zero' is not JSON serializable ``` Not sure what is causing this serialization error. @diego-plan9 can you take a look?
It seems quite similar to the errors in Aer that caused two tests to be skipped during the Qobj PR (as `IBMQProvider` also contains code very similar to Aer's for dealing with the noise model, etc.) - I'll check with @chriseclectic as they can probably be handled at the same time. A few other examples also fail; it seems the failure happens when trying to run on devices, not Aer. It seems we are being too lax when handling `qobj.experiment.instructions.parameters`, and this ripples up when using types that cannot be serialized by default - will issue a PR shortly!
2019-03-12T11:05:25Z
[]
[]
Traceback (most recent call last): File "examples/python/ghz.py", line 59, in <module> result = job.result() File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/ibmqjob.py", line 186, in result job_response = self._wait_for_result(timeout=timeout, wait=wait) File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/ibmqjob.py", line 190, in _wait_for_result self._wait_for_submission(timeout) File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/ibmqjob.py", line 391, in _wait_for_submission raise self._future_captured_exception File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/ibmqjob.py", line 330, in _submit_callback submit_info = self._api.run_job(self._qobj_payload, backend_name=backend_name) File "/anaconda3/lib/python3.6/site-packages/qiskit/providers/ibmq/api/ibmqconnector.py", line 144, in run_job job = self.req.post(url, data=json.dumps(data)) File "/anaconda3/lib/python3.6/json/__init__.py", line 231, in dumps return _default_encoder.encode(obj) File "/anaconda3/lib/python3.6/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/anaconda3/lib/python3.6/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/anaconda3/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'Zero' is not JSON serializable
1,066
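The patch above introduces an `InstructionParameter` field that knows how to flatten numpy and sympy values before JSON encoding. A simplified sketch of that type dispatch (this assumes numpy and sympy are installed and is not the real marshmallow field):

```python
import json

import numpy
import sympy


def to_json_safe(value):
    """Reduce an instruction parameter to something json.dumps can handle."""
    if isinstance(value, (bool, int, float, str)):
        return value
    if isinstance(value, complex):
        return [value.real, value.imag]
    if isinstance(value, numpy.integer):
        return int(value)
    if isinstance(value, numpy.floating):
        return float(value)
    if isinstance(value, sympy.Basic):
        if value.is_imaginary:
            return [float(sympy.re(value)), float(sympy.im(value))]
        return float(value.evalf())
    raise TypeError("cannot serialize {!r}".format(value))


params = [sympy.Integer(0), numpy.float64(3.14), 1 + 2j, "pi/2"]
print(json.dumps([to_json_safe(p) for p in params]))  # every entry is now JSON serializable
```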
Qiskit/qiskit
Qiskit__qiskit-2149
edc96e5f0581ab6aee40013e7a8e3c6c50feda8e
diff --git a/qiskit/circuit/quantumcircuit.py b/qiskit/circuit/quantumcircuit.py --- a/qiskit/circuit/quantumcircuit.py +++ b/qiskit/circuit/quantumcircuit.py @@ -676,6 +676,14 @@ def assign_variables(self, value_dict): new_circuit.variable_table[variable] = value_dict return new_circuit + @property + def unassigned_variables(self): + """Returns a set containing any variables which have not yet been assigned.""" + return {variable + for variable, parameterized_instructions in self.variable_table.items() + if any(instruction.params[parameter_index].free_symbols + for instruction, parameter_index in parameterized_instructions)} + def _circuit_from_qasm(qasm): # pylint: disable=cyclic-import diff --git a/qiskit/transpiler/preset_passmanagers/default.py b/qiskit/transpiler/preset_passmanagers/default.py --- a/qiskit/transpiler/preset_passmanagers/default.py +++ b/qiskit/transpiler/preset_passmanagers/default.py @@ -25,7 +25,8 @@ from ..passes.mapping.extend_layout import ExtendLayout -def default_pass_manager(basis_gates, coupling_map, initial_layout, seed_mapper): +def default_pass_manager(basis_gates, coupling_map, initial_layout, + skip_numeric_passes, seed_mapper): """ The default pass manager that maps to the coupling map. @@ -33,6 +34,7 @@ def default_pass_manager(basis_gates, coupling_map, initial_layout, seed_mapper) basis_gates (list[str]): list of basis gate names supported by the target. Default: ['u1','u2','u3','cx','id'] initial_layout (Layout or None): If None, trivial layout will be chosen. + skip_numeric_passes (bool): If true, skip passes which require fixed parameter values coupling_map (CouplingMap): coupling map (perhaps custom) to target in mapping. seed_mapper (int or None): random seed for the swap_mapper. @@ -72,8 +74,14 @@ def default_pass_manager(basis_gates, coupling_map, initial_layout, seed_mapper) pass_manager.append(Unroller(['u1', 'u2', 'u3', 'id', 'cx'])) # Simplify single qubit gates and CXs - pass_manager.append([Optimize1qGates(), CXCancellation(), Depth(), FixedPoint('depth')], + if not skip_numeric_passes: + simplification_passes = [Optimize1qGates(), CXCancellation()] + else: + simplification_passes = [CXCancellation()] + + pass_manager.append(simplification_passes + [Depth(), FixedPoint('depth')], do_while=lambda property_set: not property_set['depth_fixed_point']) + return pass_manager diff --git a/qiskit/transpiler/transpiler.py b/qiskit/transpiler/transpiler.py --- a/qiskit/transpiler/transpiler.py +++ b/qiskit/transpiler/transpiler.py @@ -101,12 +101,15 @@ def _transpilation(circuit, basis_gates=None, coupling_map=None, if pass_manager and not pass_manager.working_list: return circuit + is_parametric_circuit = bool(circuit.unassigned_variables) + dag = circuit_to_dag(circuit) del circuit final_dag = transpile_dag(dag, basis_gates=basis_gates, coupling_map=coupling_map, initial_layout=initial_layout, + skip_numeric_passes=is_parametric_circuit, seed_mapper=seed_mapper, pass_manager=pass_manager) @@ -117,7 +120,8 @@ def _transpilation(circuit, basis_gates=None, coupling_map=None, # pylint: disable=redefined-builtin def transpile_dag(dag, basis_gates=None, coupling_map=None, - initial_layout=None, seed_mapper=None, pass_manager=None): + initial_layout=None, skip_numeric_passes=None, + seed_mapper=None, pass_manager=None): """Transform a dag circuit into another dag circuit (transpile), through consecutive passes on the dag. @@ -135,6 +139,7 @@ def transpile_dag(dag, basis_gates=None, coupling_map=None, eg. 
[[0, 2], [1, 2], [1, 3], [3, 4]} initial_layout (Layout or None): A layout object + skip_numeric_passes (bool): If true, skip passes which require fixed parameter values seed_mapper (int): random seed_mapper for the swap mapper pass_manager (PassManager): pass manager instance for the transpilation process If None, a default set of passes are run. @@ -164,6 +169,7 @@ def transpile_dag(dag, basis_gates=None, coupling_map=None, pass_manager = default_pass_manager(basis_gates, CouplingMap(coupling_map), initial_layout, + skip_numeric_passes, seed_mapper=seed_mapper) else: pass_manager = default_pass_manager_simulator(basis_gates)
Transpiling parameterized circuits for device backends raises ``` >>> qobj = qk.compile(qc, backend=FakeTokyo()) /Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/compiler.py:50: DeprecationWarning: qiskit.compile() is deprecated and will be removed in Qiskit Terra 0.9. Please use qiskit.transpile() to transform circuits and qiskit.assemble_circuits() to produce qobj. DeprecationWarning) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/compiler.py", line 67, in compile initial_layout, seed_mapper, pass_manager) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpiler.py", line 85, in transpile 'pass_manager': pass_manager}) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/parallel.py", line 93, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpiler.py", line 143, in _transpilation pass_manager=pass_manager) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpiler.py", line 234, in transpile_dag dag = pass_manager.run_passes(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 129, in run_passes dag = self._do_pass(pass_, dag, passset.options) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 169, in _do_pass new_dag = pass_.run(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/optimize_1q_gates.py", line 62, in run left_parameters = tuple([float(x) for x in left_parameters]) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/optimize_1q_gates.py", line 62, in <listcomp> left_parameters = tuple([float(x) for x in left_parameters]) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/lib/python3.5/site-packages/sympy/core/expr.py", line 256, in __float__ raise TypeError("can't convert expression to float") TypeError: can't convert expression to float ```
I'm not sure this is going to be fixable: we switched optimize 1q gates in #1738 to resolve things numerically, which resulted in a very significant speed up. If we're passing in sympy expressions for parameters which cannot be represented as a float, I don't see how we can run optimize 1q on them, short of adding sympy back to the pass (which I don't want to do) so that we can call simplify() in the hope that the terms in the gate expressions cancel without needing the undefined parameter to do the evaluation. (I'm not sure it could provide any meaningful optimization if the parameters aren't defined, though.) We need to make the transpiler smart here and use a PassManager that does not invoke `Optimize1qGates` (or any other pass that involves numerical optimizations based on gate parameters -- another example is the `ConsolidateBlocks` pass in #2134).
2019-04-17T22:14:56Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/compiler.py", line 67, in compile initial_layout, seed_mapper, pass_manager) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpiler.py", line 85, in transpile 'pass_manager': pass_manager}) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/parallel.py", line 93, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpiler.py", line 143, in _transpilation pass_manager=pass_manager) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpiler.py", line 234, in transpile_dag dag = pass_manager.run_passes(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 129, in run_passes dag = self._do_pass(pass_, dag, passset.options) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 169, in _do_pass new_dag = pass_.run(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/optimize_1q_gates.py", line 62, in run left_parameters = tuple([float(x) for x in left_parameters]) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/optimize_1q_gates.py", line 62, in <listcomp> left_parameters = tuple([float(x) for x in left_parameters]) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/lib/python3.5/site-packages/sympy/core/expr.py", line 256, in __float__ raise TypeError("can't convert expression to float") TypeError: can't convert expression to float
1,113
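The fix above decides per circuit whether to drop `Optimize1qGates` from the default pass manager by asking whether any parameter still carries free symbols. A small sketch of that detection step, using sympy directly rather than the circuit's variable table (illustrative only):

```python
import sympy

theta = sympy.Symbol('theta')

# Parameter lists of two u3 gates: the first is still symbolic, the second fully bound.
instruction_params = [[theta, 0, 0], [3.14, 0, 0]]


def has_unassigned(params):
    # Plain numbers have no free_symbols attribute, so getattr defaults to an empty set.
    return any(getattr(p, 'free_symbols', set()) for p in params)


skip_numeric_passes = any(has_unassigned(p) for p in instruction_params)
print(skip_numeric_passes)  # True -> numeric simplification passes would be skipped
```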
Qiskit/qiskit
Qiskit__qiskit-2169
da338d8ff9ca7e67ec675aac9414b9976341c580
diff --git a/qiskit/visualization/matplotlib.py b/qiskit/visualization/matplotlib.py --- a/qiskit/visualization/matplotlib.py +++ b/qiskit/visualization/matplotlib.py @@ -787,9 +787,15 @@ def _draw_ops(self, verbose=False): def param_parse(v, pimode=False): for i, e in enumerate(v): if pimode: - v[i] = MatplotlibDrawer.format_pi(e) + try: + v[i] = MatplotlibDrawer.format_pi(e) + except TypeError: + v[i] = str(e) else: - v[i] = MatplotlibDrawer.format_numeric(e) + try: + v[i] = MatplotlibDrawer.format_numeric(e) + except TypeError: + v[i] = str(e) if v[i].startswith('-'): v[i] = '$-$' + v[i][1:] param = ', '.join(v)
Visualization support for parameterized circuits Following #2103, gate params can be sympy expressions ``` >>> theta = sympy.Symbol('theta') >>> qc = qk.QuantumCircuit(cr, qr) >>> qc.rx(theta, qr[0]) <qiskit.extensions.standard.rx.RXGate object at 0x116d5c1d0> >>> qc.measure(qr[0], cr[0]) <qiskit.circuit.measure.Measure object at 0x11ed71c50> >>> print(qc.draw()) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 437, in __str__ return self.single_string() File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 462, in single_string return "\n".join(self.lines()) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 497, in lines layers = self.build_layers() File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 820, in build_layers self._instruction_to_gate(instruction, layer) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 780, in _instruction_to_gate BoxOnQuWire(TextDrawing.label_for_box(instruction))) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 618, in label_for_box params = TextDrawing.params_for_label(instruction) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 610, in params_for_label return ['%.5g' % i for i in instruction.op.params File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 611, in <listcomp> if not isinstance(i, (numpy.ndarray, sympy.Matrix))] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/lib/python3.5/site-packages/sympy/core/expr.py", line 256, in __float__ raise TypeError("can't convert expression to float") TypeError: can't convert expression to float ```
Is this on all backends or just text? Ok, I just tested this locally: mpl also fails for the same reason, trying to cast the sympy expression to a float, which it can't do because there is no value. But latex actually works: ![param_tex](https://user-images.githubusercontent.com/2447371/56429144-dd0d6a00-628f-11e9-92f0-f4c28a757b50.png) without any modifications. I think this is a first where latex is the one without a bug! :) I'm self-assigning the text part. Well done latex drawer! :) Text drawer fix is in #2168. When merged, this issue should be renamed to be mpl specific.
2019-04-22T16:42:48Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 437, in __str__ return self.single_string() File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 462, in single_string return "\n".join(self.lines()) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 497, in lines layers = self.build_layers() File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 820, in build_layers self._instruction_to_gate(instruction, layer) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 780, in _instruction_to_gate BoxOnQuWire(TextDrawing.label_for_box(instruction))) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 618, in label_for_box params = TextDrawing.params_for_label(instruction) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 610, in params_for_label return ['%.5g' % i for i in instruction.op.params File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/visualization/text.py", line 611, in <listcomp> if not isinstance(i, (numpy.ndarray, sympy.Matrix))] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/lib/python3.5/site-packages/sympy/core/expr.py", line 256, in __float__ raise TypeError("can't convert expression to float") TypeError: can't convert expression to float
1,118
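The mpl fix above is the same fallback applied in two places: try numeric formatting, and fall back to the parameter's string form if the value is symbolic. A compact sketch (assumes sympy is installed; '%.5g' mirrors the drawer's numeric format):

```python
import sympy


def format_param(value):
    try:
        return '%.5g' % float(value)
    except TypeError:          # "can't convert expression to float" for symbolic values
        return str(value)


theta = sympy.Symbol('theta')
print([format_param(p) for p in (3.14159265, theta)])  # ['3.1416', 'theta']
```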
Qiskit/qiskit
Qiskit__qiskit-2350
09ed6a15b068259d5e36d55aa0973af5b8099287
diff --git a/qiskit/converters/qobj_to_circuits.py b/qiskit/converters/qobj_to_circuits.py --- a/qiskit/converters/qobj_to_circuits.py +++ b/qiskit/converters/qobj_to_circuits.py @@ -30,7 +30,7 @@ def qobj_to_circuits(qobj): """ warnings.warn('qiskit.converters.qobj_to_circuit() is deprecated and will ' 'be removed in Qiskit Terra 0.9. Please use ' - 'qiskit.compiler.disassemble_circuits() to convert a qobj ' + 'qiskit.assembler.disassemble() to convert a qobj ' 'to list of circuits.', DeprecationWarning) variables = disassemble(qobj)
disassemble_circuits() suggested in qobj_to_circuits.py DeprecationWarning doesn't exist <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.8.0 - **Python version**: 3.7.2 - **Operating system**: macOS `qobj_to_circuits` gives the following `DeprecationWarning`: ```python .../qiskit/converters/qobj_to_circuits.py:34: DeprecationWarning: qiskit.converters.qobj_to_circuit() is deprecated and will be removed in Qiskit Terra 0.9. Please use qiskit.compiler.disassemble_circuits() to convert a qobj to list of circuits. ``` but `qiskit.compiler.disassemble_circuits()` doesn't exist. ### What is the current behavior? ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> ImportError: cannot import name 'disassemble_circuits' from 'qiskit.compiler' (/Users/matteo/Work/projects/ibmq/env/lib/python3.7/site-packages/qiskit/compiler/__init__.py) ``` ### Steps to reproduce the problem 1. Installed qiskit in a new python virtualenv with `pip install qiskit` 2. `from qiskit.compiler import disassemble_circuits` ``` >>> qiskit.__qiskit_version__ {'qiskit': '0.10.0', 'qiskit-terra': '0.8.0', 'qiskit-ignis': '0.1.1', 'qiskit-aer': '0.2.0', 'qiskit-ibmq-provider': '0.2.1', 'qiskit-aqua': '0.5.0'} ``` ### What is the expected behavior? If a function is deprecated, and the warning suggests to use a new function, this function should exist in the current release. ### Suggested solutions Implement the function or change the deprecation warning.
Sorry, there seems to be a mistake in the deprecation message. For now please use ```from qiskit.assembler import disassemble``` @mtreinish I think `disassemble` should be added under the `qiskit.compile` namespace. Yeah, this is a bug: we did not update the disassemble warning when we changed the API. This was probably my fault. It was when I originally added the function (and deprecation message) in #2137, but it looks like that was changed in #2244 right before the release without updating the deprecation message. So yeah, my fault :-(
2019-05-08T14:09:44Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> ImportError: cannot import name 'disassemble_circuits' from 'qiskit.compiler' (/Users/matteo/Work/projects/ibmq/env/lib/python3.7/site-packages/qiskit/compiler/__init__.py)
1,148
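As the discussion above notes, the working entry point is `qiskit.assembler.disassemble`, not the `qiskit.compiler` path the warning mentions. A minimal round trip assuming qiskit-terra 0.8+ is installed (the exact return shape of `disassemble` may vary between releases):

```python
from qiskit import QuantumCircuit
from qiskit.assembler import disassemble
from qiskit.compiler import assemble

qc = QuantumCircuit(1, 1)
qc.h(0)
qc.measure(0, 0)

qobj = assemble(qc, shots=1024)
circuits, run_config, header = disassemble(qobj)
print(circuits[0])
```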
Qiskit/qiskit
Qiskit__qiskit-2573
3aa97b11f3104113d0ae4e754da8f7e75d07a917
diff --git a/qiskit/dagcircuit/dagcircuit.py b/qiskit/dagcircuit/dagcircuit.py --- a/qiskit/dagcircuit/dagcircuit.py +++ b/qiskit/dagcircuit/dagcircuit.py @@ -1142,7 +1142,8 @@ def collect_runs(self, namelist): s = list(self._multi_graph.successors(node)) while len(s) == 1 and \ s[0].type == "op" and \ - s[0].name in namelist: + s[0].name in namelist and \ + s[0].condition is None: group.append(s[0]) nodes_seen[s[0]] = True s = list(self._multi_graph.successors(s[0]))
internal error from optimize_1q_gates from conditional cy gate ``` >>> qc = qk.QuantumCircuit(2,2) >>> qc.cy(0,1).c_if(qc.cregs[0], 0) <qiskit.circuit.instructionset.InstructionSet object at 0x12c540160> >>> qk.transpile(qc, backend=FakeTenerife()) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 147, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_configs))) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/parallel.py", line 100, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 168, in _transpile_circuit return transpile_circuit(circuit, transpile_config) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpile_circuit.py", line 62, in transpile_circuit return pass_manager.run(circuit) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 147, in run dag = self._do_pass(pass_, dag, passset.options) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 180, in _do_pass new_dag = pass_.run(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/optimize_1q_gates.py", line 53, in run raise TranspilerError("internal error") qiskit.transpiler.exceptions.TranspilerError: 'internal error' ```
It looks like this is potentially caused by an issue in `dagcircuit.collect_runs()`. It's supposed to return a list of non-conditional runs of op nodes with the given names, but in the cy().c_if() example above a conditional node is being returned, which triggers the condition check here: https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/transpiler/passes/optimize_1q_gates.py#L50
2019-06-04T18:14:51Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 147, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_configs))) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/parallel.py", line 100, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 168, in _transpile_circuit return transpile_circuit(circuit, transpile_config) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpile_circuit.py", line 62, in transpile_circuit return pass_manager.run(circuit) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 147, in run dag = self._do_pass(pass_, dag, passset.options) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 180, in _do_pass new_dag = pass_.run(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/optimize_1q_gates.py", line 53, in run raise TranspilerError("internal error") qiskit.transpiler.exceptions.TranspilerError: 'internal error'
1,194
Qiskit/qiskit
Qiskit__qiskit-2661
2151cb92497836577d9610d28d78dade1b566f24
diff --git a/qiskit/version.py b/qiskit/version.py --- a/qiskit/version.py +++ b/qiskit/version.py @@ -108,7 +108,10 @@ def _get_qiskit_versions(): pass cmd = [sys.executable, '-m', 'pip', 'freeze'] - reqs = _minimal_ext_cmd(cmd) + try: + reqs = _minimal_ext_cmd(cmd) + except Exception: + return out_dict reqs_dict = {} for req in reqs.split(): req_parts = req.decode().split('==')
Initializing __qiskit_version__ raises OSError ### Information - **Qiskit Terra version**: 0.9.0, master branch - **Python version**: 3.6.6 - **Operating system**: Debian GNU/Linux 9 (stretch) ### What is the current behavior? Since Terra 0.9.0 the way to find out the used Qiskit-packages has changed. In version.py, the method `_get_qiskit_versions()` is called, which calls `_minimal_ext_cmd` to get the pip freeze output to parse for the used qiskit package versions. The call to this function raises an OSError because subprocess.Popen has returncode 1. Traceback (most recent call last): File "./docs/example_qiskit_entangle.py", line 20, in <module> from qiskit.validation.base import Obj File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/__init__.py", line 47, in <module> from qiskit.providers.basicaer import BasicAer File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/providers/__init__.py", line 19, in <module> from .basebackend import BaseBackend File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/providers/basebackend.py", line 23, in <module> from qiskit.version import VERSION as __version__ File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/version.py", line 117, in <module> __qiskit_version__ = _get_qiskit_versions() File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/version.py", line 86, in _get_qiskit_versions reqs = _minimal_ext_cmd(cmd) File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/version.py", line 41, in _minimal_ext_cmd raise OSError ### Steps to reproduce the problem This happens on our Debian Jenkins environment, where by default the environment variable HOME is set to root directory. Further the Jenkins user is not running under root. We suspect that the pip freeze command uses the HOME directory somehow and fails because it has no rights on the root directory. What we did in our investigation to make it work on our environment, we set 'HOME' to a writeable directory for the Jenkins user and had to add environment variable 'HOME' to the env parameter of subprocess.Popen (in function _minimal_ext_cmd),
I'm not sure what's going on with `pip freeze` here. I tried `HOME=/ pip freeze` from within my terra venv (on mac) and it seemed to work okay. It's a bug that we `raise OSError` here; we should include at least stderr and the return code. I agree this is a bug; it's my mistake. I actually realized yesterday, after #2652 merged, that we don't have any error handling. Nothing should raise an exception from the version module, because it gets executed at import time; we should catch it and make it non-fatal. Having the pip versions in qiskit_version is not critical, so if pip fails for whatever reason we should just ignore it and move on. I'll push a patch up to fix this shortly.
2019-06-20T14:53:18Z
[]
[]
Traceback (most recent call last): File "./docs/example_qiskit_entangle.py", line 20, in <module> from qiskit.validation.base import Obj File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/__init__.py", line 47, in <module> from qiskit.providers.basicaer import BasicAer File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/providers/__init__.py", line 19, in <module> from .basebackend import BaseBackend File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/providers/basebackend.py", line 23, in <module> from qiskit.version import VERSION as __version__ File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/version.py", line 117, in <module> __qiskit_version__ = _get_qiskit_versions() File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/version.py", line 86, in _get_qiskit_versions reqs = _minimal_ext_cmd(cmd) File "/var/jenkins_home/workspace/SDK_dev/venv/lib/python3.6/site-packages/qiskit/version.py", line 41, in _minimal_ext_cmd raise OSError ### Steps to reproduce the problem This happens on our Debian Jenkins environment, where by default the environment variable HOME is set to root directory. Further the Jenkins user is not running under root. We suspect that the pip freeze command uses the HOME directory somehow and fails because it has no rights on the root directory.
1,206
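The patch above makes the pip lookup best-effort so that a failing subprocess can no longer break `import qiskit`. A standalone sketch of that behaviour (not the real `qiskit/version.py`, just the same catch-and-continue idea):

```python
import subprocess
import sys


def installed_qiskit_packages():
    """Best-effort map of installed qiskit packages to versions; empty on any failure."""
    versions = {}
    try:
        out = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'],
                                      stderr=subprocess.DEVNULL)
    except Exception:  # deliberately broad: version info is nice to have, never fatal
        return versions
    for line in out.decode().splitlines():
        name, _, version = line.partition('==')
        if name.startswith('qiskit'):
            versions[name] = version
    return versions


print(installed_qiskit_packages())
```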
Qiskit/qiskit
Qiskit__qiskit-2783
d9f36863258dd94d2d84c87f2e8518980a4a9df5
diff --git a/qiskit/dagcircuit/dagcircuit.py b/qiskit/dagcircuit/dagcircuit.py --- a/qiskit/dagcircuit/dagcircuit.py +++ b/qiskit/dagcircuit/dagcircuit.py @@ -256,7 +256,7 @@ def apply_operation_back(self, op, qargs=None, cargs=None, condition=None): cargs = cargs or [] all_cbits = self._bits_in_condition(condition) - all_cbits.extend(cargs) + all_cbits = set(all_cbits).union(cargs) self._check_condition(op.name, condition) self._check_bits(qargs, self.output_map) @@ -799,6 +799,20 @@ def substitute_node_with_dag(self, node, input_dag, wires=None): pred_map, succ_map = self._make_pred_succ_maps(node) full_pred_map, full_succ_map = self._full_pred_succ_maps(pred_map, succ_map, input_dag, wire_map) + + if condition_bit_list: + # If we are replacing a conditional node, map input dag through + # wire_map to verify that it will not modify any of the conditioning + # bits. + condition_bits = set(condition_bit_list) + + for op_node in input_dag.op_nodes(): + mapped_cargs = {wire_map[carg] for carg in op_node.cargs} + + if condition_bits & mapped_cargs: + raise DAGCircuitError('Mapped DAG would alter clbits ' + 'on which it would be conditioned.') + # Now that we know the connections, delete node self._multi_graph.remove_node(node) diff --git a/qiskit/extensions/simulator/snapshot.py b/qiskit/extensions/simulator/snapshot.py --- a/qiskit/extensions/simulator/snapshot.py +++ b/qiskit/extensions/simulator/snapshot.py @@ -20,7 +20,7 @@ from qiskit import QuantumCircuit from qiskit.circuit.quantumregister import QuantumRegister from qiskit.circuit import Instruction -from qiskit.extensions.exceptions import ExtensionError +from qiskit.extensions.exceptions import QiskitError, ExtensionError class Snapshot(Instruction): @@ -89,6 +89,9 @@ def label(self, name): else: raise TypeError('label expects a string') + def c_if(self, classical, val): + raise QiskitError('Snapshots are simulator directives and cannot be conditional.') + def snapshot(self, label, diff --git a/qiskit/extensions/standard/barrier.py b/qiskit/extensions/standard/barrier.py --- a/qiskit/extensions/standard/barrier.py +++ b/qiskit/extensions/standard/barrier.py @@ -18,6 +18,7 @@ from qiskit.circuit import QuantumCircuit from qiskit.circuit.quantumregister import QuantumRegister from qiskit.circuit import Instruction +from qiskit.exceptions import QiskitError class Barrier(Instruction): @@ -34,6 +35,9 @@ def inverse(self): def broadcast_arguments(self, qargs, cargs): yield [qarg for sublist in qargs for qarg in sublist], [] + def c_if(self, classical, val): + raise QiskitError('Barriers are compiler directives and cannot be conditional.') + def barrier(self, *qargs): """Apply barrier to circuit.
Measures conditioned on register containing the target bit generate an invalid DAG When attempting to condition a `measure` on the register containing the target bit: ``` >>> qc = qk.QuantumCircuit(1,1) >>> qc.measure(0,0).c_if(qc.cregs[0],0) >>> qc.depth() 1 >>> qk.converters.circuit_to_dag(qc).depth() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/dagcircuit/dagcircuit.py", line 574, in depth raise DAGCircuitError("not a DAG") qiskit.dagcircuit.exceptions.DAGCircuitError: 'not a DAG' ``` ![image](https://user-images.githubusercontent.com/2241698/58880513-0bab9c80-86a6-11e9-85e7-802bff8954ee.png) The drawer also has difficulty with them: ``` >>> qc = qk.QuantumCircuit(1,1) >>> qc.measure(0,0).c_if(qc.cregs[0],0) >>> print(qc) q_0: |0> c_0: 0 >>> qc = qk.QuantumCircuit(2,1) >>> qc.h([0,1]) >>> qc.measure(0,0) >>> qc.measure(1,0).c_if(qc.cregs[0], 0) >>> print(qc) ┌───┐┌─┐ q_0: |0>┤ H ├┤M├ ├───┤└╥┘ q_1: |0>┤ H ├─╫─ └───┘ ║ c_0: 0 ══════╩═ ``` Conditioning a `measure` on a separate register seems to work okay ``` >>> qr = qk.QuantumRegister(2) >>> cr1 = qk.ClassicalRegister(1) >>> cr2 = qk.ClassicalRegister(1) >>> qc = qk.QuantumCircuit(qr, cr1, cr2) >>> qc.h(qr) >>> qc.measure(qr[0], cr1[0]) >>> qc.measure(qr[1], cr2[0]).c_if(cr1, 0) >>> qc.depth() 3 >>> qk.converters.circuit_to_dag(qc).depth() 3 >>> print(qc) ┌───┐┌─┐ q3_0: |0>┤ H ├┤M├─────── ├───┤└╥┘ ┌─┐ q3_1: |0>┤ H ├─╫───┤M├── └───┘ ║ ┌─┴┴┴─┐ c2_0: 0 ══════╩═╡ = 0 ╞ └──║──┘ c3_0: 0 ═══════════╩═══ ``` ![image](https://user-images.githubusercontent.com/2241698/58881003-39ddac00-86a7-11e9-95b0-ba20623dd2de.png)
TIL that conditional measurements are a thing. This is actually probably not a bug, save for in the random testing. There is no causality defined in this situation, so you get the cyclic graph. I don't see the lack of causality here. `qc.measure(1,0).c_if(qc.cregs[0], 0)` to me decomposes as "Check the value of creg0; if 0, trigger a measure of qubit 1 into clbit 0", which should be well defined. The presence or absence of the measure depends on what the value of `clbit 0` is at the start of the operation, not what it will be at the end. That said, I don't know of an algorithm or use case that requires a conditional measure; I wouldn't be opposed to not supporting them (this is the first time I came across them as well), but then we should raise early when building them, rather than throwing `not a DAG` down the road. We also allow conditional barriers, by the way, which seems more obviously a bug. Actually, I agree with you. I was wrong, there is an order.
2019-07-12T20:40:07Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/dagcircuit/dagcircuit.py", line 574, in depth raise DAGCircuitError("not a DAG") qiskit.dagcircuit.exceptions.DAGCircuitError: 'not a DAG'
1,229
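The core of the fix above is a consistency check: a substituted DAG must not write any clbit that its own condition reads, and compiler directives like barrier and snapshot refuse `c_if` outright. The check itself reduces to a set intersection (tuples stand in for clbits; illustrative only):

```python
# Bits read by the classical condition, and bits the replacement block would write.
condition_bits = {('c', 0)}
written_clbits = {('c', 0)}          # e.g. the target of a conditioned measure

overlap = condition_bits & written_clbits
print(bool(overlap))  # True -> such a substitution is rejected with a DAGCircuitError
```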
Qiskit/qiskit
Qiskit__qiskit-2931
5303420daa8be87865593b1b5c8a943ae910b82d
diff --git a/qiskit/transpiler/passes/consolidate_blocks.py b/qiskit/transpiler/passes/consolidate_blocks.py --- a/qiskit/transpiler/passes/consolidate_blocks.py +++ b/qiskit/transpiler/passes/consolidate_blocks.py @@ -19,7 +19,7 @@ The blocks are collected by a previous pass, such as Collect2qBlocks. """ -from qiskit.circuit import QuantumRegister, QuantumCircuit, Qubit +from qiskit.circuit import QuantumRegister, QuantumCircuit from qiskit.dagcircuit import DAGCircuit from qiskit.quantum_info.operators import Operator from qiskit.quantum_info.synthesis import TwoQubitBasisDecomposer @@ -57,12 +57,7 @@ def run(self, dag): new_dag.add_creg(creg) # compute ordered indices for the global circuit wires - global_index_map = {} - for wire in dag.wires: - if not isinstance(wire, Qubit): - continue - global_qregs = list(dag.qregs.values()) - global_index_map[wire] = global_qregs.index(wire.register) + wire.index + global_index_map = {wire: idx for idx, wire in enumerate(dag.qubits())} blocks = self.property_set['block_list'] # just to make checking if a node is in any block easier
ConsolidateBlocks raises for CX between two registers From https://travis-ci.com/Qiskit/qiskit-terra/jobs/216588160#L6863: ``` >>> qr1 = qk.QuantumRegister(1) >>> qr2 = qk.QuantumRegister(2) >>> qc = qk.QuantumCircuit(qr2, qr1, cr) >>> qc.cx(qr1[0], qr2[1]) >>> qc.measure(qr1[0], cr[0]) >>> qk.transpile(qc, optimization_level=3) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 187, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_configs))) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/parallel.py", line 100, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 208, in _transpile_circuit return transpile_circuit(circuit, transpile_config) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpile_circuit.py", line 65, in transpile_circuit return pass_manager.run(circuit) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 171, in run dag = self._do_pass(pass_, dag, passset.options) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 202, in _do_pass dag = self._run_this_pass(pass_, dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 215, in _run_this_pass new_dag = pass_.run(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/consolidate_blocks.py", line 93, in run subcirc.append(nd.op, [q[block_index_map[i]] for i in nd.qargs]) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 359, in append instructions.add(self._append(instruction, qarg, carg), qarg, carg) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 382, in _append self._check_dups(qargs) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 438, in _check_dups raise QiskitError("duplicate qubit arguments") qiskit.exceptions.QiskitError: 'duplicate qubit arguments' ```
.. and this one if you don't mind @maddy-tod :)
2019-08-07T11:33:54Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 187, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_configs))) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/parallel.py", line 100, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 208, in _transpile_circuit return transpile_circuit(circuit, transpile_config) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/transpile_circuit.py", line 65, in transpile_circuit return pass_manager.run(circuit) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 171, in run dag = self._do_pass(pass_, dag, passset.options) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 202, in _do_pass dag = self._run_this_pass(pass_, dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 215, in _run_this_pass new_dag = pass_.run(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/consolidate_blocks.py", line 93, in run subcirc.append(nd.op, [q[block_index_map[i]] for i in nd.qargs]) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 359, in append instructions.add(self._append(instruction, qarg, carg), qarg, carg) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 382, in _append self._check_dups(qargs) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 438, in _check_dups raise QiskitError("duplicate qubit arguments") qiskit.exceptions.QiskitError: 'duplicate qubit arguments'
1,259
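The bug above comes from building the global qubit index as "position of the register in the register list plus the wire's index inside it", which collides as soon as a register holds more than one qubit; the fix simply enumerates the DAG's qubits. A toy comparison of the two schemes (tuples stand in for qubits; no Qiskit imports needed):

```python
registers = [('q2', 2), ('q1', 1)]                       # two registers: sizes 2 and 1
wires = [(name, i) for name, size in registers for i in range(size)]
register_names = [name for name, _ in registers]

# Old scheme: register position + index inside the register.
buggy = {wire: register_names.index(wire[0]) + wire[1] for wire in wires}
# Fixed scheme: plain enumeration of the dag's qubits.
fixed = {wire: idx for idx, wire in enumerate(wires)}

print(sorted(buggy.values()))  # [0, 1, 1] -> ('q2', 1) and ('q1', 0) collide
print(sorted(fixed.values()))  # [0, 1, 2]
```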
Qiskit/qiskit
Qiskit__qiskit-2947
85ec21f6db77a316c661f47a8906e7cadf1b09f9
diff --git a/qiskit/circuit/parameter.py b/qiskit/circuit/parameter.py --- a/qiskit/circuit/parameter.py +++ b/qiskit/circuit/parameter.py @@ -15,6 +15,8 @@ Parameter Class for variable parameters. """ +from uuid import uuid4 + import sympy from .parameterexpression import ParameterExpression @@ -22,6 +24,27 @@ class Parameter(ParameterExpression): """Parameter Class for variable parameters""" + + def __new__(cls, _, uuid=None): + # Parameter relies on self._uuid being set prior to other attributes + # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash + # or __eq__ functions. + + obj = object.__new__(cls) + + if uuid is None: + obj._uuid = uuid4() + else: + obj._uuid = uuid + + return obj + + def __getnewargs__(self): + # Unpickling won't in general call __init__ but will always call + # __new__. Specify arguments to be passed to __new__ when unpickling. + + return (self.name, self._uuid) + def __init__(self, name): self._name = name @@ -48,3 +71,9 @@ def __deepcopy__(self, memo=None): def __repr__(self): return '{}({})'.format(self.__class__.__name__, self.name) + + def __eq__(self, other): + return isinstance(other, Parameter) and self._uuid == other._uuid + + def __hash__(self): + return hash(self._uuid)
assemble.py _expand_parameters(circuits, run_config) apparently broken <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: master - **Python version**: 3.6.8 - **Operating system**: Linux ### What is the current behavior? `Parameter` binding does not succeed as reported by user `@Adrian Auer` in Qiskit Slack. ### Steps to reproduce the problem ``` from qiskit import Aer, QuantumCircuit, QuantumRegister, execute from qiskit.circuit import Parameter # create m = 2 circuits qr = QuantumRegister(1) quantum_circuit_1 = QuantumCircuit(qr) quantum_circuit_2 = QuantumCircuit(qr) theta = Parameter('theta') # add parametrized gates quantum_circuit_1.u3(theta, 0, 0, qr[0]) quantum_circuit_2.u3(theta, 3.14, 0, qr[0]) circuits = [quantum_circuit_1, quantum_circuit_2] # inspect parameters property for circuit in circuits: print(circuit.parameters) # bind parameter to n = 1 values job = execute(circuits, Aer.get_backend('qasm_simulator'), shots=512, parameter_binds=[{theta: 1}]) ``` Result is error: ``` Traceback (most recent call last): File "adrian_auer_example.py", line 25, in <module> parameter_binds=[{theta: 1}]) File "/home/jax/work/QISKit/DEV/qiskit-terra/qiskit/execute.py", line 218, in execute run_config=run_config File "/home/jax/work/QISKit/DEV/qiskit-terra/qiskit/compiler/assemble.py", line 149, in assemble run_config=run_config) File "/home/jax/work/QISKit/DEV/qiskit-terra/qiskit/compiler/assemble.py", line 298, in _expand_parameters 'Circuit parameters: {}').format(all_bind_parameters, all_circuit_parameters)) qiskit.exceptions.QiskitError: 'Mismatch between run_config.parameter_binds and all circuit parameters. Parameter binds: [dict_keys([Parameter(theta)])] Circuit parameters: [{Parameter(theta)}, {Parameter(theta)}]' ``` ### What is the expected behavior? Parameter would bind and circuits would execute. ### Suggested solutions In `qiskit/compiler/assembly.py:_expand_parameters` lines 293-294 both of the following tests are failing: ``` or any(unique_parameters != bind_params for bind_params in all_bind_parameters) \ or any(unique_parameters != parameters for parameters in all_circuit_parameters): ``` It appears to be because `unique_parameters` is a `list` of `Parameter` each of which is being compared to the elements of a list of dictionaries. The comparison should be re-examined so that types match up.
This bug is similar to #2429 . Right now, `Parameter`s depend on python identity for equality, but when they are serialized and sent to another process (here by `parallel_map` inside `transpile` inside `execute`), and return, they are instantiated with a new identity and so no longer treated as equal. The comparison in lines 293-294 of `assembly.py` attempts to check that there is only one set of parameters (`unique_parameters`), which is fully used by every circuit and fully bound by every set of bindings. Just prior to the comparison, `unique_parameters` should hold the single instance of `theta` used to build the circuits, e.g. `{Parameter(theta)}`, but: ``` (Pdb) p unique_parameters {Parameter(theta), Parameter(theta), Parameter(theta)} (Pdb) p [id(p) for p in unique_parameters] [5126415024, 5126414408, 5126158936] ``` As a possible workaround, each circuit could be transpiled one-by-one, and then assembled as a group and executed. Alternately, `phi` could be made a parameter to the `u3` gate, and then bound along with `theta`.
2019-08-08T20:19:27Z
[]
[]
Traceback (most recent call last): File "adrian_auer_example.py", line 25, in <module> parameter_binds=[{theta: 1}]) File "/home/jax/work/QISKit/DEV/qiskit-terra/qiskit/execute.py", line 218, in execute run_config=run_config File "/home/jax/work/QISKit/DEV/qiskit-terra/qiskit/compiler/assemble.py", line 149, in assemble run_config=run_config) File "/home/jax/work/QISKit/DEV/qiskit-terra/qiskit/compiler/assemble.py", line 298, in _expand_parameters 'Circuit parameters: {}').format(all_bind_parameters, all_circuit_parameters)) qiskit.exceptions.QiskitError: 'Mismatch between run_config.parameter_binds and all circuit parameters. Parameter binds: [dict_keys([Parameter(theta)])] Circuit parameters: [{Parameter(theta)}, {Parameter(theta)}]'
1,263
Qiskit/qiskit
Qiskit__qiskit-3051
b0a4d01143133438bd2d123f23b5ac48289ebedf
diff --git a/qiskit/visualization/bloch.py b/qiskit/visualization/bloch.py --- a/qiskit/visualization/bloch.py +++ b/qiskit/visualization/bloch.py @@ -53,6 +53,7 @@ import os import numpy as np +from matplotlib import get_backend import matplotlib.pyplot as plt # pylint: disable=import-error from matplotlib.patches import FancyArrowPatch # pylint: disable=import-error from mpl_toolkits.mplot3d import (Axes3D, proj3d) # pylint: disable=import-error @@ -626,7 +627,9 @@ def save(self, name=None, output='png', dirc=None): self.fig.savefig(name) self.savenum += 1 if self.fig: - plt.close(self.fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(self.fig) def _hide_tick_lines_and_labels(axis): diff --git a/qiskit/visualization/counts_visualization.py b/qiskit/visualization/counts_visualization.py --- a/qiskit/visualization/counts_visualization.py +++ b/qiskit/visualization/counts_visualization.py @@ -25,6 +25,7 @@ from .exceptions import VisualizationError if HAS_MATPLOTLIB: + from matplotlib import get_backend import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator @@ -187,5 +188,7 @@ def plot_histogram(data, figsize=(7, 5), color=None, number_to_keep=None, ax.legend(loc='upper left', bbox_to_anchor=(1.01, 1.0), ncol=1, borderaxespad=0, frameon=True, fontsize=12) if fig: - plt.close(fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig diff --git a/qiskit/visualization/gate_map.py b/qiskit/visualization/gate_map.py --- a/qiskit/visualization/gate_map.py +++ b/qiskit/visualization/gate_map.py @@ -23,6 +23,7 @@ if HAS_MATPLOTLIB: import matplotlib + from matplotlib import get_backend import matplotlib.pyplot as plt # pylint: disable=import-error import matplotlib.patches as mpatches import matplotlib.cm as cm @@ -232,7 +233,9 @@ def plot_gate_map(backend, figsize=None, ax.set_xlim([-1, x_max+1]) ax.set_ylim([-(y_max+1), 1]) if not input_axes: - plt.close(fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig return None @@ -448,6 +451,7 @@ def plot_error_map(backend, figsize=(12, 9), show_title=True): if show_title: fig.suptitle('{name} Error Map'.format(name=backend.name()), fontsize=24, y=0.9) - - plt.close(fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig diff --git a/qiskit/visualization/matplotlib.py b/qiskit/visualization/matplotlib.py --- a/qiskit/visualization/matplotlib.py +++ b/qiskit/visualization/matplotlib.py @@ -26,6 +26,7 @@ import numpy as np try: + from matplotlib import get_backend from matplotlib import patches from matplotlib import pyplot as plt HAS_MATPLOTLIB = True @@ -484,7 +485,9 @@ def draw(self, filename=None, verbose=False): if filename: self.figure.savefig(filename, dpi=self._style.dpi, bbox_inches='tight') - plt.close(self.figure) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(self.figure) return self.figure def _draw_regs(self): diff --git a/qiskit/visualization/pulse_visualization.py b/qiskit/visualization/pulse_visualization.py --- a/qiskit/visualization/pulse_visualization.py +++ b/qiskit/visualization/pulse_visualization.py @@ -22,6 +22,9 @@ from qiskit.visualization.exceptions import VisualizationError from qiskit.visualization.pulse import matplotlib as _matplotlib +if _matplotlib.HAS_MATPLOTLIB: + from matplotlib import get_backend + def pulse_drawer(data, dt=1, style=None, 
filename=None, interp_method=None, scaling=None, channels_to_plot=None, @@ -50,7 +53,10 @@ def pulse_drawer(data, dt=1, style=None, filename=None, matplotlib.figure: A matplotlib figure object for the pulse envelope Raises: VisualizationError: when invalid data is given or lack of information + ImportError: when matplotlib is not installed """ + if not _matplotlib.HAS_MATPLOTLIB: + raise ImportError('Must have Matplotlib installed.') if isinstance(data, SamplePulse): drawer = _matplotlib.SamplePulseDrawer(style=style) image = drawer.draw(data, dt=dt, interp_method=interp_method, scaling=scaling) @@ -66,7 +72,9 @@ def pulse_drawer(data, dt=1, style=None, filename=None, if filename: image.savefig(filename, dpi=drawer.style.dpi, bbox_inches='tight') - _matplotlib.plt.close(image) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + _matplotlib.plt.close(image) if image and interactive: image.show() return image diff --git a/qiskit/visualization/state_visualization.py b/qiskit/visualization/state_visualization.py --- a/qiskit/visualization/state_visualization.py +++ b/qiskit/visualization/state_visualization.py @@ -26,6 +26,7 @@ from .matplotlib import HAS_MATPLOTLIB if HAS_MATPLOTLIB: + from matplotlib import get_backend from matplotlib.ticker import MaxNLocator from matplotlib import pyplot as plt from matplotlib.patches import FancyArrowPatch @@ -127,7 +128,9 @@ def plot_state_hinton(rho, title='', figsize=None): if title: fig.suptitle(title, fontsize=16) plt.tight_layout() - plt.close(fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig @@ -158,7 +161,9 @@ def plot_bloch_vector(bloch, title="", ax=None, figsize=None): if ax is None: fig = B.fig fig.set_size_inches(figsize[0], figsize[1]) - plt.close(fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig return None @@ -198,7 +203,9 @@ def plot_bloch_multivector(rho, title='', figsize=None): plot_bloch_vector(bloch_state, "qubit " + str(i), ax=ax, figsize=figsize) fig.suptitle(title, fontsize=16) - plt.close(fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig @@ -347,7 +354,9 @@ def plot_state_city(rho, title="", figsize=None, color=None, tick.label.set_fontsize(14) plt.suptitle(title, fontsize=16) plt.tight_layout() - plt.close(fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig @@ -396,7 +405,9 @@ def plot_state_paulivec(rho, title="", figsize=None, color=None): for tick in ax.xaxis.get_major_ticks()+ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) ax.set_title(title, fontsize=16) - plt.close(fig) + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig @@ -626,8 +637,9 @@ def plot_state_qsphere(rho, figsize=None): verticalalignment='center', fontsize=14) fig.tight_layout() - plt.close(fig) - + if get_backend() in ['module://ipykernel.pylab.backend_inline', + 'nbAgg']: + plt.close(fig) return fig
circuit.draw() interactive failed when used in python Shell <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.8.2 - **Python version**: 3.7.3 - **Operating system**: Windows 10 ### What is the current behavior? Having created an simple circuit. Trying to draw it with circuit.draw(output = 'mpl', interactive = True). The command produces some error as follows: >>> circuit.draw(output = 'mpl', interactive = True) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Users\pc\.conda\envs\qcircuit\lib\site-packages\qiskit\circuit\quantumcircuit.py", line 487, in draw justify=justify) File "C:\Users\pc\.conda\envs\qcircuit\lib\site-packages\qiskit\visualization\circuit_visualization.py", line 218, in circuit_drawer image.show() File "C:\Users\pc\AppData\Roaming\Python\Python37\site-packages\matplotlib\figure.py", line 450, in show manager.show() File "C:\Users\pc\AppData\Roaming\Python\Python37\site-packages\matplotlib\backends\_backend_tk.py", line 546, in show self.canvas._tkcanvas.bind("<Destroy>", destroy) File "C:\Users\pc\.conda\envs\qcircuit\lib\tkinter\__init__.py", line 1251, in bind return self._bind(('bind', self._w), sequence, func, add) File "C:\Users\pc\.conda\envs\qcircuit\lib\tkinter\__init__.py", line 1206, in _bind self.tk.call(what + (sequence, cmd)) _tkinter.TclError: can't invoke "bind" command: application has been destroyed ### Steps to reproduce the problem Open anaconda prompt. Activate the environment created specifically for qiskit. Open python shell and type the following commands: >>> import qiskit >>> from qiskit import QuantumCircuit >>> circuit = QuantumCircuit(2,2) >>> circuit.h(0) >>> circuit.draw(output = 'mpl', interactive = True) The error will show up. ### What is the expected behavior? I expect some interactive panel popping up like when I use plt.show() to show some matplotlib figures. If this is not the correct way to get a mpl form of a circuit figure showing please tell me which reference should I be looking into.
Hi @skxsky I agree this is unexpected behaviour. This happens because we close the figure before returning it, to prevent it from rendering twice in Jupyter notebooks. If you run this code from a Jupyter notebook it should work, or you can save the image using `circuit.draw(output = 'mpl', filename="my_circuit.png")`. I will try to have a look at getting the behaviour more inline with what is expected. @maddy-tod I see. So rendering an interactive image in python shell is just not a feature. Apart from simply printing the circuit, are there any other ways to show a clearer image in python shell? If I were to use scripts then saving the image would work fine for me. @skxsky no, printing the circuit or saving the image are the only options at the moment. I will try to get the interactive element fixed ASAP! @maddy-tod Thanks, that solves my puzzles.
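A short sketch of the two options named in this thread, assuming the same `circuit` object from the report; the filename is just an example.

```python
# Option 1: render with matplotlib and write the figure to disk.
circuit.draw(output='mpl', filename='my_circuit.png')

# Option 2: the text drawer displays fine in a plain Python shell.
print(circuit.draw(output='text'))
```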
2019-08-28T12:02:28Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Users\pc\.conda\envs\qcircuit\lib\site-packages\qiskit\circuit\quantumcircuit.py", line 487, in draw justify=justify) File "C:\Users\pc\.conda\envs\qcircuit\lib\site-packages\qiskit\visualization\circuit_visualization.py", line 218, in circuit_drawer image.show() File "C:\Users\pc\AppData\Roaming\Python\Python37\site-packages\matplotlib\figure.py", line 450, in show manager.show() File "C:\Users\pc\AppData\Roaming\Python\Python37\site-packages\matplotlib\backends\_backend_tk.py", line 546, in show self.canvas._tkcanvas.bind("<Destroy>", destroy) File "C:\Users\pc\.conda\envs\qcircuit\lib\tkinter\__init__.py", line 1251, in bind return self._bind(('bind', self._w), sequence, func, add) File "C:\Users\pc\.conda\envs\qcircuit\lib\tkinter\__init__.py", line 1206, in _bind self.tk.call(what + (sequence, cmd)) _tkinter.TclError: can't invoke "bind" command: application has been destroyed
1,278
Qiskit/qiskit
Qiskit__qiskit-3079
54ea1b9ad78a2ccf3595284410c4da72cb941ef0
diff --git a/qiskit/circuit/quantumcircuit.py b/qiskit/circuit/quantumcircuit.py --- a/qiskit/circuit/quantumcircuit.py +++ b/qiskit/circuit/quantumcircuit.py @@ -355,24 +355,8 @@ def append(self, instruction, qargs=None, cargs=None): expanded_cargs = [self.cbit_argument_conversion(carg) for carg in cargs or []] instructions = InstructionSet() - - # When broadcasting was handled by decorators (prior to #2282), append - # received multiple distinct instruction instances, one for each expanded - # arg. With broadcasting as part of QuantumCircuit.append, the - # instruction instance is constructed before append is called. However, - # (at least) ParameterTable expects instruction instances to be unique - # within a circuit, so make instruction deepcopies for expanded_args[1:]. - - first_instruction = True for (qarg, carg) in instruction.broadcast_arguments(expanded_qargs, expanded_cargs): - if first_instruction: - instructions.add( - self._append(instruction, qarg, carg), qarg, carg) - first_instruction = False - else: - instructions.add( - self._append(deepcopy(instruction), qarg, carg), qarg, carg) - + instructions.add(self._append(instruction, qarg, carg), qarg, carg) return instructions def _append(self, instruction, qargs, cargs): @@ -410,7 +394,9 @@ def _append(self, instruction, qargs, cargs): for parameter in param.parameters: if parameter in current_parameters: - self._parameter_table[parameter].append((instruction, param_index)) + if not self._check_dup_param_spec(self._parameter_table[parameter], + instruction, param_index): + self._parameter_table[parameter].append((instruction, param_index)) else: if parameter.name in {p.name for p in current_parameters}: raise QiskitError( @@ -419,6 +405,12 @@ def _append(self, instruction, qargs, cargs): return instruction + def _check_dup_param_spec(self, parameter_spec_list, instruction, param_index): + for spec in parameter_spec_list: + if spec[0] is instruction and spec[1] == param_index: + return True + return False + def add_register(self, *regs): """Add registers.""" if not regs:
ParameterTable expects Instructions to be used only once within a circuit See the bug reported in #3008 . The implementation of `ParameterTable` and the associated binding machinery operate under the assumption that a given `Instruction` instance will appear in only one gate in a circuit, but this isn't guaranteed. e.g. a user could write: ``` >>> import qiskit as qk >>> p = qk.circuit.Parameter('p') >>> qc = qk.QuantumCircuit(2) >>> rz = qk.extensions.standard.RZGate(p) >>> qc.append(rz, [0], []) >>> qc.append(rz, [1], []) >>> print(qc) ┌───────┐ q_0: |0>┤ Rz(p) ├ ├───────┤ q_1: |0>┤ Rz(p) ├ └───────┘ >>> qc.bind_parameters({p: 3}) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 875, in bind_parameters new_circuit._bind_parameter(parameter, value) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 897, in _bind_parameter instr.params[param_index] = instr.params[param_index].bind({parameter: value}) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/parameterexpression.py", line 68, in bind self._raise_if_passed_unknown_parameters(parameter_values.keys()) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/parameterexpression.py", line 136, in _raise_if_passed_unknown_parameters [str(p) for p in unknown_parameters])) qiskit.exceptions.QiskitError: "Cannot bind Parameters (['p']) not present in expression." ``` Users of `Instruction.repeat` would see the same problem. #3013 worked around this by forcing deepcopies of instructions when broadcasting, but this doesn't resolve the general problem.
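For completeness, a user-side sketch that avoids the breakage described above by never reusing a single `Instruction` instance; whether `copy.deepcopy` keeps the `Parameter` shared depends on the Terra version, so treat this as an illustration rather than the library fix.

```python
from copy import deepcopy
import qiskit as qk

p = qk.circuit.Parameter('p')
qr = qk.QuantumRegister(2)

# Circuit methods build a fresh RZGate per call, matching ParameterTable's
# one-instance-per-appearance assumption.
qc = qk.QuantumCircuit(qr)
qc.rz(p, qr[0])
qc.rz(p, qr[1])
qc.bind_parameters({p: 3})

# If a pre-built gate object must be reused, copy it for every extra append.
rz = qk.extensions.standard.RZGate(p)
qc2 = qk.QuantumCircuit(qr)
qc2.append(rz, [qr[0]], [])
qc2.append(deepcopy(rz), [qr[1]], [])
```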
2019-09-06T21:32:06Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 875, in bind_parameters new_circuit._bind_parameter(parameter, value) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 897, in _bind_parameter instr.params[param_index] = instr.params[param_index].bind({parameter: value}) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/parameterexpression.py", line 68, in bind self._raise_if_passed_unknown_parameters(parameter_values.keys()) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/parameterexpression.py", line 136, in _raise_if_passed_unknown_parameters [str(p) for p in unknown_parameters])) qiskit.exceptions.QiskitError: "Cannot bind Parameters (['p']) not present in expression."
1,283
Qiskit/qiskit
Qiskit__qiskit-3675
21a3424368afc75afe3f695f72654320cbb16795
diff --git a/qiskit/converters/ast_to_dag.py b/qiskit/converters/ast_to_dag.py --- a/qiskit/converters/ast_to_dag.py +++ b/qiskit/converters/ast_to_dag.py @@ -48,8 +48,11 @@ from qiskit.extensions.standard.rz import RZGate from qiskit.extensions.standard.cu1 import Cu1Gate from qiskit.extensions.standard.ch import CHGate +from qiskit.extensions.standard.crx import CrxGate +from qiskit.extensions.standard.cry import CryGate from qiskit.extensions.standard.crz import CrzGate from qiskit.extensions.standard.cu3 import Cu3Gate +from qiskit.extensions.standard.rxx import RXXGate from qiskit.extensions.standard.rzz import RZZGate @@ -106,6 +109,7 @@ class AstInterpreter: "sdg": SdgGate, "swap": SwapGate, "rx": RXGate, + "rxx": RXXGate, "ry": RYGate, "rz": RZGate, "rzz": RZZGate, @@ -115,6 +119,8 @@ class AstInterpreter: "cy": CyGate, "cz": CzGate, "ch": CHGate, + "crx": CrxGate, + "cry": CryGate, "crz": CrzGate, "cu1": Cu1Gate, "cu3": Cu3Gate,
Reading QASM strings with Ion-Trap Gates broken <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.11.0 - **Python version**: 3.7.5 - **Operating system**: Windows ### What is the current behavior? Cannot construct a quantum Circuit object for non-superconducting gates, even though ion trap gates (RXX, MS, etc) are somewhat supported. ### Steps to reproduce the problem ```python >>> from qiskit import QuantumCircuit >>> qc = QuantumCircuit(3) >>> qc.rxx(3.14, 0, 2) <qiskit.circuit.instructionset.InstructionSet object at 0x0000018552A839C8> >>> qc.from_qasm_str(qc.qasm()) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "[...]\site-packages\qiskit\circuit\quantumcircuit.py", line 1174, in from_qasm_str return _circuit_from_qasm(qasm) File "[...]\site-packages\qiskit\circuit\quantumcircuit.py", line 1241, in _circuit_from_qasm ast = qasm.parse() File "[...]\site-packages\qiskit\qasm\qasm.py", line 69, in parse return qasm_p.parse(self._data) File "[...]\site-packages\qiskit\qasm\qasmparser.py", line 1089, in parse self.parser.parse(data, lexer=self.lexer, debug=self.parse_deb) File "[...]\site-packages\ply\yacc.py", line 333, in parse return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) File "[...]\site-packages\ply\yacc.py", line 1120, in parseopt_notrack p.callable(pslice) File "[...]\site-packages\qiskit\qasm\qasmparser.py", line 660, in p_unitary_op_4 self.verify_as_gate(program[1], program[5], arglist=program[3]) File "[...]\site-packages\qiskit\qasm\qasmparser.py", line 133, in verify_as_gate + "', line", str(obj.line), 'file', obj.file) qiskit.qasm.exceptions.QasmError: "Cannot find gate definition for 'rxx', line 4 file " >>> QuantumCircuit.from_qasm_str(qc.qasm()) [SAME ERROR MESSAGE] ``` ### What is the expected behavior? Qiskit should be able to generate a ``QuantumCircuit`` object from a QASM string which includes extension gates. ### Suggested solutions It appears that the QASM parser needs to be aware of the non-IBM/superconducting gates. Confirm? So a new "iontrap.inc" file (or similar) needs to be created and placed in a path that the QASM parser can recognize? Is there any documentation on writing a new include file?
Can I have a go at it? I am new to this, so please correct me if I am wrong at any point. I believe we would need to add the rxx gate (and, for full support, any other gates defined in qiskit/extensions) to qelib1.inc.
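Until the parser knows about these gates, one stop-gap (an illustration only, not the eventual fix) is to splice a local `rxx` definition into the emitted QASM from the report's `qc` before reading it back; the decomposition below reproduces exp(-iθ/2·X⊗X) only up to global phase.

```python
from qiskit import QuantumCircuit

# Local definition the parser can resolve; equivalent to RXX up to global phase.
RXX_DEF = ("gate rxx(theta) a,b "
           "{ h a; h b; cx a,b; rz(theta) b; cx a,b; h b; h a; }\n")

qasm = qc.qasm()
qasm = qasm.replace('include "qelib1.inc";\n',
                    'include "qelib1.inc";\n' + RXX_DEF)
roundtripped = QuantumCircuit.from_qasm_str(qasm)
```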
2020-01-03T14:19:05Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "[...]\site-packages\qiskit\circuit\quantumcircuit.py", line 1174, in from_qasm_str return _circuit_from_qasm(qasm) File "[...]\site-packages\qiskit\circuit\quantumcircuit.py", line 1241, in _circuit_from_qasm ast = qasm.parse() File "[...]\site-packages\qiskit\qasm\qasm.py", line 69, in parse return qasm_p.parse(self._data) File "[...]\site-packages\qiskit\qasm\qasmparser.py", line 1089, in parse self.parser.parse(data, lexer=self.lexer, debug=self.parse_deb) File "[...]\site-packages\ply\yacc.py", line 333, in parse return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) File "[...]\site-packages\ply\yacc.py", line 1120, in parseopt_notrack p.callable(pslice) File "[...]\site-packages\qiskit\qasm\qasmparser.py", line 660, in p_unitary_op_4 self.verify_as_gate(program[1], program[5], arglist=program[3]) File "[...]\site-packages\qiskit\qasm\qasmparser.py", line 133, in verify_as_gate + "', line", str(obj.line), 'file', obj.file) qiskit.qasm.exceptions.QasmError: "Cannot find gate definition for 'rxx', line 4 file "
1,368
Qiskit/qiskit
Qiskit__qiskit-3869
c59783a5739dd7f2d25ead7549bb95c642d69e9a
diff --git a/qiskit/circuit/quantumcircuit.py b/qiskit/circuit/quantumcircuit.py --- a/qiskit/circuit/quantumcircuit.py +++ b/qiskit/circuit/quantumcircuit.py @@ -124,6 +124,13 @@ class QuantumCircuit: extension_lib = "include \"qelib1.inc\";" def __init__(self, *regs, name=None): + if any([not isinstance(reg, (QuantumRegister, ClassicalRegister)) for reg in regs]): + try: + regs = tuple(int(reg) for reg in regs) + except Exception: + raise CircuitError("Circuit args must be Registers or be castable to an int" + + "(%s '%s' was provided)" + % ([type(reg).__name__ for reg in regs], regs)) if name is None: name = self.cls_prefix() + str(self.cls_instances()) if sys.platform != "win32" and not is_main_process():
QuantumCircuit constructor fails if n_qubits is np.int64 <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. --> ### What is the expected enhancement? ``` Python 3.5.6 |Anaconda, Inc.| (default, Aug 26 2018, 16:30:03) [GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import numpy as np >>> import qiskit as qk >>> n = np.int64(12) >>> qr = qk.QuantumRegister(n) >>> qc = qk.QuantumCircuit(n) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 157, in __init__ self.add_register(*regs) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 541, in add_register if register.name in [reg.name for reg in self.qregs + self.cregs]: AttributeError: 'numpy.int64' object has no attribute 'name' ``` Similar to as was added for `Register`s in #2288, the `QuantumCircuit` constructor should test if a provided argument can be cast to an int before it raises an error. For reference, here is where `Register` does the check: https://github.com/Qiskit/qiskit-terra/blob/2ee7a3a/qiskit/circuit/register.py#L39 and where `QuantumCircuit` makes new `Registers` from the provided `int`: https://github.com/Qiskit/qiskit-terra/blob/703c9a3/qiskit/circuit/quantumcircuit.py#L522
@1ucian0 have you made progress on this? I'd like to give it a go if not (I'm new to qiskit and open source and this looks like a nice first issue to try out!) Sure! Go ahead! Yay great I'll get cracking! Is there a corresponding test file that needs updating as well? Yes, tests should be added (maybe in the files we already have?). You can use the code from OP for that.
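While the constructor-side coercion is being worked on, a caller-side cast is enough to unblock the snippet from the report; this only illustrates the casting idea discussed above, not the eventual library change.

```python
import numpy as np
import qiskit as qk

n = np.int64(12)
qr = qk.QuantumRegister(n)       # Register already casts to int internally
qc = qk.QuantumCircuit(int(n))   # explicit cast avoids the AttributeError
```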
2020-02-20T23:12:04Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 157, in __init__ self.add_register(*regs) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 541, in add_register if register.name in [reg.name for reg in self.qregs + self.cregs]: AttributeError: 'numpy.int64' object has no attribute 'name'
1,398
Qiskit/qiskit
Qiskit__qiskit-4366
77b38a925a48f6976b17611a8b7ca3c77b4c827c
diff --git a/qiskit/circuit/library/standard_gates/__init__.py b/qiskit/circuit/library/standard_gates/__init__.py --- a/qiskit/circuit/library/standard_gates/__init__.py +++ b/qiskit/circuit/library/standard_gates/__init__.py @@ -84,5 +84,17 @@ from .y import YGate, CYGate from .z import ZGate, CZGate -from .boolean_logical_gates import logical_and, logical_or from .multi_control_rotation_gates import mcrx, mcry, mcrz + +# deprecated gates +from .boolean_logical_gates import logical_and, logical_or +from .u1 import Cu1Gate +from .u3 import Cu3Gate +from .x import CnotGate, ToffoliGate +from .swap import FredkinGate +from .i import IdGate +from .rx import CrxGate +from .ry import CryGate +from .rz import CrzGate +from .y import CyGate +from .z import CzGate
Old classes are not accessible via qiskit.extensions <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.14.0 - **Python version**: All - **Operating system**: Any ### What is the current behavior? Running ``` from qiskit.extensions import Cu3Gate ``` Raises an `ImportError` ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> ImportError: cannot import name 'Cu3Gate' from 'qiskit.extensions' (/home/mtreinish/git/qiskit/qiskit/.tox/lint/lib/python3.8/site-packages/qiskit/extensions/__init__.py) ``` as do other classes which were deprecated as part of the 0.13.0 release cleanup. It worked fine on qiskit-terra 0.13.0 (also without a deprecation warning, which I thought it would raise). This is a big breakage and needs to be fixed in a quick 0.14.1 release. ### Steps to reproduce the problem ``` from qiskit.extensions import Cu3Gate ``` ### What is the expected behavior? This works, this is likely fallout from rushing through #4035 ### Suggested solutions Fix this
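A possible stop-gap until a 0.14.1 fix lands, assuming the renamed classes introduced with the circuit library in 0.14 are the intended replacements; the exact import path below is my reading of that release, not something stated in the issue.

```python
# New-style name; the deprecated Cu3Gate alias in qiskit.extensions is what broke.
from qiskit.circuit.library import CU3Gate
```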
2020-04-30T21:54:14Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> ImportError: cannot import name 'Cu3Gate' from 'qiskit.extensions' (/home/mtreinish/git/qiskit/qiskit/.tox/lint/lib/python3.8/site-packages/qiskit/extensions/__init__.py)
1,465
Qiskit/qiskit
Qiskit__qiskit-447
06482631beceba0a3571e925dcdb1a23b97ecdbf
diff --git a/qiskit/mapper/_compiling.py b/qiskit/mapper/_compiling.py --- a/qiskit/mapper/_compiling.py +++ b/qiskit/mapper/_compiling.py @@ -22,7 +22,7 @@ import math import numpy as np -from scipy.linalg import expm +import scipy.linalg as la from ._mappererror import MapperError @@ -40,7 +40,7 @@ def euler_angles_1q(unitary_matrix): small = 1e-10 if unitary_matrix.shape != (2, 2): raise MapperError("compiling.euler_angles_1q expected 2x2 matrix") - phase = np.linalg.det(unitary_matrix)**(-1.0/2.0) + phase = la.det(unitary_matrix)**(-1.0/2.0) U = phase * unitary_matrix # U in SU(2) # OpenQASM SU(2) parameterization: # U[0, 0] = exp(-i(phi+lambda)/2) * cos(theta/2) @@ -78,7 +78,7 @@ def euler_angles_1q(unitary_matrix): Rzlambda = np.array([[np.exp(-1j*lamb/2.0), 0], [0, np.exp(1j*lamb/2.0)]], dtype=complex) V = np.dot(Rzphi, np.dot(Rytheta, Rzlambda)) - if np.linalg.norm(V - U) > small: + if la.norm(V - U) > small: raise MapperError("compiling.euler_angles_1q incorrect result") return theta, phi, lamb, "U(%.15f,%.15f,%.15f)" % (theta, phi, lamb) @@ -159,14 +159,14 @@ def two_qubit_kak(unitary_matrix): """ if unitary_matrix.shape != (4, 4): raise MapperError("compiling.two_qubit_kak expected 4x4 matrix") - phase = np.linalg.det(unitary_matrix)**(-1.0/4.0) + phase = la.det(unitary_matrix)**(-1.0/4.0) # Make it in SU(4), correct phase at the end U = phase * unitary_matrix # B changes to the Bell basis - B = (1.0/math.sqrt(2)) * np.array([[1, 1j, 0, 0], - [0, 0, 1j, 1], - [0, 0, 1j, -1], - [1, -1j, 0, 0]], dtype=complex) + B = (1.0/np.sqrt(2)) * np.array([[1, 1j, 0, 0], + [0, 0, 1j, 1], + [0, 0, 1j, -1], + [1, -1j, 0, 0]], dtype=complex) # U' = Bdag . U . B Uprime = np.dot(np.transpose(B.conjugate()), np.dot(U, B)) # M^2 = trans(U') . U' @@ -174,9 +174,9 @@ def two_qubit_kak(unitary_matrix): # Diagonalize M2 # Must use diagonalization routine which finds a real orthogonal matrix P # when M2 is real. 
- D, P = np.linalg.eig(M2) + D, P = la.eig(M2) # If det(P) == -1, apply a swap to make P in SO(4) - if abs(np.linalg.det(P)+1) < 1e-5: + if abs(la.det(P)+1) < 1e-5: swap = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], @@ -185,15 +185,15 @@ def two_qubit_kak(unitary_matrix): D = np.diag(np.dot(swap, np.dot(np.diag(D), swap))) Q = np.diag(np.sqrt(D)) # array from elementwise sqrt # Want to take square root so that Q has determinant 1 - if abs(np.linalg.det(Q)+1) < 1e-5: + if abs(la.det(Q)+1) < 1e-5: Q[0, 0] = -Q[0, 0] - Kprime = np.dot(Uprime, np.dot(P, np.dot(np.linalg.inv(Q), + Kprime = np.dot(Uprime, np.dot(P, np.dot(la.inv(Q), np.transpose(P)))) K1 = np.dot(B, np.dot(Kprime, np.dot(P, np.transpose(B.conjugate())))) A = np.dot(B, np.dot(Q, np.transpose(B.conjugate()))) K2 = np.dot(B, np.dot(np.transpose(P), np.transpose(B.conjugate()))) KAK = np.dot(K1, np.dot(A, K2)) - if np.linalg.norm(KAK - U, 2) > 1e-6: + if la.norm(KAK - U, 2) > 1e-6: raise MapperError("compiling.two_qubit_kak: " + "unknown error in KAK decomposition") # Compute parameters alpha, beta, gamma so that @@ -210,9 +210,9 @@ def two_qubit_kak(unitary_matrix): # K1 = kron(U1, U2) and K2 = kron(V1, V2) # Find the matrices U1, U2, V1, V2 L = K1[0:2, 0:2] - if np.linalg.norm(L) < 1e-9: + if la.norm(L) < 1e-9: L = K1[0:2, 2:4] - if np.linalg.norm(L) < 1e-9: + if la.norm(L) < 1e-9: L = K1[2:4, 2:4] Q = np.dot(L, np.transpose(L.conjugate())) U2 = L / np.sqrt(Q[0, 0]) @@ -223,9 +223,9 @@ def two_qubit_kak(unitary_matrix): U1[1, 0] = R[2, 0] U1[1, 1] = R[2, 2] L = K2[0:2, 0:2] - if np.linalg.norm(L) < 1e-9: + if la.norm(L) < 1e-9: L = K2[0:2, 2:4] - if np.linalg.norm(L) < 1e-9: + if la.norm(L) < 1e-9: L = K2[2:4, 2:4] Q = np.dot(L, np.transpose(L.conjugate())) V2 = L / np.sqrt(Q[0, 0]) @@ -235,12 +235,12 @@ def two_qubit_kak(unitary_matrix): V1[0, 1] = R[0, 2] V1[1, 0] = R[2, 0] V1[1, 1] = R[2, 2] - if np.linalg.norm(np.kron(U1, U2) - K1) > 1e-4 or \ - np.linalg.norm(np.kron(V1, V2) - K2) > 1e-4: + if la.norm(np.kron(U1, U2) - K1) > 1e-4 or \ + la.norm(np.kron(V1, V2) - K2) > 1e-4: raise MapperError("compiling.two_qubit_kak: " + "error in SU(2) x SU(2) part") - test = expm(1j*(alpha * xx + beta * yy + gamma * zz)) - if np.linalg.norm(A - test) > 1e-4: + test = la.expm(1j*(alpha * xx + beta * yy + gamma * zz)) + if la.norm(A - test) > 1e-4: raise MapperError("compiling.two_qubit_kak: " + "error in A part") # Circuit that implements K1 * A * K2 (up to phase), using @@ -286,7 +286,7 @@ def two_qubit_kak(unitary_matrix): V = np.dot(g6, V) V = np.dot(g7, V) - if np.linalg.norm(V - U*phase.conjugate()) > 1e-6: + if la.norm(V - U*phase.conjugate()) > 1e-6: raise MapperError("compiling.two_qubit_kak: " + "sequence incorrect, unknown error") @@ -387,11 +387,11 @@ def two_qubit_kak(unitary_matrix): V = np.dot(np.kron(np.identity(2), rz_array(gate["params"][1])), V) # Put V in SU(4) and test up to global phase - V = np.linalg.det(V)**(-1.0/4.0) * V - if np.linalg.norm(V - U) > 1e-6 and \ - np.linalg.norm(1j*V - U) > 1e-6 and \ - np.linalg.norm(-1*V - U) > 1e-6 and \ - np.linalg.norm(-1j*V - U) > 1e-6: + V = la.det(V)**(-1.0/4.0) * V + if la.norm(V - U) > 1e-6 and \ + la.norm(1j*V - U) > 1e-6 and \ + la.norm(-1*V - U) > 1e-6 and \ + la.norm(-1j*V - U) > 1e-6: raise MapperError("compiling.two_qubit_kak: " + "sequence incorrect, unknown error")
two-qubit-kak error when computing phase <!--- Provide a general summary of the issue in the Title above --> When I run the following program, I encountered an error: ``` #setup from qiskit import QuantumProgram import Qconfig qp = QuantumProgram() qp.set_api(Qconfig.APItoken, Qconfig.config['url']) from qiskit.mapper import two_qubit_kak import numpy as np perm = np.array([[0.,0.,0.,1.], [1.,0.,0.,0.], [0.,1.,0.,0.], [0.,0.,1.,0.] ]) permCircuit = two_qubit_kak(perm) print(perm) print(permCircuit) ``` ## Expected Behavior <!--- If you're describing a bug, tell us what should happen --> <!--- If you're suggesting a change/improvement, tell us how it should work --> It should produce a circuit for the unitary matrix below: ``` [ [0,0,0,1], [1,0,0,0], [0,1,0,0], [0,0,1,0] ] ``` ## Current Behavior <!--- If describing a bug, tell us what happens instead of the expected behavior --> <!--- If suggesting a change/improvement, explain the difference from current behavior --> It gives an error message: ``` /Users/rraymondhp/miniconda3/envs/QISKitenv/lib/python3.6/site-packages/qiskit/mapper/_compiling.py:162: RuntimeWarning: invalid value encountered in double_scalars phase = np.linalg.det(unitary_matrix)**(-1.0/4.0) Traceback (most recent call last): File "test_u.py", line 15, in <module> permCircuit = two_qubit_kak(perm) File "/Users/rraymondhp/miniconda3/envs/QISKitenv/lib/python3.6/site-packages/qiskit/mapper/_compiling.py", line 177, in two_qubit_kak D, P = np.linalg.eig(M2) File "/Users/rraymondhp/miniconda3/envs/QISKitenv/lib/python3.6/site-packages/numpy/linalg/linalg.py", line 1143, in eig _assertFinite(a) File "/Users/rraymondhp/miniconda3/envs/QISKitenv/lib/python3.6/site-packages/numpy/linalg/linalg.py", line 216, in _assertFinite raise LinAlgError("Array must not contain infs or NaNs") numpy.linalg.linalg.LinAlgError: Array must not contain infs or NaNs ``` ## Possible Solution <!--- Not obligatory, but suggest a fix/reason for the bug, --> <!--- or ideas how to implement the addition or change --> ## Steps to Reproduce (for bugs) <!--- Provide a link to a live example, or an unambiguous set of steps to --> <!--- reproduce this bug. Include code to reproduce, if relevant --> 1. 2. 3. 4. ## Context <!--- How has this issue affected you? What are you trying to accomplish? --> <!--- Providing context helps us come up with a solution that is most useful in the real world --> ## Your Environment <!--- Include as many relevant details about the environment you experienced the bug in --> * Version used: * Environment name and version (e.g. Python 3.6.1): * Operating System and version:
Hi @rraymondhp the problem is that eig does not behave as needed for all inputs. Here is one way to fix the problem. If M2 is close to real, round it to a real type before calling eig. If M2 is symmetric, use eigh. If M2 is already diagonal, substitute D and P and skip the call to eig. Actually it is a problem that occurs here: https://github.com/QISKit/qiskit-sdk-py/blob/06482631beceba0a3571e925dcdb1a23b97ecdbf/qiskit/mapper/_compiling.py#L162 The NumPy `det` function is being called, and returns a `np.float64` type. The problem occurs when doing `np.float64(-1)**(-1.0/4.0)`, that leads to a `nan` being returned. In contrast, using the SciPy routine `scipy.linalg.det` returns a generic `float` type and the same computation succeeds. This is because the NumPy data types do not support negative fractional powers for negative numbers unless the data type is `complex`. The solution is to either use the SciPy routine, or cast the return value from `np.linalg.det` into `complex`. I can submit a Pull.
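A tiny reproduction of the numeric corner case described above, using the permutation matrix from the report; the `complex(...)` cast is one of the two fixes proposed in the thread.

```python
import numpy as np

perm = np.array([[0., 0., 0., 1.],
                 [1., 0., 0., 0.],
                 [0., 1., 0., 0.],
                 [0., 0., 1., 0.]])

d = np.linalg.det(perm)            # -1.0 as an np.float64 scalar
print(d ** (-1.0 / 4.0))           # nan, with a RuntimeWarning
print(complex(d) ** (-1.0 / 4.0))  # a well-defined complex phase
```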
2018-05-04T13:41:35Z
[]
[]
Traceback (most recent call last): File "test_u.py", line 15, in <module> permCircuit = two_qubit_kak(perm) File "/Users/rraymondhp/miniconda3/envs/QISKitenv/lib/python3.6/site-packages/qiskit/mapper/_compiling.py", line 177, in two_qubit_kak D, P = np.linalg.eig(M2) File "/Users/rraymondhp/miniconda3/envs/QISKitenv/lib/python3.6/site-packages/numpy/linalg/linalg.py", line 1143, in eig _assertFinite(a) File "/Users/rraymondhp/miniconda3/envs/QISKitenv/lib/python3.6/site-packages/numpy/linalg/linalg.py", line 216, in _assertFinite raise LinAlgError("Array must not contain infs or NaNs") numpy.linalg.linalg.LinAlgError: Array must not contain infs or NaNs
1,481
Qiskit/qiskit
Qiskit__qiskit-4584
93a51c815ffa1f9ee3e894ec4e576f5c75128d74
diff --git a/qiskit/circuit/add_control.py b/qiskit/circuit/add_control.py --- a/qiskit/circuit/add_control.py +++ b/qiskit/circuit/add_control.py @@ -102,12 +102,6 @@ def control(operation: Union[Gate, ControlledGate], # pylint: disable=unused-import import qiskit.circuit.library.standard_gates.multi_control_rotation_gates - # check args - if num_ctrl_qubits == 0: - return operation - elif num_ctrl_qubits < 0: - raise CircuitError('number of control qubits must be positive integer') - q_control = QuantumRegister(num_ctrl_qubits, name='control') q_target = QuantumRegister(operation.num_qubits, name='target') q_ancillae = None # TODO: add diff --git a/qiskit/circuit/controlledgate.py b/qiskit/circuit/controlledgate.py --- a/qiskit/circuit/controlledgate.py +++ b/qiskit/circuit/controlledgate.py @@ -85,10 +85,8 @@ def __init__(self, name: str, num_qubits: int, params: List, qc2.draw() """ super().__init__(name, num_qubits, params, label=label) - if num_ctrl_qubits < num_qubits: - self.num_ctrl_qubits = num_ctrl_qubits - else: - raise CircuitError('number of control qubits must be less than the number of qubits') + self._num_ctrl_qubits = 1 + self.num_ctrl_qubits = num_ctrl_qubits self.base_gate = None if definition: self.definition = definition @@ -132,6 +130,31 @@ def definition(self, excited_def: List): """Set controlled gate definition with closed controls.""" super(Gate, self.__class__).definition.fset(self, excited_def) + @property + def num_ctrl_qubits(self): + """Get number of control qubits. + + Returns: + int: The number of control qubits for the gate. + """ + return self._num_ctrl_qubits + + @num_ctrl_qubits.setter + def num_ctrl_qubits(self, num_ctrl_qubits): + """Set the number of control qubits. + + Args: + num_ctrl_qubits (int): The number of control qubits in [1, num_qubits-1]. + + Raises: + CircuitError: num_ctrl_qubits is not an integer in [1, num_qubits - 1]. + """ + if (num_ctrl_qubits == int(num_ctrl_qubits) and + 1 <= num_ctrl_qubits < self.num_qubits): + self._num_ctrl_qubits = num_ctrl_qubits + else: + raise CircuitError('The number of control qubits must be in [1, num_qubits-1]') + @property def ctrl_state(self) -> int: """Return the control state of the gate as a decimal integer.""" diff --git a/qiskit/circuit/library/standard_gates/x.py b/qiskit/circuit/library/standard_gates/x.py --- a/qiskit/circuit/library/standard_gates/x.py +++ b/qiskit/circuit/library/standard_gates/x.py @@ -739,7 +739,7 @@ class MCXGate(ControlledGate): def __new__(cls, num_ctrl_qubits=None, label=None, ctrl_state=None): """Create a new MCX instance. - Depending on the number of controls, this creates an explicit X, CX, CCX, C3X or C4X + Depending on the number of controls, this creates an explicit CX, CCX, C3X or C4X instance or a generic MCX gate. """ # these gates will always be implemented for all modes of the MCX if the number of control @@ -748,8 +748,6 @@ def __new__(cls, num_ctrl_qubits=None, label=None, ctrl_state=None): 1: CXGate, 2: CCXGate } - if num_ctrl_qubits == 0: - return XGate(label=label) if num_ctrl_qubits in explicit.keys(): gate_class = explicit[num_ctrl_qubits] gate = gate_class.__new__(gate_class, label=label, ctrl_state=ctrl_state)
When `num_ctrl_qubits=0`, creating controlled gates will produce an `AttributeError` <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.14.1 - **Python version**: 3.8 - **Operating system**: both Windows and Linux ### What is the current behavior? When `num_ctrl_qubits=0`, creating controlled gates will produce an `AttributeError`. ### Steps to reproduce the problem ``` >>> from qiskit.circuit.library import ZGate >>> ZGate().control(0) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.8/dist-packages/qiskit/circuit/library/standard_gates/z.py", line 100, in control return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state) File "/usr/local/lib/python3.8/dist-packages/qiskit/circuit/gate.py", line 132, in control return add_control(self, num_ctrl_qubits, label, ctrl_state) File "/usr/local/lib/python3.8/dist-packages/qiskit/circuit/add_control.py", line 70, in add_control cgate.base_gate.label = operation.label AttributeError: 'ZGate' object has no attribute 'base_gate' ``` ### What is the expected behavior? Create a gate without control bits successfully. ### Suggested solutions This bug happens because the `control` function in`qiskit/circuit/add_control.py` [directly returns `operation` (the first argument) when `num_ctrl_qubits=0`](https://github.com/Qiskit/qiskit-terra/blob/4f804108bba528aa95e46838235754778e0cb68c/qiskit/circuit/add_control.py#L106), but the `add_control` function [expects that the return value from the `control` function has the `base_gate` attribute](https://github.com/Qiskit/qiskit-terra/blob/4f804108bba528aa95e46838235754778e0cb68c/qiskit/circuit/add_control.py#L70). Either the `control` function should always return a `Gate` object with the `base_gate` attribute, or the `add_control` function should not assume that the `base_gate` attribute always exists.
Another option would be to raise an exception if num_ctrl_qubits=0, so that whenever `control` is called a `ControlledGate` is always returned. Letting num_ctrl_qubits=0 doesn't seem necessary to support, since one could just use the original gate. Indeed, a `ControlledGate` with 0 control bits does not seem necessary to support. However, having such support would be a little more user-friendly. I discovered this bug when I tried to implement "multiply amplitude by -1 when all `n` qubits are `|1>`" for arbitrary `n` with the following code: ```python circuit.append(ZGate().control(n - 1), range(n)) ``` The code fails when `n=1`, so I have to deal with that case separately.
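A sketch of the case split the reporter ends up writing by hand; `flip_phase_on_all_ones` is a made-up helper name for illustration.

```python
from qiskit import QuantumCircuit
from qiskit.circuit.library import ZGate

def flip_phase_on_all_ones(circuit: QuantumCircuit, n: int):
    """Multiply the amplitude of |1...1> on the first n qubits by -1."""
    if n == 1:
        circuit.z(0)  # no control qubits needed for a single qubit
    else:
        circuit.append(ZGate().control(n - 1), range(n))
```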
2020-06-16T09:57:15Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.8/dist-packages/qiskit/circuit/library/standard_gates/z.py", line 100, in control return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state) File "/usr/local/lib/python3.8/dist-packages/qiskit/circuit/gate.py", line 132, in control return add_control(self, num_ctrl_qubits, label, ctrl_state) File "/usr/local/lib/python3.8/dist-packages/qiskit/circuit/add_control.py", line 70, in add_control cgate.base_gate.label = operation.label AttributeError: 'ZGate' object has no attribute 'base_gate'
1,505
Qiskit/qiskit
Qiskit__qiskit-4596
9a5d8577c10c58e28cd9d139c6a0aa0faf8bd868
diff --git a/qiskit/transpiler/passes/basis/unroll_3q_or_more.py b/qiskit/transpiler/passes/basis/unroll_3q_or_more.py --- a/qiskit/transpiler/passes/basis/unroll_3q_or_more.py +++ b/qiskit/transpiler/passes/basis/unroll_3q_or_more.py @@ -36,6 +36,9 @@ def run(self, dag): # TODO: allow choosing other possible decompositions rule = node.op.definition if not rule: + if rule == []: # empty node + dag.remove_op_node(node) + continue raise QiskitError("Cannot unroll all 3q or more gates. " "No rule to expand instruction %s." % node.op.name)
​Cannot unroll identity matrix of more than 2 qubits when coupling_map is set <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.14.1 - **Python version**: 3.8 - **Operating system**: both Windows and Linux ### What is the current behavior? The `transpile` function fails to unroll an `UnitaryGate` containing an identity matrix of more than 2 qubits when the `backend` argument is set to be a remote quantum computer or the `coupling_map` argument is set. ### Steps to reproduce the problem ``` >>> import numpy as np >>> from qiskit import IBMQ, QuantumCircuit, transpile >>> from qiskit.extensions import UnitaryGate >>> provider = IBMQ.load_account() >>> backend = provider.get_backend('ibmq_london') # arbitrary backend with at least 3 qubits >>> circuit = QuantumCircuit(3) >>> gate = UnitaryGate(np.eye(2 ** 3)) >>> circuit.append(gate, range(3)) <qiskit.circuit.instructionset.InstructionSet object at 0x7ff8b93a60d0> >>> transpile(circuit, backend=backend) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.8/dist-packages/qiskit/compiler/transpile.py", line 210, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/usr/local/lib/python3.8/dist-packages/qiskit/tools/parallel.py", line 105, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/usr/local/lib/python3.8/dist-packages/qiskit/compiler/transpile.py", line 306, in _transpile_circuit return pass_manager.run(circuit, callback=transpile_config['callback'], File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passmanager.py", line 214, in run return self._run_single_circuit(circuits, output_name, callback) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passmanager.py", line 277, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 115, in run dag = self._do_pass(pass_, dag, passset.options) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 145, in _do_pass dag = self._run_this_pass(pass_, dag) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 157, in _run_this_pass new_dag = pass_.run(dag) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 54, in run decomposition = self.run(decomposition) # recursively unroll File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 54, in run decomposition = self.run(decomposition) # recursively unroll File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 39, in run raise QiskitError("Cannot unroll all 3q or more gates. " qiskit.exceptions.QiskitError: 'Cannot unroll all 3q or more gates. No rule to expand instruction circuit9_dg.' ``` Notes: - This bug only happens when the `backend` argument is set to be a remote quantum computer or the `coupling_map` argument is set to be a coupling map of a remote quantum computer. Calling `transpile(circuit, basis_gates=['u1', 'u2', 'u3', 'cx', 'id'])` works fine. - This bug only happens when the `UnitaryGate` contains an identity matrix of more than 2 qubits. ### What is the expected behavior? 
Successfully transpile the circuit.
2020-06-19T19:50:53Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.8/dist-packages/qiskit/compiler/transpile.py", line 210, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/usr/local/lib/python3.8/dist-packages/qiskit/tools/parallel.py", line 105, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/usr/local/lib/python3.8/dist-packages/qiskit/compiler/transpile.py", line 306, in _transpile_circuit return pass_manager.run(circuit, callback=transpile_config['callback'], File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passmanager.py", line 214, in run return self._run_single_circuit(circuits, output_name, callback) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passmanager.py", line 277, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 115, in run dag = self._do_pass(pass_, dag, passset.options) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 145, in _do_pass dag = self._run_this_pass(pass_, dag) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 157, in _run_this_pass new_dag = pass_.run(dag) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 54, in run decomposition = self.run(decomposition) # recursively unroll File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 54, in run decomposition = self.run(decomposition) # recursively unroll File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 39, in run raise QiskitError("Cannot unroll all 3q or more gates. " qiskit.exceptions.QiskitError: 'Cannot unroll all 3q or more gates. No rule to expand instruction circuit9_dg.'
1,508
Qiskit/qiskit
Qiskit__qiskit-4597
81603cc65fc558c2f6b2535d29bd42d62bcc62ea
diff --git a/qiskit/visualization/latex.py b/qiskit/visualization/latex.py --- a/qiskit/visualization/latex.py +++ b/qiskit/visualization/latex.py @@ -292,7 +292,7 @@ def _get_image_depth(self): columns = 2 # add extra column if needed - if self.cregbundle and self.ops[0][0].name == "measure": + if self.cregbundle and (self.ops[0][0].name == "measure" or self.ops[0][0].condition): columns += 1 # all gates take up 1 column except from those with labels (ie cu1) @@ -387,7 +387,7 @@ def _build_latex_array(self, aliases=None): column = 1 # Leave a column to display number of classical registers if needed - if self.cregbundle and self.ops[0][0].name == "measure": + if self.cregbundle and (self.ops[0][0].name == "measure" or self.ops[0][0].condition): column += 1 for layer in self.ops: num_cols_used = 1 @@ -423,8 +423,9 @@ def _build_latex_array(self, aliases=None): temp.sort(key=int) bottom = temp[len(pos_array) - 1] gap = pos_cond - bottom - for i in range(self.cregs[if_reg]): - if if_value[i] == '1': + creg_rng = 1 if self.cregbundle else self.cregs[if_reg] + for i in range(creg_rng): + if (if_value[i] == '1' or (self.cregbundle and int(if_value) > 0)): self._latex[pos_cond + i][column] = \ "\\control \\cw \\cwx[-" + str(gap) + "]" gap = 1 @@ -551,8 +552,9 @@ def _build_latex_array(self, aliases=None): self._latex[pos_1][column] = ("\\gate{%s}" % nm) gap = pos_2 - pos_1 - for i in range(self.cregs[if_reg]): - if if_value[i] == '1': + creg_rng = 1 if self.cregbundle else self.cregs[if_reg] + for i in range(creg_rng): + if (if_value[i] == '1' or (self.cregbundle and int(if_value) > 0)): self._latex[pos_2 + i][column] = \ "\\control \\cw \\cwx[-" + str(gap) + "]" gap = 1 @@ -623,8 +625,9 @@ def _build_latex_array(self, aliases=None): bottom = temp[1] gap = pos_3 - bottom - for i in range(self.cregs[if_reg]): - if if_value[i] == '1': + creg_rng = 1 if self.cregbundle else self.cregs[if_reg] + for i in range(creg_rng): + if (if_value[i] == '1' or (self.cregbundle and int(if_value) > 0)): self._latex[pos_3 + i][column] = \ "\\control \\cw \\cwx[-" + str(gap) + "]" gap = 1 @@ -831,8 +834,9 @@ def _build_latex_array(self, aliases=None): bottom = temp[2] gap = pos_4 - bottom - for i in range(self.cregs[if_reg]): - if if_value[i] == '1': + creg_rng = 1 if self.cregbundle else self.cregs[if_reg] + for i in range(creg_rng): + if (if_value[i] == '1' or (self.cregbundle and int(if_value) > 0)): self._latex[pos_4 + i][column] = \ "\\control \\cw \\cwx[-" + str(gap) + "]" gap = 1
Latex drawer fails with conditional and cregbundle=True <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: Current master - **Python version**: 3.8 - **Operating system**: Ubuntu 18.04 ### What is the current behavior? The latex drawer fails on an index out of range when there is a conditional on the last creg and cregbundle=True. This was discovered in test_teleport in test_visualization.py when circuit_drawer sets the default cregbundle to True. ### Steps to reproduce the problem The following code works with cregbundle=False, ``` from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister qr = QuantumRegister(3, 'q') cr = ClassicalRegister(3, 'c') qc = QuantumCircuit(qr, cr) qc.x(qr[2]).c_if(cr, 2) c = qc.draw(output='latex_source', cregbundle=True) ``` and fails with this if cregbundle is True. ``` Traceback (most recent call last): File "test_latex_creg.py", line 6, in <module> c = qc.draw(output='latex_source', cregbundle=True) File "/home/ed/qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 994, in draw return circuit_drawer(self, scale=scale, File "/home/ed/qiskit/qiskit-terra/qiskit/visualization/circuit_visualization.py", line 306, in circuit_drawer return _generate_latex_source(circuit, File "/home/ed/qiskit/qiskit-terra/qiskit/visualization/circuit_visualization.py", line 605, in _generate_latex_source latex = qcimg.latex() File "/home/ed/qiskit/qiskit-terra/qiskit/visualization/latex.py", line 149, in latex self._build_latex_array(aliases) File "/home/ed/qiskit/qiskit-terra/qiskit/visualization/latex.py", line 561, in _build_latex_array self._latex[pos_2 + i][column] = \ IndexError: list index out of range ``` ### What is the expected behavior? ### Suggested solutions
2020-06-22T06:33:09Z
[]
[]
Traceback (most recent call last): File "test_latex_creg.py", line 6, in <module> c = qc.draw(output='latex_source', cregbundle=True) File "/home/ed/qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 994, in draw return circuit_drawer(self, scale=scale, File "/home/ed/qiskit/qiskit-terra/qiskit/visualization/circuit_visualization.py", line 306, in circuit_drawer return _generate_latex_source(circuit, File "/home/ed/qiskit/qiskit-terra/qiskit/visualization/circuit_visualization.py", line 605, in _generate_latex_source latex = qcimg.latex() File "/home/ed/qiskit/qiskit-terra/qiskit/visualization/latex.py", line 149, in latex self._build_latex_array(aliases) File "/home/ed/qiskit/qiskit-terra/qiskit/visualization/latex.py", line 561, in _build_latex_array self._latex[pos_2 + i][column] = \ IndexError: list index out of range
1,509
Qiskit/qiskit
Qiskit__qiskit-4747
f10e312f09342d152865b26c122eb3ee24c89b2a
diff --git a/qiskit/dagcircuit/dagcircuit.py b/qiskit/dagcircuit/dagcircuit.py --- a/qiskit/dagcircuit/dagcircuit.py +++ b/qiskit/dagcircuit/dagcircuit.py @@ -938,10 +938,16 @@ def node_eq(node_self, node_other): return rx.is_isomorphic_node_match(self._multi_graph, other._multi_graph, node_eq) - def topological_nodes(self): + def topological_nodes(self, key=None): """ Yield nodes in topological order. + Args: + key (Callable): A callable which will take a DAGNode object and + return a string sort key. If not specified the + :attr:`~qiskit.dagcircuit.DAGNode.sort_key` attribute will be + used as the sort key for each node. + Returns: generator(DAGOpNode, DAGInNode, or DAGOutNode): node in topological order """ @@ -949,16 +955,27 @@ def topological_nodes(self): def _key(x): return x.sort_key - return iter(rx.lexicographical_topological_sort(self._multi_graph, key=_key)) + if key is None: + key = _key + + return iter(rx.lexicographical_topological_sort(self._multi_graph, key=key)) - def topological_op_nodes(self): + def topological_op_nodes(self, key=None): """ Yield op nodes in topological order. + Allowed to pass in specific key to break ties in top order + + Args: + key (Callable): A callable which will take a DAGNode object and + return a string sort key. If not specified the + :attr:`~qiskit.dagcircuit.DAGNode.sort_key` attribute will be + used as the sort key for each node. + Returns: generator(DAGOpNode): op node in topological order """ - return (nd for nd in self.topological_nodes() if isinstance(nd, DAGOpNode)) + return (nd for nd in self.topological_nodes(key) if isinstance(nd, DAGOpNode)) def substitute_node_with_dag(self, node, input_dag, wires=None): """Replace one node with dag. diff --git a/qiskit/transpiler/passes/__init__.py b/qiskit/transpiler/passes/__init__.py --- a/qiskit/transpiler/passes/__init__.py +++ b/qiskit/transpiler/passes/__init__.py @@ -174,6 +174,7 @@ from .optimization import Optimize1qGates from .optimization import Optimize1qGatesDecomposition from .optimization import Collect2qBlocks +from .optimization import CollectMultiQBlocks from .optimization import ConsolidateBlocks from .optimization import CommutationAnalysis from .optimization import CommutativeCancellation diff --git a/qiskit/transpiler/passes/optimization/__init__.py b/qiskit/transpiler/passes/optimization/__init__.py --- a/qiskit/transpiler/passes/optimization/__init__.py +++ b/qiskit/transpiler/passes/optimization/__init__.py @@ -15,6 +15,7 @@ from .optimize_1q_gates import Optimize1qGates from .optimize_1q_decomposition import Optimize1qGatesDecomposition from .collect_2q_blocks import Collect2qBlocks +from .collect_multiqubit_blocks import CollectMultiQBlocks from .consolidate_blocks import ConsolidateBlocks from .commutation_analysis import CommutationAnalysis from .commutative_cancellation import CommutativeCancellation diff --git a/qiskit/transpiler/passes/optimization/collect_multiqubit_blocks.py b/qiskit/transpiler/passes/optimization/collect_multiqubit_blocks.py new file mode 100644 --- /dev/null +++ b/qiskit/transpiler/passes/optimization/collect_multiqubit_blocks.py @@ -0,0 +1,226 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2017, 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
+# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Collect sequences of uninterrupted gates acting on a number of qubits.""" + +from qiskit.transpiler.basepasses import AnalysisPass +from qiskit.circuit import Gate +from qiskit.dagcircuit import DAGOpNode, DAGInNode + + +class CollectMultiQBlocks(AnalysisPass): + """Collect sequences of uninterrupted gates acting on groups of qubits. + max_block_size specifies the maximum number of qubits that can be acted upon + by any single group of gates + + Traverse the DAG and find blocks of gates that act consecutively on + groups of qubits. Write the blocks to propert_set as a list of blocks + of the form: + [[g0, g1, g2], [g4, g5]] + Blocks are reported in a valid topological order. Further, the gates + within each block are also reported in topological order + Some gates may not be present in any block (e.g. if the number + of operands is greater than max_block_size) + + A Disjont Set Union data structure (DSU) is used to maintain blocks as + gates are processed. This data structure points each qubit to a set at all + times and the sets correspond to current blocks. These change over time + and the data structure allows these changes to be done quickly. + """ + + def __init__(self, max_block_size=2): + super().__init__() + self.parent = {} # parent array for the union + + # the dicts belowed are keyed by a qubit signifying the root of a + # set in the DSU data structure + self.bit_groups = {} # current groups of bits stored at top of trees + self.gate_groups = {} # current gate lists for the groups + + self.max_block_size = max_block_size # maximum block size + + def find_set(self, index): + """DSU function for finding root of set of items + If my parent is myself, I am the root. Otherwise we recursively + find the root for my parent. After that, we assign my parent to be + my root, saving recursion in the future. + """ + + if index not in self.parent: + self.parent[index] = index + self.bit_groups[index] = [index] + self.gate_groups[index] = [] + if self.parent[index] == index: + return index + self.parent[index] = self.find_set(self.parent[index]) + return self.parent[index] + + def union_set(self, set1, set2): + """DSU function for unioning two sets together + Find the roots of each set. Then assign one to have the other + as its parent, thus liking the sets. + Merges smaller set into larger set in order to have better runtime + """ + + set1 = self.find_set(set1) + set2 = self.find_set(set2) + if set1 == set2: + return + if len(self.gate_groups[set1]) < len(self.gate_groups[set2]): + set1, set2 = set2, set1 + self.parent[set2] = set1 + self.gate_groups[set1].extend(self.gate_groups[set2]) + self.bit_groups[set1].extend(self.bit_groups[set2]) + self.gate_groups[set2].clear() + self.bit_groups[set2].clear() + + def run(self, dag): + """Run the CollectMultiQBlocks pass on `dag`. + + The blocks contain "op" nodes in topological sort order + such that all gates in a block act on the same set of + qubits and are adjacent in the circuit. + + The blocks are built by examining predecessors and successors of + "cx" gates in the circuit. u1, u2, u3, cx, id gates will be included. 
+ + After the execution, ``property_set['block_list']`` is set to + a list of tuples of ``DAGNode`` objects + """ + + self.parent = {} # reset all variables on run + self.bit_groups = {} + self.gate_groups = {} + + block_list = [] + + def collect_key(x): + """special key function for topological ordering. + Heuristic for this is to push all gates involving measurement + or barriers, etc. as far back as possible (because they force + blocks to end). After that, we process gates in order of lowest + number of qubits acted on to largest number of qubits acted on + because these have less chance of increasing the size of blocks + The key also processes all the non operation notes first so that + input nodes do not mess with the top sort of op nodes + """ + if isinstance(x, DAGInNode): + return "a" + if not isinstance(x, DAGOpNode): + return "d" + if isinstance(x.op, Gate): + if x.op.is_parameterized() or x.op.condition is not None: + return "c" + return "b" + chr(ord("a") + len(x.qargs)) + return "d" + + op_nodes = dag.topological_op_nodes(key=collect_key) + qubit_indices = {bit: index for index, bit in enumerate(dag.qubits)} + + for nd in op_nodes: + can_process = True + makes_too_big = False + + # check if the node is a gate and if it is parameterized + if ( + nd.op.condition is not None + or nd.op.is_parameterized() + or not isinstance(nd.op, Gate) + ): + can_process = False + + cur_qubits = {qubit_indices[bit] for bit in nd.qargs} + + if can_process: + # if the gate is valid, check if grouping up the bits + # in the gate would fit within our desired max size + c_tops = set() + for bit in cur_qubits: + c_tops.add(self.find_set(bit)) + tot_size = 0 + for group in c_tops: + tot_size += len(self.bit_groups[group]) + if tot_size > self.max_block_size: + makes_too_big = True + + if not can_process: + # resolve the case where we cannot process this node + for bit in cur_qubits: + # create a gate out of me + bit = self.find_set(bit) + if len(self.gate_groups[bit]) == 0: + continue + block_list.append(self.gate_groups[bit][:]) + cur_set = set(self.bit_groups[bit]) + for v in cur_set: + # reset this bit + self.parent[v] = v + self.bit_groups[v] = [v] + self.gate_groups[v] = [] + + if makes_too_big: + # adding in all of the new qubits would make the group too big + # we must block off sub portions of the groups until the new + # group would no longer be too big + savings = {} + tot_size = 0 + for bit in cur_qubits: + top = self.find_set(bit) + if top in savings.keys(): + savings[top] = savings[top] - 1 + else: + savings[top] = len(self.bit_groups[top]) - 1 + tot_size += len(self.bit_groups[top]) + slist = [] + for item, value in savings.items(): + slist.append((value, item)) + slist.sort(reverse=True) + savings_need = tot_size - self.max_block_size + for item in slist: + # remove groups until the size created would be acceptable + # start with blocking out the group that would decrease + # the new size the most. This heuristic for which blocks we + # create does not necessarily give the optimal blocking. 
Other + # heuristics may be worth considering + if savings_need > 0: + savings_need = savings_need - item[0] + if len(self.gate_groups[item[1]]) >= 1: + block_list.append(self.gate_groups[item[1]][:]) + cur_set = set(self.bit_groups[item[1]]) + for v in cur_set: + self.parent[v] = v + self.bit_groups[v] = [v] + self.gate_groups[v] = [] + + if can_process: + # if the operation is a gate, either skip it if it is too large + # or group up all of the qubits involved in the gate + if len(cur_qubits) > self.max_block_size: + # gates acting on more qubits than max_block_size cannot + # be a part of any block and thus we skip them here. + # we have already finalized the blocks involving the gate's + # qubits in the above makes_too_big block + continue # unable to be part of a group + prev = -1 + for bit in cur_qubits: + if prev != -1: + self.union_set(prev, bit) + prev = bit + self.gate_groups[self.find_set(prev)].append(nd) + # need to turn all groups that still exist into their own blocks + for index in self.parent: + if self.parent[index] == index and len(self.gate_groups[index]) != 0: + block_list.append(self.gate_groups[index][:]) + + self.property_set["block_list"] = block_list + + return dag
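The docstrings in the patch above describe the Disjoint Set Union (union-find) bookkeeping that `CollectMultiQBlocks` uses to merge qubits into blocks. Below is a minimal standalone sketch of that find/union idea only — not the Qiskit pass itself; the class and method names here are invented for the illustration.

```python
# Minimal union-find sketch mirroring the find_set/union_set logic described
# in the CollectMultiQBlocks docstrings above. Purely illustrative.

class DisjointSets:
    def __init__(self):
        self.parent = {}      # parent pointer for each item
        self.members = {}     # items stored at each set's root

    def find(self, x):
        # Create a singleton set the first time an item is seen.
        if x not in self.parent:
            self.parent[x] = x
            self.members[x] = [x]
        # Path compression: point directly at the root.
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])
        return self.parent[x]

    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return ra
        # Merge the smaller set into the larger one, as in the pass.
        if len(self.members[ra]) < len(self.members[rb]):
            ra, rb = rb, ra
        self.parent[rb] = ra
        self.members[ra].extend(self.members[rb])
        self.members[rb] = []
        return ra


ds = DisjointSets()
ds.union(0, 1)
ds.union(1, 2)
assert ds.find(2) == ds.find(0)                        # qubits 0, 1, 2 share one block
assert sorted(ds.members[ds.find(0)]) == [0, 1, 2]
```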
​Cannot unroll identity matrix of more than 2 qubits when coupling_map is set <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.14.1 - **Python version**: 3.8 - **Operating system**: both Windows and Linux ### What is the current behavior? The `transpile` function fails to unroll an `UnitaryGate` containing an identity matrix of more than 2 qubits when the `backend` argument is set to be a remote quantum computer or the `coupling_map` argument is set. ### Steps to reproduce the problem ``` >>> import numpy as np >>> from qiskit import IBMQ, QuantumCircuit, transpile >>> from qiskit.extensions import UnitaryGate >>> provider = IBMQ.load_account() >>> backend = provider.get_backend('ibmq_london') # arbitrary backend with at least 3 qubits >>> circuit = QuantumCircuit(3) >>> gate = UnitaryGate(np.eye(2 ** 3)) >>> circuit.append(gate, range(3)) <qiskit.circuit.instructionset.InstructionSet object at 0x7ff8b93a60d0> >>> transpile(circuit, backend=backend) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.8/dist-packages/qiskit/compiler/transpile.py", line 210, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/usr/local/lib/python3.8/dist-packages/qiskit/tools/parallel.py", line 105, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/usr/local/lib/python3.8/dist-packages/qiskit/compiler/transpile.py", line 306, in _transpile_circuit return pass_manager.run(circuit, callback=transpile_config['callback'], File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passmanager.py", line 214, in run return self._run_single_circuit(circuits, output_name, callback) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passmanager.py", line 277, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 115, in run dag = self._do_pass(pass_, dag, passset.options) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 145, in _do_pass dag = self._run_this_pass(pass_, dag) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 157, in _run_this_pass new_dag = pass_.run(dag) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 54, in run decomposition = self.run(decomposition) # recursively unroll File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 54, in run decomposition = self.run(decomposition) # recursively unroll File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 39, in run raise QiskitError("Cannot unroll all 3q or more gates. " qiskit.exceptions.QiskitError: 'Cannot unroll all 3q or more gates. No rule to expand instruction circuit9_dg.' ``` Notes: - This bug only happens when the `backend` argument is set to be a remote quantum computer or the `coupling_map` argument is set to be a coupling map of a remote quantum computer. Calling `transpile(circuit, basis_gates=['u1', 'u2', 'u3', 'cx', 'id'])` works fine. - This bug only happens when the `UnitaryGate` contains an identity matrix of more than 2 qubits. ### What is the expected behavior? 
Successfully transpile the circuit.
2020-07-17T17:23:30Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.8/dist-packages/qiskit/compiler/transpile.py", line 210, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/usr/local/lib/python3.8/dist-packages/qiskit/tools/parallel.py", line 105, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/usr/local/lib/python3.8/dist-packages/qiskit/compiler/transpile.py", line 306, in _transpile_circuit return pass_manager.run(circuit, callback=transpile_config['callback'], File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passmanager.py", line 214, in run return self._run_single_circuit(circuits, output_name, callback) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passmanager.py", line 277, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 115, in run dag = self._do_pass(pass_, dag, passset.options) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 145, in _do_pass dag = self._run_this_pass(pass_, dag) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/runningpassmanager.py", line 157, in _run_this_pass new_dag = pass_.run(dag) File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 54, in run decomposition = self.run(decomposition) # recursively unroll File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 54, in run decomposition = self.run(decomposition) # recursively unroll File "/usr/local/lib/python3.8/dist-packages/qiskit/transpiler/passes/basis/unroll_3q_or_more.py", line 39, in run raise QiskitError("Cannot unroll all 3q or more gates. " qiskit.exceptions.QiskitError: 'Cannot unroll all 3q or more gates. No rule to expand instruction circuit9_dg.'
1531
Qiskit/qiskit
Qiskit__qiskit-4840
1468ffb55a70c949147bac3cac052bd483b801bd
diff --git a/qiskit/transpiler/passes/optimization/consolidate_blocks.py b/qiskit/transpiler/passes/optimization/consolidate_blocks.py --- a/qiskit/transpiler/passes/optimization/consolidate_blocks.py +++ b/qiskit/transpiler/passes/optimization/consolidate_blocks.py @@ -60,7 +60,10 @@ def __init__(self, self.decomposer = TwoQubitBasisDecomposer(kak_basis_gate) elif basis_gates is not None: kak_basis_gate = unitary_synthesis._choose_kak_gate(basis_gates) - self.decomposer = TwoQubitBasisDecomposer(kak_basis_gate) + if kak_basis_gate is not None: + self.decomposer = TwoQubitBasisDecomposer(kak_basis_gate) + else: + self.decomposer = None else: self.decomposer = TwoQubitBasisDecomposer(CXGate())
Transpiling 1q circuit at optimization level 3 breaks in 1q basis <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 5bdd12db84b2ef13292d26cd6750fa8fabd61bb3 - **Python version**: 3.7.5 - **Operating system**: MacOs Catalina ### What is the current behavior? The following code breaks if `basis_gates=['u3']` and `optimization_level=3`. It works for `basis_gates=['u3','cx']` or if `optimization_level<=2`. ```python qc = QuantumCircuit(1) qc.x(0) transpile(qc, basis_gates=['u3'], optimization_level=3) ``` Edit: changed to a simpler example. ### Traceback ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/compiler/transpile.py", line 214, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/tools/parallel.py", line 108, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/compiler/transpile.py", line 304, in _transpile_circuit pass_manager = level_3_pass_manager(pass_manager_config) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/transpiler/preset_passmanagers/level3.py", line 176, in level_3_pass_manager ConsolidateBlocks(basis_gates=basis_gates), File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/transpiler/basepasses.py", line 31, in __call__ pass_instance = type.__call__(cls, *args, **kwargs) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/transpiler/passes/optimization/consolidate_blocks.py", line 63, in __init__ self.decomposer = TwoQubitBasisDecomposer(kak_basis_gate) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/quantum_info/synthesis/two_qubit_decompose.py", line 293, in __init__ basis = self.basis = TwoQubitWeylDecomposition(Operator(gate).data) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/quantum_info/operators/operator.py", line 103, in __init__ raise QiskitError("Invalid input data format for Operator") qiskit.exceptions.QiskitError: 'Invalid input data format for Operator' ``` ### What is the expected behavior? Transpile with only a single qubit gate, if possible (or throw a meaningful error).
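For reference, a hedged sketch of the guard the patch above introduces: only build a two-qubit decomposer when a usable KAK gate can be chosen from the basis, and otherwise leave the decomposer unset. This is a simplified stand-in, not the actual `ConsolidateBlocks` code; `_pick_kak_gate` and `TWO_QUBIT_KAK_GATES` are hypothetical placeholders for Terra's internal `_choose_kak_gate` machinery.

```python
# Simplified stand-in for the ConsolidateBlocks constructor logic after the fix.
TWO_QUBIT_KAK_GATES = {'cx', 'cz', 'iswap', 'rxx'}   # assumption for the sketch

def _pick_kak_gate(basis_gates):
    """Return the first two-qubit KAK gate name found in basis_gates, or None."""
    if basis_gates is None:
        return None
    for name in basis_gates:
        if name in TWO_QUBIT_KAK_GATES:
            return name
    return None

def make_decomposer(basis_gates):
    kak = _pick_kak_gate(basis_gates)
    if kak is None:
        # One-qubit-only basis such as ['u3']: nothing to consolidate into,
        # so skip building a TwoQubitBasisDecomposer instead of crashing.
        return None
    return "TwoQubitBasisDecomposer({})".format(kak)   # stand-in for the real object

print(make_decomposer(['u3']))        # None -- previously raised QiskitError
print(make_decomposer(['u3', 'cx']))  # TwoQubitBasisDecomposer(cx)
```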
I got the expected behaviour; try updating, or moving to another (lower) version of Terra. On stable it still works, right, but we should fix it on the master version (ideally before the release). Which versions did you try for transpiling? I'm running on 5bdd12db84b2ef13292d26cd6750fa8fabd61bb3 (version of July 30th).
2020-07-31T20:17:47Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/compiler/transpile.py", line 214, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/tools/parallel.py", line 108, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/compiler/transpile.py", line 304, in _transpile_circuit pass_manager = level_3_pass_manager(pass_manager_config) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/transpiler/preset_passmanagers/level3.py", line 176, in level_3_pass_manager ConsolidateBlocks(basis_gates=basis_gates), File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/transpiler/basepasses.py", line 31, in __call__ pass_instance = type.__call__(cls, *args, **kwargs) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/transpiler/passes/optimization/consolidate_blocks.py", line 63, in __init__ self.decomposer = TwoQubitBasisDecomposer(kak_basis_gate) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/quantum_info/synthesis/two_qubit_decompose.py", line 293, in __init__ basis = self.basis = TwoQubitWeylDecomposition(Operator(gate).data) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/quantum_info/operators/operator.py", line 103, in __init__ raise QiskitError("Invalid input data format for Operator") qiskit.exceptions.QiskitError: 'Invalid input data format for Operator'
1547
Qiskit/qiskit
Qiskit__qiskit-4887
5edca05b5188373726f3cc667e1f05bb067048a3
diff --git a/qiskit/circuit/quantumcircuit.py b/qiskit/circuit/quantumcircuit.py --- a/qiskit/circuit/quantumcircuit.py +++ b/qiskit/circuit/quantumcircuit.py @@ -25,6 +25,7 @@ from qiskit.util import is_main_process from qiskit.circuit.instruction import Instruction from qiskit.circuit.gate import Gate +from qiskit.circuit.parameter import Parameter from qiskit.qasm.qasm import Qasm from qiskit.circuit.exceptions import CircuitError from .parameterexpression import ParameterExpression @@ -823,6 +824,12 @@ def append(self, instruction, qargs=None, cargs=None): if not isinstance(instruction, Instruction) and hasattr(instruction, "to_instruction"): instruction = instruction.to_instruction() + # Make copy of parameterized gate instances + if hasattr(instruction, 'params'): + is_parameter = any([isinstance(param, Parameter) for param in instruction.params]) + if is_parameter: + instruction = copy.deepcopy(instruction) + expanded_qargs = [self.qbit_argument_conversion(qarg) for qarg in qargs or []] expanded_cargs = [self.cbit_argument_conversion(carg) for carg in cargs or []]
Re-using parameterized gate instances breaks upon in-place parameter assigning <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: e03cb1b7fd - **Python version**: 3.7.7 - **Operating system**: macOS catalina ### What is the current behavior? When placing the same parameterized gate into multiple circuits and binding the parameter value in place, the parameter in the gate instance is updated but the circuits containing this gate still have the old parameter in their parameter table (at least I think that's what's happening). This leads to issues as: ```python >>> from qiskit.circuit.library import RXGate >>> from qiskit.circuit import QuantumCircuit, Parameter >>> a, b = Parameter('a'), Parameter('b') >>> rx = RXGate(a) >>> qc0, qc1 = QuantumCircuit(1), QuantumCircuit(1) >>> qc0.append(rx, [0]) >>> qc1.append(rx, [0]) >>> qc0.assign_parameters({a: b}, inplace=True) >>> qc1.to_gate() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 836, in to_gate return circuit_to_gate(self, parameter_map, label=label) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/converters/circuit_to_gate.py", line 88, in circuit_to_gate target = circuit.assign_parameters(parameter_dict, inplace=False) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 1669, in assign_parameters bound_circuit._substitute_parameter(parameter, value) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 1735, in _substitute_parameter new_param = instr.params[param_index].subs({old_parameter: new_parameter_expr}) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/circuit/parameter.py", line 55, in subs return parameter_map[self] KeyError: Parameter(b) ``` I don't think this is very pressing but definitely should be fixed in the future. ### Suggested solutions * Copy the gate parameters upon appending, or * Make the circuit not explicitly store the parameters since they are already in the gate These options have different behaviour, in the second one can change the parameter of the gates "from the outside" by modifying the gate instance. Probably the safer behaviour is therefore the first option.
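The issue's first suggested solution ("copy the gate parameters upon appending") is what the patch above implements. A rough illustration of why one shared mutable gate instance bites, and of the copy-on-append behaviour, using plain Python stand-ins rather than the real Qiskit classes (all names below are illustrative):

```python
import copy

class FakeGate:
    """Toy stand-in for a parameterized gate; `params` is mutable shared state."""
    def __init__(self, params):
        self.params = params

class FakeCircuit:
    def __init__(self):
        self.data = []

    def append(self, gate):
        # Mirror of the fix: store a private copy so later in-place parameter
        # rebinding through one circuit cannot silently change another circuit.
        self.data.append(copy.deepcopy(gate))

rx = FakeGate(params=['a'])
qc0, qc1 = FakeCircuit(), FakeCircuit()
qc0.append(rx)
qc1.append(rx)

# "Assign" parameter a -> b via qc0 only.
qc0.data[0].params[0] = 'b'

assert qc1.data[0].params == ['a']   # qc1 is unaffected thanks to the copy
```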
2020-08-07T01:40:10Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 836, in to_gate return circuit_to_gate(self, parameter_map, label=label) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/converters/circuit_to_gate.py", line 88, in circuit_to_gate target = circuit.assign_parameters(parameter_dict, inplace=False) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 1669, in assign_parameters bound_circuit._substitute_parameter(parameter, value) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 1735, in _substitute_parameter new_param = instr.params[param_index].subs({old_parameter: new_parameter_expr}) File "/Users/jul/Work/Qiskit/qiskit-terra/qiskit/circuit/parameter.py", line 55, in subs return parameter_map[self] KeyError: Parameter(b)
1557
Qiskit/qiskit
Qiskit__qiskit-4955
6220b8cddebd1cd24c2b1eef1cdf258979649550
diff --git a/qiskit/extensions/unitary.py b/qiskit/extensions/unitary.py --- a/qiskit/extensions/unitary.py +++ b/qiskit/extensions/unitary.py @@ -19,7 +19,7 @@ from qiskit.circuit import Gate, ControlledGate from qiskit.circuit import QuantumCircuit -from qiskit.circuit import QuantumRegister +from qiskit.circuit import QuantumRegister, Qubit from qiskit.circuit.exceptions import CircuitError from qiskit.circuit._utils import _compute_control_matrix from qiskit.circuit.library.standard_gates import U3Gate @@ -213,9 +213,14 @@ def validate_parameter(self, parameter): def unitary(self, obj, qubits, label=None): """Apply unitary gate to q.""" + gate = UnitaryGate(obj, label=label) if isinstance(qubits, QuantumRegister): qubits = qubits[:] - return self.append(UnitaryGate(obj, label=label), qubits, []) + # for single qubit unitary gate, allow an 'int' or a 'list of ints' as qubits. + if gate.num_qubits == 1: + if isinstance(qubits, (int, Qubit)) or len(qubits) > 1: + qubits = [qubits] + return self.append(gate, qubits, []) QuantumCircuit.unitary = unitary
QuantumCircuit.unitary doesn't accept single integer qargs <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: master @ 251930a - **Python version**: 3.5 - **Operating system**: osx ### What is the current behavior? ``` >>> import qiskit as qk >>> qc = qk.QuantumCircuit(1) >>> qc.x(0) >>> qc.barrier(0) >>> qc.unitary([[0,1], [1,0]], 0) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/extensions/unitary.py", line 211, in unitary return self.append(UnitaryGate(obj, label=label), qubits, []) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 796, in append for (qarg, carg) in instruction.broadcast_arguments(expanded_qargs, expanded_cargs): File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/gate.py", line 212, in broadcast_arguments 'The amount of qubit/clbit arguments does not match the gate expectation.') qiskit.circuit.exceptions.CircuitError: 'The amount of qubit/clbit arguments does not match the gate expectation.' >>> qc.unitary([[0,1], [1,0]], [0]) # This works ``` ### Steps to reproduce the problem ### What is the expected behavior? ### Suggested solutions
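The essence of the patch above is an argument-normalization step: for a single-qubit unitary, a bare integer (or single bit object) is wrapped in a list before being handed to `append()`. A minimal sketch of that normalization, detached from Qiskit (the function name is invented for the illustration and ignores the multi-qubit broadcast case handled by the real code):

```python
# Sketch of the qubit-argument normalization for a single-qubit unitary:
# accept a bare int as well as any iterable of qubit indices.

def normalize_single_qubit_qargs(qubits):
    if isinstance(qubits, int):
        return [qubits]          # qc.unitary(mat, 0) style call
    return list(qubits)          # qc.unitary(mat, [0]) / range(1) style call

print(normalize_single_qubit_qargs(0))         # [0]
print(normalize_single_qubit_qargs([0]))       # [0]
print(normalize_single_qubit_qargs(range(1)))  # [0]
```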
2020-08-20T10:37:25Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/extensions/unitary.py", line 211, in unitary return self.append(UnitaryGate(obj, label=label), qubits, []) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 796, in append for (qarg, carg) in instruction.broadcast_arguments(expanded_qargs, expanded_cargs): File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/circuit/gate.py", line 212, in broadcast_arguments 'The amount of qubit/clbit arguments does not match the gate expectation.') qiskit.circuit.exceptions.CircuitError: 'The amount of qubit/clbit arguments does not match the gate expectation.'
1569
Qiskit/qiskit
Qiskit__qiskit-5020
9165f396a124fdc43e5288a7c9f3c2f70018ad66
diff --git a/qiskit/circuit/instruction.py b/qiskit/circuit/instruction.py --- a/qiskit/circuit/instruction.py +++ b/qiskit/circuit/instruction.py @@ -267,7 +267,20 @@ def inverse(self): """ if self.definition is None: raise CircuitError("inverse() not implemented for %s." % self.name) - inverse_gate = self.copy(name=self.name + '_dg') + + from qiskit.circuit import QuantumCircuit, Gate # pylint: disable=cyclic-import + if self.num_clbits: + inverse_gate = Instruction(name=self.name + '_dg', + num_qubits=self.num_qubits, + num_clbits=self.num_clbits, + params=self.params.copy()) + + else: + inverse_gate = Gate(name=self.name + '_dg', + num_qubits=self.num_qubits, + params=self.params.copy()) + + inverse_gate.definition = QuantumCircuit(*self.definition.qregs, *self.definition.cregs) inverse_gate.definition._data = [(inst.inverse(), qargs, cargs) for inst, qargs, cargs in reversed(self._definition)] diff --git a/qiskit/circuit/library/standard_gates/sx.py b/qiskit/circuit/library/standard_gates/sx.py --- a/qiskit/circuit/library/standard_gates/sx.py +++ b/qiskit/circuit/library/standard_gates/sx.py @@ -76,6 +76,10 @@ def _define(self): qc.data = rules self.definition = qc + def inverse(self): + """Return inverse SX gate (i.e. SXdg).""" + return SXdgGate() + def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None): """Return a (multi-)controlled-SX gate. @@ -150,6 +154,10 @@ def _define(self): qc.data = rules self.definition = qc + def inverse(self): + """Return inverse SXdg gate (i.e. SX).""" + return SXGate() + def to_matrix(self): """Return a numpy.array for the SXdg gate.""" return numpy.array([[1 - 1j, 1 + 1j], diff --git a/qiskit/circuit/library/standard_gates/x.py b/qiskit/circuit/library/standard_gates/x.py --- a/qiskit/circuit/library/standard_gates/x.py +++ b/qiskit/circuit/library/standard_gates/x.py @@ -515,7 +515,7 @@ def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None): def inverse(self): """Invert this gate. The C3X is its own inverse.""" - return C3XGate(angle=self._angle, ctrl_state=self.ctrl_state) + return C3XGate(angle=-self._angle, ctrl_state=self.ctrl_state) # This matrix is only correct if the angle is pi/4 # def to_matrix(self): @@ -747,6 +747,10 @@ def __init__(self, num_ctrl_qubits, label=None, ctrl_state=None, _name='mcx'): num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state, base_gate=XGate()) + def inverse(self): + """Invert this gate. The MCX is its own inverse.""" + return MCXGate(num_ctrl_qubits=self.num_ctrl_qubits, ctrl_state=self.ctrl_state) + @staticmethod def get_num_ancilla_qubits(num_ctrl_qubits, mode='noancilla'): """Get the number of required ancilla qubits without instantiating the class. @@ -806,6 +810,10 @@ class MCXGrayCode(MCXGate): def __init__(self, num_ctrl_qubits, label=None, ctrl_state=None): super().__init__(num_ctrl_qubits, label=label, ctrl_state=ctrl_state, _name='mcx_gray') + def inverse(self): + """Invert this gate. The MCX is its own inverse.""" + return MCXGrayCode(num_ctrl_qubits=self.num_ctrl_qubits, ctrl_state=self.ctrl_state) + def _define(self): """Define the MCX gate using the Gray code.""" # pylint: disable=cyclic-import @@ -835,6 +843,10 @@ def get_num_ancilla_qubits(num_ctrl_qubits, mode='recursion'): """Get the number of required ancilla qubits.""" return MCXGate.get_num_ancilla_qubits(num_ctrl_qubits, mode) + def inverse(self): + """Invert this gate. 
The MCX is its own inverse.""" + return MCXRecursive(num_ctrl_qubits=self.num_ctrl_qubits, ctrl_state=self.ctrl_state) + def _define(self): """Define the MCX gate using recursion.""" # pylint: disable=cyclic-import @@ -891,6 +903,12 @@ def __init__(self, num_ctrl_qubits, dirty_ancillas=False, label=None, ctrl_state super().__init__(num_ctrl_qubits, label=label, ctrl_state=ctrl_state, _name='mcx_vchain') self._dirty_ancillas = dirty_ancillas + def inverse(self): + """Invert this gate. The MCX is its own inverse.""" + return MCXVChain(num_ctrl_qubits=self.num_ctrl_qubits, + dirty_ancillas=self._dirty_ancillas, + ctrl_state=self.ctrl_state) + @staticmethod def get_num_ancilla_qubits(num_ctrl_qubits, mode='v-chain'): """Get the number of required ancilla qubits."""
Inverse of MCX gates generated without qubits for ancillae <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: master @ 7d79ab0 - **Python version**: 3.5 - **Operating system**: osx ### What is the current behavior? Inverting an mcx gate (with ancilla) falls back to `ControlledGate.inverse` which builds the definition of the inverse gate without ancilla. This leads to an error when transpiling because the inverse gate uses fewer qubits than the original. ### Steps to reproduce the problem ``` >>> import qiskit as qk >>> qc = qk.QuantumCircuit(5) >>> qc.mcx([0,1,2],4,[3], mode='v-chain') >>> qk.transpile(qc, basis_gates=['u3', 'cx']).count_ops() OrderedDict([('u3', 16), ('cx', 12)]) >>> qk.transpile(qc.inverse(), basis_gates=['u3', 'cx']).count_ops() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 214, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/parallel.py", line 106, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 315, in _transpile_circuit output_name=transpile_config['output_name']) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 212, in run return self._run_single_circuit(circuits, output_name, callback) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 275, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 113, in run dag = self._do_pass(pass_, dag, passset.options) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 143, in _do_pass dag = self._run_this_pass(pass_, dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 155, in _run_this_pass new_dag = pass_.run(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/basis/unroll_custom_definitions.py", line 88, in run dag.substitute_node_with_dag(node, unrolled_dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/dagcircuit/dagcircuit.py", line 842, in substitute_node_with_dag self._check_wires_list(wires, node) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/dagcircuit/dagcircuit.py", line 716, in _check_wires_list % (wire_tot, len(wires))) qiskit.dagcircuit.exceptions.DAGCircuitError: 'expected 5 wires, got 4' ``` ``` >>> print(MCXVChain(num_ctrl_qubits=3).definition) ┌───────┐ ┌───────┐ q_0: ┤0 ├─────┤0 ├ │ │ │ │ q_1: ┤1 ├─────┤1 ├ │ │ │ │ q_2: ┤ RCCX ├──■──┤ RCCX ├ │ │┌─┴─┐│ │ q_3: ┤ ├┤ X ├┤ ├ │ │└─┬─┘│ │ q_4: ┤2 ├──■──┤2 ├ └───────┘ └───────┘ >>> print(MCXVChain(num_ctrl_qubits=3).inverse().definition) q_0: ──■── │ q_1: ──■── │ q_2: ──■── ┌─┴─┐ q_3: ┤ X ├ └───┘ ``` ### Suggested solutions Adding something to `class MCXGate` like ``` def inverse(self): return self.copy() ```
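The suggested solution ("add an `inverse` returning a copy") is essentially what the patch above does for each MCX variant. A toy sketch of the shape of that fix — the class below is not the real `MCXVChain`, it only shows why returning a fresh instance of the same class preserves the ancilla wires:

```python
# Shape of the fix on a toy class: the inverse of a self-inverse
# multi-controlled X is a fresh instance of the *same* class, so the
# ancilla-aware definition is rebuilt instead of being reconstructed
# (incorrectly, with too few wires) from a reversed generic definition.

class ToyMCX:
    def __init__(self, num_ctrl_qubits, ancillas):
        self.num_ctrl_qubits = num_ctrl_qubits
        self.ancillas = ancillas

    def inverse(self):
        # Self-inverse: return an equivalent gate of the same class so the
        # wire count (controls + target + ancillas) is preserved.
        return type(self)(self.num_ctrl_qubits, self.ancillas)

g = ToyMCX(num_ctrl_qubits=3, ancillas=1)
inv = g.inverse()
assert isinstance(inv, ToyMCX) and inv.ancillas == g.ancillas
```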
2020-09-02T21:06:10Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 214, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/tools/parallel.py", line 106, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/compiler/transpile.py", line 315, in _transpile_circuit output_name=transpile_config['output_name']) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 212, in run return self._run_single_circuit(circuits, output_name, callback) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passmanager.py", line 275, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 113, in run dag = self._do_pass(pass_, dag, passset.options) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 143, in _do_pass dag = self._run_this_pass(pass_, dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 155, in _run_this_pass new_dag = pass_.run(dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/transpiler/passes/basis/unroll_custom_definitions.py", line 88, in run dag.substitute_node_with_dag(node, unrolled_dag) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/dagcircuit/dagcircuit.py", line 842, in substitute_node_with_dag self._check_wires_list(wires, node) File "/Users/kevin.krsulichibm.com/q/qiskit-terra/qiskit/dagcircuit/dagcircuit.py", line 716, in _check_wires_list % (wire_tot, len(wires))) qiskit.dagcircuit.exceptions.DAGCircuitError: 'expected 5 wires, got 4'
1578
Qiskit/qiskit
Qiskit__qiskit-5051
2a2b504f97e555571ec952d057aedcb8c344b1bb
diff --git a/qiskit/execute.py b/qiskit/execute.py --- a/qiskit/execute.py +++ b/qiskit/execute.py @@ -243,8 +243,7 @@ def execute(experiments, backend, coupling_map=coupling_map, seed_transpiler=seed_transpiler, backend_properties=backend_properties, - initial_layout=initial_layout, - backend=backend) + initial_layout=initial_layout) experiments = pass_manager.run(experiments) else: # transpiling the circuits using given transpile options
Cannot use `execute` with a `PassManager` <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.15.1 - **Python version**: 3.8.5 - **Operating system**: Ubuntu 18.04 ### What is the current behavior? Providing any `PassManager` to the `execute` method will throw an exception as it fails the `_check_conflicting_arguments` call - this method fails if a backend (a _required_ argument) is provided. ### Steps to reproduce the problem ``` from qiskit.transpiler import PassManager from qiskit import QuantumCircuit, execute from qiskit.providers.aer import Aer qc = QuantumCircuit(2) qc.h(0) qc.measure_all() b = Aer.get_backend('qasm_simulator') job = execute(qc, b, pass_manager = PassManager()) ``` ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/will/miniconda3/envs/dev/lib/python3.8/site-packages/qiskit/execute.py", line 243, in execute _check_conflicting_argument(optimization_level=optimization_level, File "/home/will/miniconda3/envs/dev/lib/python3.8/site-packages/qiskit/execute.py", line 302, in _check_conflicting_argument raise QiskitError("The parameters pass_manager conflicts with the following " qiskit.exceptions.QiskitError: 'The parameters pass_manager conflicts with the following parameter(s): backend.' ``` ### What is the expected behavior? This should not throw an exception since there are no conflicts here. ### Suggested solutions Remove `backend` from the list of arguments checked for conflicts. Or, alternatively, deprecate the use of the `pass_manager` argument (it is currently unusable anyway) to encourage users to use `PassManager.run()` and `assemble` instead.
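The issue's alternative suggestion — drive the `PassManager` yourself and assemble the result instead of passing `pass_manager` to `execute` — looks roughly like the sketch below. This assumes the Terra/Aer API of the 0.15-era report (`PassManager.run`, `qiskit.compiler.assemble`, `backend.run(qobj)`); names and signatures may differ in later releases.

```python
# Workaround sketch: skip execute()'s pass_manager argument entirely.
from qiskit import QuantumCircuit
from qiskit.compiler import assemble
from qiskit.transpiler import PassManager
from qiskit.providers.aer import Aer

qc = QuantumCircuit(2)
qc.h(0)
qc.measure_all()

pm = PassManager()           # empty pass manager, as in the report
transpiled = pm.run(qc)      # run the passes explicitly

backend = Aer.get_backend('qasm_simulator')
job = backend.run(assemble(transpiled, backend=backend))
print(job.result().get_counts())
```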
Can I help with this? I think it is, indeed, a bug. I will remove `backend` as conflicting. > it is currently unusable anyway. I'm curious. Why?
2020-09-10T01:12:26Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/will/miniconda3/envs/dev/lib/python3.8/site-packages/qiskit/execute.py", line 243, in execute _check_conflicting_argument(optimization_level=optimization_level, File "/home/will/miniconda3/envs/dev/lib/python3.8/site-packages/qiskit/execute.py", line 302, in _check_conflicting_argument raise QiskitError("The parameters pass_manager conflicts with the following " qiskit.exceptions.QiskitError: 'The parameters pass_manager conflicts with the following parameter(s): backend.'
1585
Qiskit/qiskit
Qiskit__qiskit-5060
1c89cc59c8ec45311748e3f4c37e843068155d51
diff --git a/qiskit/circuit/library/template_circuits/__init__.py b/qiskit/circuit/library/template_circuits/__init__.py new file mode 100644 --- /dev/null +++ b/qiskit/circuit/library/template_circuits/__init__.py @@ -0,0 +1,11 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2017, 2020. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals.
Aqua build fails: `No module named 'qiskit.circuit.library.template_circuits'` <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: Latest sources after https://github.com/Qiskit/qiskit-terra/commit/1c89cc59c8ec45311748e3f4c37e843068155d51 - **Python version**: any - **Operating system**: any ### What is the current behavior? 1. `from qiskit.circuit.library import NLocal` fails. 2. The aqua build fails: ``` Failed to import test module: test.optimization Traceback (most recent call last): File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/unittest/loader.py", line 470, in _find_test_path package = self._get_module_from_name(name) File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/unittest/loader.py", line 377, in _get_module_from_name __import__(name) File "/home/runner/work/qiskit-aqua/qiskit-aqua/test/__init__.py", line 15, in <module> from .base_test_case import QiskitBaseTestCase File "/home/runner/work/qiskit-aqua/qiskit-aqua/test/base_test_case.py", line 23, in <module> from qiskit.aqua import set_logging_level, QiskitLogDomains File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/qiskit/__init__.py", line 41, in <module> import qiskit.extensions File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/qiskit/extensions/__init__.py", line 47, in <module> from qiskit.circuit.library.standard_gates import * File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/qiskit/circuit/library/__init__.py", line 245, in <module> from .template_circuits import * ModuleNotFoundError: No module named 'qiskit.circuit.library.template_circuits' ``` ### Steps to reproduce the problem Logs from the aqua build: https://github.com/Qiskit/qiskit-aqua/runs/1100653018 ### What is the expected behavior? No errors. ### Suggested solutions There's missing `qiskit/circuit/library/template_circuits/__init__.py`
2020-09-11T14:15:39Z
[]
[]
Traceback (most recent call last): File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/unittest/loader.py", line 470, in _find_test_path package = self._get_module_from_name(name) File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/unittest/loader.py", line 377, in _get_module_from_name __import__(name) File "/home/runner/work/qiskit-aqua/qiskit-aqua/test/__init__.py", line 15, in <module> from .base_test_case import QiskitBaseTestCase File "/home/runner/work/qiskit-aqua/qiskit-aqua/test/base_test_case.py", line 23, in <module> from qiskit.aqua import set_logging_level, QiskitLogDomains File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/qiskit/__init__.py", line 41, in <module> import qiskit.extensions File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/qiskit/extensions/__init__.py", line 47, in <module> from qiskit.circuit.library.standard_gates import * File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/qiskit/circuit/library/__init__.py", line 245, in <module> from .template_circuits import * ModuleNotFoundError: No module named 'qiskit.circuit.library.template_circuits'
1587
Qiskit/qiskit
Qiskit__qiskit-5286
70f476bba8041b5e2ca4df923f3db829f60876e3
diff --git a/qiskit/quantum_info/states/statevector.py b/qiskit/quantum_info/states/statevector.py --- a/qiskit/quantum_info/states/statevector.py +++ b/qiskit/quantum_info/states/statevector.py @@ -677,6 +677,7 @@ def _evolve_instruction(statevec, obj, qargs=None): obj.name, type(obj.definition))) if obj.definition.global_phase: statevec._data *= np.exp(1j * float(obj.definition.global_phase)) + qubits = {qubit: i for i, qubit in enumerate(obj.definition.qubits)} for instr, qregs, cregs in obj.definition: if cregs: raise QiskitError( @@ -684,8 +685,8 @@ def _evolve_instruction(statevec, obj, qargs=None): instr.name)) # Get the integer position of the flat register if qargs is None: - new_qargs = [tup.index for tup in qregs] + new_qargs = [qubits[tup] for tup in qregs] else: - new_qargs = [qargs[tup.index] for tup in qregs] + new_qargs = [qargs[qubits[tup]] for tup in qregs] Statevector._evolve_instruction(statevec, instr, qargs=new_qargs) return statevec
Statevector.from_instruction fails for custom controlled gates <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: latest - **Python version**: - **Operating system**: ### What is the current behavior? ```python import numpy as np from qiskit import QuantumCircuit from qiskit.quantum_info import Statevector ## Create 7mod15 gate N = 15 m = int(np.ceil(np.log2(N))) U_qc = QuantumCircuit(m) U_qc.x(range(m)) U_qc.swap(1, 2) U_qc.swap(2, 3) U_qc.swap(0, 3) U = U_qc.to_gate() U.name ='{}Mod{}'.format(7, N) U_cntrl = U.control() qc = QuantumCircuit(5) qc.append(U_cntrl, range(5)) Statevector.from_instruction(qc) ``` gives Traceback (most recent call last): File "<ipython-input-127-b464b6ab1295>", line 1, in <module> Statevector.from_instruction(qc) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 529, in from_instruction return Statevector._evolve_instruction(vec, instruction) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 690, in _evolve_instruction Statevector._evolve_instruction(statevec, instr, qargs=new_qargs) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 690, in _evolve_instruction Statevector._evolve_instruction(statevec, instr, qargs=new_qargs) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 661, in _evolve_instruction return Statevector._evolve_operator(statevec, Operator(mat), qargs=qargs) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 642, in _evolve_operator np.reshape(statevec.data, pre_tensor_shape), axes), contract_shape) File "<__array_function__ internals>", line 6, in transpose File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 653, in transpose return _wrapfunc(a, 'transpose', axes) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 58, in _wrapfunc return bound(*args, **kwds) ValueError: axes don't match array ### Steps to reproduce the problem ### What is the expected behavior? ### Suggested solutions
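The core of the patch above is replacing each definition qubit's register-local `.index` with an explicit position map built by `enumerate` over the definition's qubits. A standalone sketch of why that matters, using toy bit objects rather than the real `Statevector` code:

```python
# Build an explicit {bit: position} map over a definition's qubits instead of
# trusting each bit's register-local .index, which collides when a definition
# spans multiple registers (e.g. controls plus an added control register).
from collections import namedtuple

Bit = namedtuple('Bit', ['register', 'index'])

definition_qubits = [Bit('q', 0), Bit('anc', 0), Bit('q', 1)]

# Register-local .index is ambiguous: two bits report index 0 ...
print([b.index for b in definition_qubits])               # [0, 0, 1]

# ... whereas the position map used by the fix is unambiguous.
positions = {bit: i for i, bit in enumerate(definition_qubits)}
print([positions[b] for b in definition_qubits])          # [0, 1, 2]
```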
2020-10-23T21:36:20Z
[]
[]
Traceback (most recent call last): File "<ipython-input-127-b464b6ab1295>", line 1, in <module> Statevector.from_instruction(qc) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 529, in from_instruction return Statevector._evolve_instruction(vec, instruction) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 690, in _evolve_instruction Statevector._evolve_instruction(statevec, instr, qargs=new_qargs) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 690, in _evolve_instruction Statevector._evolve_instruction(statevec, instr, qargs=new_qargs) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 661, in _evolve_instruction return Statevector._evolve_operator(statevec, Operator(mat), qargs=qargs) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/quantum_info/states/statevector.py", line 642, in _evolve_operator np.reshape(statevec.data, pre_tensor_shape), axes), contract_shape) File "<__array_function__ internals>", line 6, in transpose File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 653, in transpose return _wrapfunc(a, 'transpose', axes) File "/opt/miniconda3/envs/qiskit/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 58, in _wrapfunc return bound(*args, **kwds) ValueError: axes don't match array
1614
Qiskit/qiskit
Qiskit__qiskit-5554
21e2898c711790a6200dc90ee0952c90471dd711
diff --git a/qiskit/quantum_info/synthesis/one_qubit_decompose.py b/qiskit/quantum_info/synthesis/one_qubit_decompose.py --- a/qiskit/quantum_info/synthesis/one_qubit_decompose.py +++ b/qiskit/quantum_info/synthesis/one_qubit_decompose.py @@ -19,8 +19,8 @@ import scipy.linalg as la from qiskit.circuit.quantumcircuit import QuantumCircuit -from qiskit.circuit.library.standard_gates import (PhaseGate, U3Gate, - U1Gate, RXGate, RYGate, +from qiskit.circuit.library.standard_gates import (UGate, PhaseGate, U3Gate, + U2Gate, U1Gate, RXGate, RYGate, RZGate, RGate, SXGate) from qiskit.exceptions import QiskitError from qiskit.quantum_info.operators.predicates import is_unitary_matrix @@ -29,6 +29,7 @@ ONE_QUBIT_EULER_BASIS_GATES = { 'U3': ['u3'], + 'U321': ['u3', 'u2', 'u1'], 'U': ['u'], 'PSX': ['p', 'sx'], 'U1X': ['u1', 'rx'], @@ -69,6 +70,9 @@ class OneQubitEulerDecomposer: * - 'U3' - :math:`Z(\phi) Y(\theta) Z(\lambda)` - :math:`e^{i\gamma} U_3(\theta,\phi,\lambda)` + * - 'U321' + - :math:`Z(\phi) Y(\theta) Z(\lambda)` + - :math:`e^{i\gamma} U_3(\theta,\phi,\lambda)` * - 'U' - :math:`Z(\phi) Y(\theta) Z(\lambda)` - :math:`e^{i\gamma} U_3(\theta,\phi,\lambda)` @@ -93,7 +97,7 @@ class OneQubitEulerDecomposer: def __init__(self, basis='U3'): """Initialize decomposer - Supported bases are: 'U', 'PSX', 'ZSX', 'U3', 'U1X', 'RR', 'ZYZ', 'ZXZ', 'XYX'. + Supported bases are: 'U', 'PSX', 'ZSX', 'U321', 'U3', 'U1X', 'RR', 'ZYZ', 'ZXZ', 'XYX'. Args: basis (str): the decomposition basis [Default: 'U3'] @@ -155,6 +159,7 @@ def basis(self): def basis(self, basis): """Set the decomposition basis.""" basis_methods = { + 'U321': (self._params_u3, self._circuit_u321), 'U3': (self._params_u3, self._circuit_u3), 'U': (self._params_u3, self._circuit_u), 'PSX': (self._params_u1x, self._circuit_psx), @@ -280,13 +285,12 @@ def _circuit_zxz(theta, phi, lam, phase, - simplify=False, + simplify=True, atol=DEFAULT_ATOL): + circuit = QuantumCircuit(1, global_phase=phase) if simplify and np.isclose(theta, 0.0, atol=atol): - circuit = QuantumCircuit(1, global_phase=phase) circuit.append(RZGate(phi + lam), [0]) return circuit - circuit = QuantumCircuit(1, global_phase=phase) if not simplify or not np.isclose(lam, 0.0, atol=atol): circuit.append(RZGate(lam), [0]) if not simplify or not np.isclose(theta, 0.0, atol=atol): @@ -326,6 +330,24 @@ def _circuit_u3(theta, circuit.append(U3Gate(theta, phi, lam), [0]) return circuit + @staticmethod + def _circuit_u321(theta, + phi, + lam, + phase, + simplify=True, + atol=DEFAULT_ATOL): + rtol = 1e-9 # default is 1e-5, too far from atol=1e-12 + circuit = QuantumCircuit(1, global_phase=phase) + if simplify and (np.isclose(theta, 0.0, atol=atol, rtol=rtol)): + if not np.isclose(phi+lam, [0.0, 2*np.pi], atol=atol, rtol=rtol).any(): + circuit.append(U1Gate(_mod2pi(phi+lam)), [0]) + elif simplify and np.isclose(theta, np.pi/2, atol=atol, rtol=rtol): + circuit.append(U2Gate(phi, lam), [0]) + else: + circuit.append(U3Gate(theta, phi, lam), [0]) + return circuit + @staticmethod def _circuit_u(theta, phi, @@ -335,7 +357,7 @@ def _circuit_u(theta, atol=DEFAULT_ATOL): # pylint: disable=unused-argument circuit = QuantumCircuit(1, global_phase=phase) - circuit.u(theta, phi, lam, 0) + circuit.append(UGate(theta, phi, lam), [0]) return circuit @staticmethod diff --git a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py --- a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py +++ 
b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py @@ -13,12 +13,14 @@ """Optimize chains of single-qubit gates using Euler 1q decomposer""" import logging +import copy import numpy as np from qiskit.quantum_info import Operator from qiskit.transpiler.basepasses import TransformationPass from qiskit.quantum_info.synthesis import one_qubit_decompose +from qiskit.circuit.library.standard_gates import U3Gate from qiskit.converters import circuit_to_dag logger = logging.getLogger(__name__) @@ -40,9 +42,22 @@ def __init__(self, basis=None): if basis: self.basis = [] basis_set = set(basis) - for basis_name, gates in one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES.items(): + euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES + for euler_basis_name, gates in euler_basis_gates.items(): if set(gates).issubset(basis_set): - self.basis.append(one_qubit_decompose.OneQubitEulerDecomposer(basis_name)) + basis_copy = copy.copy(self.basis) + for base in basis_copy: + # check if gates are a superset of another basis + # and if so, remove that basis + if set(euler_basis_gates[base.basis]).issubset(set(gates)): + self.basis.remove(base) + # check if the gates are a subset of another basis + elif set(gates).issubset(set(euler_basis_gates[base.basis])): + break + # if not a subset, add it to the list + else: + self.basis.append(one_qubit_decompose.OneQubitEulerDecomposer( + euler_basis_name)) def run(self, dag): """Run the Optimize1qGatesDecomposition pass on `dag`. @@ -59,14 +74,21 @@ def run(self, dag): runs = dag.collect_1q_runs() identity_matrix = np.eye(2) for run in runs: - # Don't try to optimize a single 1q gate + single_u3 = False + # Don't try to optimize a single 1q gate, except for U3 if len(run) <= 1: params = run[0].op.params # Remove single identity gates if len(params) > 0 and np.array_equal(run[0].op.to_matrix(), identity_matrix): dag.remove_op_node(run[0]) - continue + continue + if (isinstance(run[0].op, U3Gate) and + np.isclose(float(params[0]), [0, np.pi/2], + atol=1e-12, rtol=0).any()): + single_u3 = True + else: + continue new_circs = [] operator = Operator(run[0].op) @@ -76,7 +98,8 @@ def run(self, dag): new_circs.append(decomposer(operator)) if new_circs: new_circ = min(new_circs, key=len) - if len(run) > len(new_circ): + if (len(run) > len(new_circ) or (single_u3 and + new_circ.data[0][0].name != 'u3')): new_dag = circuit_to_dag(new_circ) dag.substitute_node_with_dag(run[0], new_dag) # Delete the other nodes in the run diff --git a/qiskit/transpiler/preset_passmanagers/level1.py b/qiskit/transpiler/preset_passmanagers/level1.py --- a/qiskit/transpiler/preset_passmanagers/level1.py +++ b/qiskit/transpiler/preset_passmanagers/level1.py @@ -40,7 +40,6 @@ from qiskit.transpiler.passes import FixedPoint from qiskit.transpiler.passes import Depth from qiskit.transpiler.passes import RemoveResetInZeroState -from qiskit.transpiler.passes import Optimize1qGates from qiskit.transpiler.passes import Optimize1qGatesDecomposition from qiskit.transpiler.passes import ApplyLayout from qiskit.transpiler.passes import CheckCXDirection @@ -179,11 +178,7 @@ def _direction_condition(property_set): def _opt_control(property_set): return not property_set['depth_fixed_point'] - if basis_gates and ('u1' in basis_gates or 'u2' in basis_gates or - 'u3' in basis_gates): - _opt = [Optimize1qGates(basis_gates), CXCancellation()] - else: - _opt = [Optimize1qGatesDecomposition(basis_gates), CXCancellation()] + _opt = [Optimize1qGatesDecomposition(basis_gates), CXCancellation()] # 
10. Schedule the circuit only when scheduling_method is supplied if scheduling_method: diff --git a/qiskit/transpiler/preset_passmanagers/level2.py b/qiskit/transpiler/preset_passmanagers/level2.py --- a/qiskit/transpiler/preset_passmanagers/level2.py +++ b/qiskit/transpiler/preset_passmanagers/level2.py @@ -41,7 +41,6 @@ from qiskit.transpiler.passes import FixedPoint from qiskit.transpiler.passes import Depth from qiskit.transpiler.passes import RemoveResetInZeroState -from qiskit.transpiler.passes import Optimize1qGates from qiskit.transpiler.passes import Optimize1qGatesDecomposition from qiskit.transpiler.passes import CommutativeCancellation from qiskit.transpiler.passes import ApplyLayout @@ -175,11 +174,7 @@ def _direction_condition(property_set): def _opt_control(property_set): return not property_set['depth_fixed_point'] - if basis_gates and ('u1' in basis_gates or 'u2' in basis_gates or - 'u3' in basis_gates): - _opt = [Optimize1qGates(basis_gates), CommutativeCancellation()] - else: - _opt = [Optimize1qGatesDecomposition(basis_gates), CommutativeCancellation()] + _opt = [Optimize1qGatesDecomposition(basis_gates), CommutativeCancellation()] # 9. Schedule the circuit only when scheduling_method is supplied if scheduling_method: diff --git a/qiskit/transpiler/preset_passmanagers/level3.py b/qiskit/transpiler/preset_passmanagers/level3.py --- a/qiskit/transpiler/preset_passmanagers/level3.py +++ b/qiskit/transpiler/preset_passmanagers/level3.py @@ -42,7 +42,6 @@ from qiskit.transpiler.passes import FixedPoint from qiskit.transpiler.passes import Depth from qiskit.transpiler.passes import RemoveResetInZeroState -from qiskit.transpiler.passes import Optimize1qGates from qiskit.transpiler.passes import Optimize1qGatesDecomposition from qiskit.transpiler.passes import CommutativeCancellation from qiskit.transpiler.passes import OptimizeSwapBeforeMeasure @@ -180,23 +179,13 @@ def _opt_control(property_set): _meas = [OptimizeSwapBeforeMeasure(), RemoveDiagonalGatesBeforeMeasure()] - if basis_gates and ('u1' in basis_gates or 'u2' in basis_gates or - 'u3' in basis_gates): - _opt = [ - Collect2qBlocks(), - ConsolidateBlocks(basis_gates=basis_gates), - UnitarySynthesis(basis_gates), - Optimize1qGates(basis_gates), - CommutativeCancellation(), - ] - else: - _opt = [ - Collect2qBlocks(), - ConsolidateBlocks(basis_gates=basis_gates), - UnitarySynthesis(basis_gates), - Optimize1qGatesDecomposition(basis_gates), - CommutativeCancellation(), - ] + _opt = [ + Collect2qBlocks(), + ConsolidateBlocks(basis_gates=basis_gates), + UnitarySynthesis(basis_gates), + Optimize1qGatesDecomposition(basis_gates), + CommutativeCancellation(), + ] # Schedule the circuit only when scheduling_method is supplied if scheduling_method:
Transpilation fails with snapshot instruction <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: master - **Python version**: 3.8.5 - **Operating system**: Linux ### What is the current behavior? Transpiling a simple circuit, which contains a snapshot, with basis gates `['u', 'cx']`, raises an error. The same happens with more sets of basis gates: `['r', 'cz'], ['rz', 'rx', 'cz'], ['p', 'sx', 'cx']`. However transpiling the same circuit with basis gates `['u3', 'cx']` is OK. Note: this is working well with the stable version, the bug appears only in the master version. ### Steps to reproduce the problem ``` from qiskit import QuantumCircuit, transpile from qiskit.providers.aer import QasmSimulator from qiskit.providers.aer.extensions import snapshot_statevector backend = QasmSimulator() circ = QuantumCircuit(1) circ.z(0) circ.snapshot_statevector('final') transpile(circ, backend, basis_gates=['u3', 'cx']) print("Transpilation with ['u3', 'cx'] is fine") transpile(circ, backend, basis_gates=['u', 'cx']) print("Transpilation with ['u', 'cx'] is fine") ``` results with ``` (YaelEnv) yaelbh@iris-quantum2:~/work/not_qiskit$ python snapshot_invalid.py /home/yaelbh/work/terra/System/qiskit/__init__.py:69: RuntimeWarning: Could not import the IBMQ provider from the qiskit-ibmq-provider package. Install qiskit-ibmq-provider or check your installation. warnings.warn('Could not import the IBMQ provider from the ' /opt/anaconda3/envs/YaelEnv/lib/python3.8/site-packages/qiskit/aqua/operators/operator_globals.py:48: DeprecationWarning: `from_label` is deprecated and will be removed no earlier than 3 months after the release date. Use Pauli(label) instead. 
X = make_immutable(PrimitiveOp(Pauli.from_label('X'))) Transpilation with ['u3', 'cx'] is fine Traceback (most recent call last): File "snapshot_invalid.py", line 14, in <module> transpile(circ, backend, basis_gates=['u', 'cx']) File "/home/yaelbh/work/terra/System/qiskit/compiler/transpile.py", line 241, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/home/yaelbh/work/terra/System/qiskit/tools/parallel.py", line 112, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/home/yaelbh/work/terra/System/qiskit/compiler/transpile.py", line 324, in _transpile_circuit result = pass_manager.run(circuit, callback=transpile_config['callback'], File "/home/yaelbh/work/terra/System/qiskit/transpiler/passmanager.py", line 225, in run return self._run_single_circuit(circuits, output_name, callback) File "/home/yaelbh/work/terra/System/qiskit/transpiler/passmanager.py", line 288, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 113, in run dag = self._do_pass(pass_, dag, passset.options) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 144, in _do_pass dag = self._run_this_pass(pass_, dag) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 156, in _run_this_pass new_dag = pass_.run(dag) File "/home/yaelbh/work/terra/System/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py", line 87, in run operator = Operator(qc) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 85, in __init__ self._data = self._init_instruction(data).data File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 496, in _init_instruction op._append_instruction(instruction) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 554, in _append_instruction self._append_instruction(instr, qargs=new_qargs) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 532, in _append_instruction raise QiskitError('Cannot apply Instruction: {}'.format(obj.name)) qiskit.exceptions.QiskitError: 'Cannot apply Instruction: snapshot' ```
2020-12-21T18:51:00Z
[]
[]
Traceback (most recent call last): File "snapshot_invalid.py", line 14, in <module> transpile(circ, backend, basis_gates=['u', 'cx']) File "/home/yaelbh/work/terra/System/qiskit/compiler/transpile.py", line 241, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/home/yaelbh/work/terra/System/qiskit/tools/parallel.py", line 112, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/home/yaelbh/work/terra/System/qiskit/compiler/transpile.py", line 324, in _transpile_circuit result = pass_manager.run(circuit, callback=transpile_config['callback'], File "/home/yaelbh/work/terra/System/qiskit/transpiler/passmanager.py", line 225, in run return self._run_single_circuit(circuits, output_name, callback) File "/home/yaelbh/work/terra/System/qiskit/transpiler/passmanager.py", line 288, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 113, in run dag = self._do_pass(pass_, dag, passset.options) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 144, in _do_pass dag = self._run_this_pass(pass_, dag) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 156, in _run_this_pass new_dag = pass_.run(dag) File "/home/yaelbh/work/terra/System/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py", line 87, in run operator = Operator(qc) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 85, in __init__ self._data = self._init_instruction(data).data File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 496, in _init_instruction op._append_instruction(instruction) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 554, in _append_instruction self._append_instruction(instr, qargs=new_qargs) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 532, in _append_instruction raise QiskitError('Cannot apply Instruction: {}'.format(obj.name)) qiskit.exceptions.QiskitError: 'Cannot apply Instruction: snapshot'
1652
Qiskit/qiskit
Qiskit__qiskit-5570
74ed881a9fcad89a5e8f41a0cfdeffb98a8e3051
diff --git a/qiskit/dagcircuit/dagcircuit.py b/qiskit/dagcircuit/dagcircuit.py
--- a/qiskit/dagcircuit/dagcircuit.py
+++ b/qiskit/dagcircuit/dagcircuit.py
@@ -1384,6 +1384,8 @@ def filter_fn(node):
             return node.type == 'op' and len(node.qargs) == 1 \
                 and len(node.cargs) == 0 and node.condition is None \
                 and not node.op.is_parameterized() \
+                and isinstance(node.op, Gate) \
+                and hasattr(node.op, '__array__')
 
         group_list = rx.collect_runs(self._multi_graph, filter_fn)
         return set(tuple(x) for x in group_list)
Transpilation fails with snapshot instruction <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: master - **Python version**: 3.8.5 - **Operating system**: Linux ### What is the current behavior? Transpiling a simple circuit, which contains a snapshot, with basis gates `['u', 'cx']`, raises an error. The same happens with more sets of basis gates: `['r', 'cz'], ['rz', 'rx', 'cz'], ['p', 'sx', 'cx']`. However transpiling the same circuit with basis gates `['u3', 'cx']` is OK. Note: this is working well with the stable version, the bug appears only in the master version. ### Steps to reproduce the problem ``` from qiskit import QuantumCircuit, transpile from qiskit.providers.aer import QasmSimulator from qiskit.providers.aer.extensions import snapshot_statevector backend = QasmSimulator() circ = QuantumCircuit(1) circ.z(0) circ.snapshot_statevector('final') transpile(circ, backend, basis_gates=['u3', 'cx']) print("Transpilation with ['u3', 'cx'] is fine") transpile(circ, backend, basis_gates=['u', 'cx']) print("Transpilation with ['u', 'cx'] is fine") ``` results with ``` (YaelEnv) yaelbh@iris-quantum2:~/work/not_qiskit$ python snapshot_invalid.py /home/yaelbh/work/terra/System/qiskit/__init__.py:69: RuntimeWarning: Could not import the IBMQ provider from the qiskit-ibmq-provider package. Install qiskit-ibmq-provider or check your installation. warnings.warn('Could not import the IBMQ provider from the ' /opt/anaconda3/envs/YaelEnv/lib/python3.8/site-packages/qiskit/aqua/operators/operator_globals.py:48: DeprecationWarning: `from_label` is deprecated and will be removed no earlier than 3 months after the release date. Use Pauli(label) instead. 
X = make_immutable(PrimitiveOp(Pauli.from_label('X'))) Transpilation with ['u3', 'cx'] is fine Traceback (most recent call last): File "snapshot_invalid.py", line 14, in <module> transpile(circ, backend, basis_gates=['u', 'cx']) File "/home/yaelbh/work/terra/System/qiskit/compiler/transpile.py", line 241, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/home/yaelbh/work/terra/System/qiskit/tools/parallel.py", line 112, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/home/yaelbh/work/terra/System/qiskit/compiler/transpile.py", line 324, in _transpile_circuit result = pass_manager.run(circuit, callback=transpile_config['callback'], File "/home/yaelbh/work/terra/System/qiskit/transpiler/passmanager.py", line 225, in run return self._run_single_circuit(circuits, output_name, callback) File "/home/yaelbh/work/terra/System/qiskit/transpiler/passmanager.py", line 288, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 113, in run dag = self._do_pass(pass_, dag, passset.options) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 144, in _do_pass dag = self._run_this_pass(pass_, dag) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 156, in _run_this_pass new_dag = pass_.run(dag) File "/home/yaelbh/work/terra/System/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py", line 87, in run operator = Operator(qc) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 85, in __init__ self._data = self._init_instruction(data).data File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 496, in _init_instruction op._append_instruction(instruction) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 554, in _append_instruction self._append_instruction(instr, qargs=new_qargs) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 532, in _append_instruction raise QiskitError('Cannot apply Instruction: {}'.format(obj.name)) qiskit.exceptions.QiskitError: 'Cannot apply Instruction: snapshot' ```
Oddly enough, I was just working on this as part of #5554. This is caused by `Optimize1qGatesDecomposition.run()` calling `Operator(qc)`. Operator will fail for 'snapshot', 'delay', and 'reset'. The reason it works with 'u3' in the basis is that if the basis has 'u1', 'u2', or 'u3', the optimizer calls `Optimize1qGates.run()` instead, which does not call `Operator()`. It should run ok with `optimization_level=0` for transpile.
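To make the hint above concrete, here is a hedged sketch of why the `collect_1q_runs` filter in this record's patch keeps opaque instructions out of the single-qubit runs. The `fake_snapshot` instruction and the `is_matrix_gate` helper are stand-ins invented for this example (so it runs without qiskit-aer installed); only the `Gate`/`__array__` test mirrors the actual patch.

```python
# Illustration only: an opaque Instruction mimics Aer's snapshot, which has no
# matrix representation, so Operator() cannot be built from it.
from qiskit.circuit import Instruction, Gate
from qiskit.circuit.library.standard_gates import ZGate

fake_snapshot = Instruction("snapshot", num_qubits=1, num_clbits=0, params=[])
z_gate = ZGate()

def is_matrix_gate(op):
    """Same spirit as the patched filter_fn: only unitary gates with a matrix."""
    return isinstance(op, Gate) and hasattr(op, "__array__")

print(is_matrix_gate(z_gate))         # expected: True  -> eligible for 1q resynthesis
print(is_matrix_gate(fake_snapshot))  # expected: False -> left alone by the pass
```

Because the snapshot never enters a 1q run, the pass never calls `Operator()` on it, which is what raised the `QiskitError` shown in the traceback.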
2021-01-01T10:05:49Z
[]
[]
Traceback (most recent call last): File "snapshot_invalid.py", line 14, in <module> transpile(circ, backend, basis_gates=['u', 'cx']) File "/home/yaelbh/work/terra/System/qiskit/compiler/transpile.py", line 241, in transpile circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args))) File "/home/yaelbh/work/terra/System/qiskit/tools/parallel.py", line 112, in parallel_map return [task(values[0], *task_args, **task_kwargs)] File "/home/yaelbh/work/terra/System/qiskit/compiler/transpile.py", line 324, in _transpile_circuit result = pass_manager.run(circuit, callback=transpile_config['callback'], File "/home/yaelbh/work/terra/System/qiskit/transpiler/passmanager.py", line 225, in run return self._run_single_circuit(circuits, output_name, callback) File "/home/yaelbh/work/terra/System/qiskit/transpiler/passmanager.py", line 288, in _run_single_circuit result = running_passmanager.run(circuit, output_name=output_name, callback=callback) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 113, in run dag = self._do_pass(pass_, dag, passset.options) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 144, in _do_pass dag = self._run_this_pass(pass_, dag) File "/home/yaelbh/work/terra/System/qiskit/transpiler/runningpassmanager.py", line 156, in _run_this_pass new_dag = pass_.run(dag) File "/home/yaelbh/work/terra/System/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py", line 87, in run operator = Operator(qc) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 85, in __init__ self._data = self._init_instruction(data).data File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 496, in _init_instruction op._append_instruction(instruction) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 554, in _append_instruction self._append_instruction(instr, qargs=new_qargs) File "/home/yaelbh/work/terra/System/qiskit/quantum_info/operators/operator.py", line 532, in _append_instruction raise QiskitError('Cannot apply Instruction: {}'.format(obj.name)) qiskit.exceptions.QiskitError: 'Cannot apply Instruction: snapshot'
1656
Qiskit/qiskit
Qiskit__qiskit-5755
123d829acb824ba906a10d2f06b92891a9f34221
diff --git a/qiskit/pulse/schedule.py b/qiskit/pulse/schedule.py
--- a/qiskit/pulse/schedule.py
+++ b/qiskit/pulse/schedule.py
@@ -24,6 +24,8 @@
 from collections import defaultdict
 from typing import List, Tuple, Iterable, Union, Dict, Callable, Set, Optional, Any
 
+import numpy as np
+
 from qiskit.circuit.parameter import Parameter
 from qiskit.circuit.parameterexpression import ParameterExpression, ParameterValueType
 from qiskit.pulse.channels import Channel
@@ -521,7 +523,7 @@ def _add_timeslots(self,
         Raises:
             PulseError: If timeslots overlap or an invalid start time is provided.
         """
-        if not isinstance(time, int):
+        if not np.issubdtype(type(time), np.integer):
             raise PulseError("Schedule start time must be an integer.")
 
         other_timeslots = _get_timeslots(schedule)
Pulse Schedule durations fail with numpy integers <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.16.1 - **Python version**: 3.7.6 - **Operating system**: Linux via WSL ### What is the current behavior? ``PulseError`` (from https://github.com/Qiskit/qiskit-terra/blob/7710880167ddf6e11922ce608e6579e304c7eb04/qiskit/pulse/schedule.py#L520-L521) when a previous duration was a numpy integer. ### Steps to reproduce the problem Works: ```python import qiskit.pulse as qp chan = qp.DriveChannel(0) with qp.build() as sched: qp.delay(5, chan) qp.play(qp.library.Constant(50, 1.0), chan) ``` Doesn't work: ```python import qiskit.pulse as qp import numpy as np chan = qp.DriveChannel(0) with qp.build() as sched: qp.delay(np.int32(5), chan) qp.play(qp.library.Constant(50, 1.0), chan) ``` Error flags on the last call to ``qp.play()``: ``` >>> with qp.build() as bad_sched: ... qp.delay(np.int32(5), chan) ... qp.play(qp.library.Constant(50, 1.0), chan) ... Traceback (most recent call last): File "<stdin>", line 3, in <module> File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 1384, in play append_instruction(instructions.Play(pulse, channel)) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 630, in append_instruction _active_builder().append_instruction(instruction) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 247, in wrapper return function(self, *args, **kwargs) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 416, in append_instruction self.context_schedule.append(instruction, inplace=True) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/schedule.py", line 308, in append return self.insert(time, schedule, name=name, inplace=inplace) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/schedule.py", line 253, in insert return self._mutable_insert(start_time, schedule) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/schedule.py", line 266, in _mutable_insert self._add_timeslots(start_time, schedule) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/schedule.py", line 477, in _add_timeslots raise PulseError("Schedule start time must be an integer.") qiskit.pulse.exceptions.PulseError: 'Schedule start time must be an integer.' ``` ### What is the expected behavior? Qiskit schedules should accept numpy integers for durations. An example of why is for an easier time generating scans using e.g. ``np.linspace()`` ### Suggested solutions Change the ``isinstance(time, int)`` check to ``isinstance(time, (int, np.integer))``
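As a quick sanity check on the suggested solution, the sketch below shows how the `np.issubdtype` form used in the patch accepts both Python and NumPy integers while still rejecting floats. `looks_like_schedule_time` is a throwaway helper written for this example, not part of the pulse API.

```python
import numpy as np

def looks_like_schedule_time(time):
    """Mirrors the patched check in Schedule._add_timeslots."""
    return np.issubdtype(type(time), np.integer)

print(looks_like_schedule_time(5))            # True: plain Python int
print(looks_like_schedule_time(np.int32(5)))  # True: NumPy integer, e.g. from np.arange
print(looks_like_schedule_time(5.0))          # False: floats are still rejected
```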
2021-01-31T03:08:41Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 3, in <module> File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 1384, in play append_instruction(instructions.Play(pulse, channel)) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 630, in append_instruction _active_builder().append_instruction(instruction) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 247, in wrapper return function(self, *args, **kwargs) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 416, in append_instruction self.context_schedule.append(instruction, inplace=True) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/schedule.py", line 308, in append return self.insert(time, schedule, name=name, inplace=inplace) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/schedule.py", line 253, in insert return self._mutable_insert(start_time, schedule) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/schedule.py", line 266, in _mutable_insert self._add_timeslots(start_time, schedule) File "/nix/store/z8hw09q3vlpxgl01d6iwpimw7fdlvqsk-python3-3.7.6-env/lib/python3.7/site-packages/qiskit/pulse/schedule.py", line 477, in _add_timeslots raise PulseError("Schedule start time must be an integer.") qiskit.pulse.exceptions.PulseError: 'Schedule start time must be an integer.'
1687
Qiskit/qiskit
Qiskit__qiskit-5807
20a0c6d8bb9e3a858833522f2e291d7cb69ea4ee
diff --git a/qiskit/circuit/quantumcircuit.py b/qiskit/circuit/quantumcircuit.py --- a/qiskit/circuit/quantumcircuit.py +++ b/qiskit/circuit/quantumcircuit.py @@ -83,7 +83,7 @@ class QuantumCircuit: name (str): the name of the quantum circuit. If not set, an automatically generated string will be assigned. - global_phase (float): The global phase of the circuit in radians. + global_phase (float or ParameterExpression): The global phase of the circuit in radians. metadata (dict): Arbitrary key value metadata to associate with the circuit. This gets stored as free-form data in a dict in the :attr:`~qiskit.circuit.QuantumCircuit.metadata` attribute. It will @@ -1806,7 +1806,14 @@ def global_phase(self, angle): @property def parameters(self): """Convenience function to get the parameters defined in the parameter table.""" - return self._parameter_table.get_keys() + # parameters from gates + params = self._parameter_table.get_keys() + + # parameters in global phase + if isinstance(self.global_phase, ParameterExpression): + return params.union(self.global_phase.parameters) + + return params @property def num_parameters(self): @@ -1882,7 +1889,7 @@ def assign_parameters(self, param_dict, inplace=False): # check that all param_dict items are in the _parameter_table for this circuit params_not_in_circuit = [param_key for param_key in unrolled_param_dict - if param_key not in self._parameter_table.keys()] + if param_key not in self.parameters] if len(params_not_in_circuit) > 0: raise CircuitError('Cannot bind parameters ({}) not present in the circuit.'.format( ', '.join(map(str, params_not_in_circuit)))) @@ -1936,25 +1943,27 @@ def _assign_parameter(self, parameter, value): value (Union(ParameterExpression, float, int)): A numeric or parametric expression to replace instances of ``parameter``. """ - for instr, param_index in self._parameter_table[parameter]: - new_param = instr.params[param_index].assign(parameter, value) - # if fully bound, validate - if len(new_param.parameters) == 0: - instr.params[param_index] = instr.validate_parameter(new_param) - else: - instr.params[param_index] = new_param + # parameter might be in global phase only + if parameter in self._parameter_table.keys(): + for instr, param_index in self._parameter_table[parameter]: + new_param = instr.params[param_index].assign(parameter, value) + # if fully bound, validate + if len(new_param.parameters) == 0: + instr.params[param_index] = instr.validate_parameter(new_param) + else: + instr.params[param_index] = new_param - self._rebind_definition(instr, parameter, value) + self._rebind_definition(instr, parameter, value) - if isinstance(value, ParameterExpression): - entry = self._parameter_table.pop(parameter) - for new_parameter in value.parameters: - if new_parameter in self._parameter_table: - self._parameter_table[new_parameter].extend(entry) - else: - self._parameter_table[new_parameter] = entry - else: - del self._parameter_table[parameter] # clear evaluated expressions + if isinstance(value, ParameterExpression): + entry = self._parameter_table.pop(parameter) + for new_parameter in value.parameters: + if new_parameter in self._parameter_table: + self._parameter_table[new_parameter].extend(entry) + else: + self._parameter_table[new_parameter] = entry + else: + del self._parameter_table[parameter] # clear evaluated expressions if (isinstance(self.global_phase, ParameterExpression) and parameter in self.global_phase.parameters):
Unable bind parameters in global phase <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: master @ c3b2d7ac - **Python version**: 3.7.9 - **Operating system**: macOS Big Sur ### What is the current behavior? If the global phase of a circuit contains a parameter it can currently not be bound: ```python >>> from qiskit.circuit import QuantumCircuit, Parameter >>> x = Parameter('x') >>> circuit = QuantumCircuit(1, global_phase=x) >>> circuit.bind_parameters({x: 2}).draw() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jul/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 1912, in bind_parameters return self.assign_parameters(value_dict) File "/Users/jul/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 1886, in assign_parameters ', '.join(map(str, params_not_in_circuit)))) qiskit.circuit.exceptions.CircuitError: 'Cannot bind parameters (x) not present in the circuit.' ``` The above bug might be due to #5648, since the low-level `_assign_parameter` seems to support global phase binding. Also, they are not listed when calling `circuit.parameters`: ```python >>> from qiskit.circuit import QuantumCircuit, Parameter >>> from qiskit.opflow import StateFn >>> x = Parameter('x') >>> circuit = QuantumCircuit(1, global_phase=x) >>> circuit.parameters set() ```
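For completeness, this is roughly what the behaviour looks like once global-phase parameters are tracked — a sketch assuming a Terra version that includes the fix above; the printed representations may differ slightly between versions.

```python
from qiskit.circuit import QuantumCircuit, Parameter

x = Parameter('x')
circuit = QuantumCircuit(1, global_phase=x)

# With the fix, the global-phase parameter is reported alongside gate parameters...
print(circuit.parameters)        # expected: a set containing Parameter(x)

# ...and can be bound like any other parameter.
bound = circuit.bind_parameters({x: 2})
print(bound.global_phase)        # expected: 2 (mod 2*pi)
```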
2021-02-07T10:00:28Z
[]
[]
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jul/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 1912, in bind_parameters return self.assign_parameters(value_dict) File "/Users/jul/Qiskit/qiskit-terra/qiskit/circuit/quantumcircuit.py", line 1886, in assign_parameters ', '.join(map(str, params_not_in_circuit)))) qiskit.circuit.exceptions.CircuitError: 'Cannot bind parameters (x) not present in the circuit.'
1695