| Column | Dtype | Stats |
| --- | --- | --- |
| url | string | lengths 59–59 |
| repository_url | string | 1 value |
| labels_url | string | lengths 73–73 |
| comments_url | string | lengths 68–68 |
| events_url | string | lengths 66–66 |
| html_url | string | lengths 49–49 |
| id | int64 | 782M–1.89B |
| node_id | string | lengths 18–24 |
| number | int64 | 4.97k–9.98k |
| title | string | lengths 2–306 |
| user | dict | |
| labels | list | |
| state | string | 2 values |
| locked | bool | 1 class |
| assignee | dict | |
| assignees | list | |
| milestone | dict | |
| comments | sequence | |
| created_at | timestamp[s] | |
| updated_at | timestamp[s] | |
| closed_at | timestamp[s] | |
| author_association | string | 4 values |
| active_lock_reason | null | |
| body | string | lengths 0–63.6k |
| reactions | dict | |
| timeline_url | string | lengths 68–68 |
| performed_via_github_app | null | |
| state_reason | string | 3 values |
| draft | bool | 0 classes |
| pull_request | dict | |
| is_pull_request | bool | 1 class |
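A schema like this maps directly onto the Hugging Face `datasets` API. The sketch below shows one way to load the data and filter rows on these columns; it assumes the dump comes from a dataset published on the Hub, and the repo id `username/kubeflow-pipelines-issues` is a hypothetical placeholder for the real dataset id.

```python
# Minimal sketch, assuming the rows below come from a dataset on the
# Hugging Face Hub. "username/kubeflow-pipelines-issues" is a hypothetical
# placeholder; substitute the actual dataset id.
from datasets import load_dataset

ds = load_dataset("username/kubeflow-pipelines-issues", split="train")

# Keep only open issues that are not pull requests, using the `state`
# and `is_pull_request` columns from the schema above.
open_issues = ds.filter(lambda row: row["state"] == "open" and not row["is_pull_request"])

# Print a few (number, title, created_at) triples.
for row in open_issues.select(range(min(5, len(open_issues)))):
    print(row["number"], row["title"], row["created_at"])
```

The sample rows below follow the column order of the schema, one field per line.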
https://api.github.com/repos/kubeflow/pipelines/issues/6156
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6156/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6156/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6156/events
https://github.com/kubeflow/pipelines/issues/6156
953,925,245
MDU6SXNzdWU5NTM5MjUyNDU=
6,156
v2 caching
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @capri-xiyue ", "PR merged https://github.com/kubeflow/pipelines/pull/6569\r\nTO DO \r\n- [ ] Update v2 caching doc ", "reassigned it to @chensun", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "Untrack https://github.com/kubeflow/pipelines/issues/8696 and close." ]
2021-07-27T14:10:00
2023-01-18T08:35:06
2023-01-18T08:35:06
CONTRIBUTOR
null
- [ ] ~#8696~
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6156/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6156/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6155
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6155/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6155/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6155/events
https://github.com/kubeflow/pipelines/issues/6155
953,915,946
MDU6SXNzdWU5NTM5MTU5NDY=
6,155
feat(backend): v2 placeholders - unique placeholders
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "Is this an issue for tracking the implementation of replacements for things such as `{{workflow.status}}`? \r\n\r\nIf so:\r\n\r\n`{{$.pipeline_task_name}}` and `{{$.pipeline_task_uuid}}` don't seem to work on Vertex AI at the moment (not sure about a self hosted KFP). Is this expected? I've also tried various placeholders to try to access the Pipeline job's status/error (eg. `{{$.error}}`, `{{$.status}}`, `{{$.error.code}}`, etc.) and nothing seems to work. How is this templating working and how does it differ from the old Argo templating?", "Hi, this is for tracking implementation of {{$.pipeline_task_name}} and {{$.pipeline_task_uuid}} like placeholders, but not status.", "Can you submit a separate issue for your feature request? And make it clearer it's for KFP v2 or Vertex.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-27T14:00:48
2023-01-18T09:43:28
null
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6155/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6155/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6154
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6154/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6154/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6154/events
https://github.com/kubeflow/pipelines/issues/6154
953,915,898
MDU6SXNzdWU5NTM5MTU4OTg=
6,154
v2 placeholders - artifact & execution output metadata logging
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[]
2021-07-27T14:00:45
2021-08-17T10:56:42
2021-08-17T10:56:42
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6154/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6154/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6153
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6153/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6153/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6153/events
https://github.com/kubeflow/pipelines/issues/6153
953,915,811
MDU6SXNzdWU5NTM5MTU4MTE=
6,153
v2 placeholders - artifact passing via URI
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[]
2021-07-27T14:00:40
2021-08-18T11:06:38
2021-08-18T11:06:38
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6153/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6153/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6152
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6152/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6152/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6152/events
https://github.com/kubeflow/pipelines/issues/6152
953,915,690
MDU6SXNzdWU5NTM5MTU2OTA=
6,152
v2 placeholders - artifact passing via local path
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[]
2021-07-27T14:00:33
2021-08-11T04:40:25
2021-08-11T04:40:25
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6152/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6152/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6151
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6151/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6151/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6151/events
https://github.com/kubeflow/pipelines/issues/6151
953,915,611
MDU6SXNzdWU5NTM5MTU2MTE=
6,151
v2 placeholders - pipeline parameter and parameter passing
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[]
2021-07-27T14:00:28
2021-08-06T01:25:51
2021-08-06T01:25:51
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6151/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6151/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6150
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6150/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6150/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6150/events
https://github.com/kubeflow/pipelines/issues/6150
953,915,541
MDU6SXNzdWU5NTM5MTU1NDE=
6,150
v2 placeholders
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[ "P0 work items done" ]
2021-07-27T14:00:24
2021-08-24T13:32:26
2021-08-24T13:32:25
CONTRIBUTOR
null
Support v2 placeholders in KFP v2. Tasks * [x] #6151 * [x] #6152 * [x] #6153 * [x] #6154 * [ ] #6155 * [x] https://github.com/kubeflow/pipelines/pull/6353
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6150/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6150/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6149
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6149/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6149/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6149/events
https://github.com/kubeflow/pipelines/issues/6149
953,915,264
MDU6SXNzdWU5NTM5MTUyNjQ=
6,149
v2 backend compiler and basic orchestration
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[ "Done" ]
2021-07-27T14:00:08
2021-08-02T14:19:40
2021-08-02T12:10:42
CONTRIBUTOR
null
Explanation: backend compiler compiles from [KFP pipeline spec](https://github.com/kubeflow/pipelines/blob/master/api/v2alpha1/pipeline_spec.proto) to argo workflow (potentially, other implementations may compile it to other execution spec). Design: [bit.ly/kfp-v2](https://bit.ly/kfp-v2) This issue tracks implementing Tasks: * [x] https://github.com/kubeflow/pipelines/pull/6111 * [x] https://github.com/kubeflow/pipelines/pull/6139 * [x] https://github.com/kubeflow/pipelines/pull/6144 * [x] #6147 * [x] https://github.com/kubeflow/pipelines/pull/6140
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6149/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6149/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6148
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6148/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6148/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6148/events
https://github.com/kubeflow/pipelines/issues/6148
953,915,190
MDU6SXNzdWU5NTM5MTUxOTA=
6,148
v2 orchestration engine
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "I'll directly track this in #6110" ]
2021-07-27T14:00:03
2021-07-28T10:53:01
2021-07-28T10:53:00
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6148/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6148/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6147
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6147/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6147/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6147/events
https://github.com/kubeflow/pipelines/issues/6147
953,914,916
MDU6SXNzdWU5NTM5MTQ5MTY=
6,147
v2 launcher with basic publisher
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[]
2021-07-27T13:59:46
2021-08-02T14:19:50
2021-07-29T06:04:19
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6147/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6147/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6146
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6146/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6146/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6146/events
https://github.com/kubeflow/pipelines/issues/6146
953,863,052
MDU6SXNzdWU5NTM4NjMwNTI=
6,146
[sdk: google vertex AI] Is it okay to keep making changes to google AIPlatformClient SDK that will be deprecated?
{ "login": "toshitanian", "id": 1647045, "node_id": "MDQ6VXNlcjE2NDcwNDU=", "avatar_url": "https://avatars.githubusercontent.com/u/1647045?v=4", "gravatar_id": "", "url": "https://api.github.com/users/toshitanian", "html_url": "https://github.com/toshitanian", "followers_url": "https://api.github.com/users/toshitanian/followers", "following_url": "https://api.github.com/users/toshitanian/following{/other_user}", "gists_url": "https://api.github.com/users/toshitanian/gists{/gist_id}", "starred_url": "https://api.github.com/users/toshitanian/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/toshitanian/subscriptions", "organizations_url": "https://api.github.com/users/toshitanian/orgs", "repos_url": "https://api.github.com/users/toshitanian/repos", "events_url": "https://api.github.com/users/toshitanian/events{/privacy}", "received_events_url": "https://api.github.com/users/toshitanian/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "The AIPlatformClient in KFP SDK will be deprecated and we would recommend starting to use `google-cloud-aiplatform`. \r\n\r\nCurrently, the `create_run_from_job_spec` is completed moved over and `create_schedule_from_job_spec` is in-progress. We are working on a code sample of usage in `google-cloud-aiplatform` (Vertex SDK) and will add to the deprecation warning when it's ready. \r\n\r\nIf it's blocking you, you can continue submit PR to KFP client to unblock, until the Vertex SDK client is ready. Otherwise, please file a feature request and assign to me. I will be reviewing them and consider adding them to Vertex SDK. \r\n\r\nThank you for your contributions!", "@ji-yaqi Thank you for your reply! Then we will keep using the KFP SDK for a while and prepare migration to the Vertex SDK. ", "@ji-yaqi what is the Google Cloud SDK equivalent of `create_schedule_from_job_spec`?" ]
2021-07-27T13:05:29
2022-03-15T16:18:49
2021-07-28T07:41:14
CONTRIBUTOR
null
### Feature Area /area sdk ### What feature would you like to see? I'm using kfp SDK to use Vertex pipeline of GCP by `AIPlatformClient`. So I want to propose several pull requests to the functions like `create_run_from_job_spec` and `create_schedule_from_job_spec` for fixing bugs and adjust specifications. But I found that `AIPlatformClient` will be deprecated in v1.9 and will be moved to `google-cloud-aiplatform` package. I looked into the document and was not able to find the features that is compatible with `AIPlatformClient.create_schedule_from_job_spec`. So I want to ask that is it okay to keep making changes to the AIPlatformClient SDK. If it's okay and will the pull requests will be merged, I will try to make pull requests for the feature request. But if it's better to move to `google-cloud-aiplatform`, I want a guide to move current workflow to the new SDK. I understand that it's still work in progress and difficult to handle, but I appreciate if someone could tell me the direction. @ji-yaqi I'm not sure you're the right person but seems that you're contributing to the google SDK, so let me mention you here for notice. <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6146/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6146/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6142
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6142/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6142/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6142/events
https://github.com/kubeflow/pipelines/issues/6142
953,107,044
MDU6SXNzdWU5NTMxMDcwNDQ=
6,142
[frontend] Shows error banner instead of stacktrace for unrecognized storage uri prefix
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-26T17:04:58
2022-03-03T02:05:15
null
COLLABORATOR
null
Currently we shows stacktrace on Input/Output banner if the artifact is `aiplatform://xxx`. We should skip the preview and downloadable link generation. Maybe show a banner to explain that this is unavailable for Google Cloud specific resource prefix.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6142/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6142/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6138
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6138/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6138/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6138/events
https://github.com/kubeflow/pipelines/issues/6138
952,758,599
MDU6SXNzdWU5NTI3NTg1OTk=
6,138
[backend] Metadata/Executions not written in 1.7.0-rc1 (New visualisations not working as a result)
{ "login": "vaskozl", "id": 3778941, "node_id": "MDQ6VXNlcjM3Nzg5NDE=", "avatar_url": "https://avatars.githubusercontent.com/u/3778941?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vaskozl", "html_url": "https://github.com/vaskozl", "followers_url": "https://api.github.com/users/vaskozl/followers", "following_url": "https://api.github.com/users/vaskozl/following{/other_user}", "gists_url": "https://api.github.com/users/vaskozl/gists{/gist_id}", "starred_url": "https://api.github.com/users/vaskozl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vaskozl/subscriptions", "organizations_url": "https://api.github.com/users/vaskozl/orgs", "repos_url": "https://api.github.com/users/vaskozl/repos", "events_url": "https://api.github.com/users/vaskozl/events{/privacy}", "received_events_url": "https://api.github.com/users/vaskozl/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "I've also just tested the issues with 1.7.0-rc2.\r\n\r\nI've tried kfp 1.4.0 and 1.6.6.", "Downgrading the metadata images seems to have solved this for me:\r\n\r\n```\r\nimages:\r\n - name: gcr.io/ml-pipeline/metadata-envoy\r\n newTag: 1.4.0\r\n - name: gcr.io/tfx-oss-public/ml_metadata_store_server\r\n newTag: 0.25.1\r\n - name: gcr.io/ml-pipeline/metadata-writer\r\n newTag: 1.4.0\r\n```\r\n\r\nI believe the `metadata-writer` container version is the most important one here, though I have no context.", "Actually reverting doesn't solve the problem completely, I still find the Statistics and Evaluator artifacts don't display.\r\n\r\nIn the markdown they have zeroed out id's:\r\n\r\n```\r\nid: 0\r\nspan: None\r\ntype_id: 0\r\n```", "There seems to be a problem with ml-metadata 1.0.0 which is required by tfx 1.0.0 and matches the grpc server of 1.7.1.\r\n\r\nDowngrading the the metadata-grpc-server doesn't work solve anything.\r\n\r\nI've gone back down to tfx 0.30 which uses metadata 0.30.0\r\n\r\nVisualisations work when I downgrade gcr.io/tfx-oss-public/ml_metadata_store_server to `0.25.1` as well.", "Hello @vaskozl , what is the TFX version you are using when you are using KFP 1.4.0?", "/cc @jiyongjung0 \r\ndo you know any context?", "Possible related: https://github.com/tensorflow/tensorflow/issues/50045", "When on 1.4.0 everything from TFX 0.27 to 1.0.0 wrote metadata. (visualizations didn't work but they do in the 1.7.0 release). I do recall having to bump the grpcio version in requirements up and down while hopping up to TFX 1.0.0 .\r\n\r\nTFX 1.0.0 -> Kubeflow 1.7.0 (with metadata) No metadata written\r\nTFX 1.0.0 -> Kubeflow 1.4.0 Partial metadata written it seemed\r\nTFX 0.30.0 -> Kubeflow 1.4.0 All as expected\r\n\r\nAs I said I'm now using 1.7.0 with just the ml_metadata_store_server downgraded below 1.0.0.", "I found some possible cause. It is related to the changes in the way TFX stores their contexts since 1.0.0 (which is related to the changes in the execution stack using TFX IR).\r\n\r\nIn TFX 0.X, the context were\r\n- type:pipeline, value: \"my-pipeline\"\r\n- type:run, value: \"my-pipeline.my-pipeline-xaew1\" (some hash is appended in the second part.)\r\n- type:component_run, value: \"my-pipeline.my-pipeline-xaew1.CsvExampleGen\"\r\n[Related code](https://github.com/tensorflow/tfx/blob/859b668f3423cd89895443d838e0b2a3e65ce0b7/tfx/orchestration/metadata.py#L60-L62)\r\n\r\nHowever in TFX 1.0, the context became\r\n- type:pipeline, value: \"my-pipeline\"\r\n- type:pipeline_run, value: \"my-pipeline-xaew1\"\r\n- type:node, value: \"my-pipeline.CsvExampleGen\"\r\n\r\n[Related code](https://github.com/tensorflow/tfx/blob/859b668f3423cd89895443d838e0b2a3e65ce0b7/tfx/dsl/compiler/compiler.py#L134-L154)\r\n\r\nSo it seems like Kubeflow Pipelines cannot find context (and artifacts) properly.\r\nI think that we should change mlmd access code [like here](https://github.com/kubeflow/pipelines/blob/a94e92e5309b371967917dded31090c63321f93c/frontend/src/mlmd/MlmdUtils.ts#L83).\r\n\r\nCC. @zhitaoli , @1025KB , @Bobgy \r\n\r\n", "Thank you @jiyongjung0 for finding the difference between TFX 0.X and 1.0+! \r\n\r\nFor backward compatibility, what should KFP frontend do to detect if a TFX pipeline is TFX 0.X or TFX 1.0+? ", "Unfortunately, it seems that there is no direct clue when finding executions. 
(Artifacts has `tfx_version` property, but there is no such information in Context / Execution.)\r\n\r\nI think that we can try to find 1.0 context first, and fallback to 0.X context if not found.", "Makes sense to me to find 1.0 context first and then fall back to 0.X", "Sounds good, agree with the fallback strategy here.", "Hello @jiyongjung0 ,\r\n\r\nCurrently we use this [code logic](https://github.com/kubeflow/pipelines/blob/a94e92e5309b371967917dded31090c63321f93c/frontend/src/mlmd/MlmdUtils.ts#L163-L169) to identify the execution for a specific node using node ID (which is the corresponding Pod ID). However, with the latest integration with TFX, we are unable to find this connection from Execution, see one of the following example for a statisticsGen execution:\r\n\r\n![tfxemptyexecution](https://user-images.githubusercontent.com/37026441/129417463-ea601d0f-86bb-4637-9c21-abfc5f47b740.png)\r\n\r\nHow do I fix the TFX integration that I get the properties correctly like the following? (From an old deployment)\r\n\r\n![tfxstepnotempty](https://user-images.githubusercontent.com/37026441/129417638-66f1da64-038b-489f-bfa6-452754353167.png)\r\n", "Hi, this is a bug from TFX side introduced in https://github.com/tensorflow/tfx/commit/24fc5d1db9198a75db11af25cf05c4d3ae05491f. It seems like we don't record pod names in TFX 1.0.0. I'll try to fix this ASAP, and will update the release plan.", "I'm trying to include the above fix in the TFX 1.2.0 which is expected to be released tomorrow. I'll update you when the release is finalized.", "TFX 1.2.0 was released today and this should be fixed. Thank you again for reporting this!", "Appreciate it a lot @jiyongjung0 for the quick fix and release of TFX 1.2.0!\r\n\r\nThe following is what I found:\r\n1. I need to make changes to notebook example for TFX v1.2.0 for pip installation and image. I send a PR for this: https://github.com/kubeflow/pipelines/pull/6381, appreciate for your review in advance!\r\n2. I am able to see HTML visualization for staticsgen, schemagen, etc. (yay!), but I am not able to see the visualization of `transform` step, for the artifact like `pre_transform_stats`. Because KFP is trying to visit `Split-eval` and `Split-train` in the code `files = tf.io.gfile.listdir('${specificUri}')`: https://github.com/kubeflow/pipelines/blob/master/frontend/src/lib/OutputArtifactLoader.ts#L304, where I don't have those files, see the screenshot below:\r\n![pretransformstat](https://user-images.githubusercontent.com/37026441/129937061-330a8f7c-c857-4df6-be1b-e8b48d9afc31.png)\r\n\r\n3. 
The step `evaluator` fails with the following logs, how does TFX utilize the KFP visualization feature?\r\n```\r\n File \"apache_beam/runners/common.py\", line 572, in apache_beam.runners.common.SimpleInvoker.invoke_process\r\n File \"/opt/conda/lib/python3.7/site-packages/tensorflow_model_analysis/model_util.py\", line 779, in process\r\n result.extend(self._batch_reducible_process(unbatched_element))\r\n File \"/opt/conda/lib/python3.7/site-packages/tensorflow_model_analysis/model_util.py\", line 928, in _batch_reducible_process\r\n input_specs = get_input_specs(model, signature_name, required) or {}\r\n File \"/opt/conda/lib/python3.7/site-packages/tensorflow_model_analysis/model_util.py\", line 472, in get_input_specs\r\n signature_name, model.signatures))\r\nValueError: tft_layer not found in model signatures: _SignatureMap({'serving_default': <ConcreteFunction signature_wrapper(*, examples) at 0x7FCBCA4E62D0>, 'transform_features': <ConcreteFunction signature_wrapper(*, examples) at 0x7FCBCA4E3410>}) [while running 'ExtractEvaluateAndWriteResults/ExtractAndEvaluate/ExtractTransformedFeatures/Predict']\r\ntime=\"2021-08-18T16:09:09.989Z\" level=error msg=\"cannot save artifact /mlpipeline-ui-metadata.json\" argo=true error=\"stat /mlpipeline-ui-metadata.json: no such file or directory\"\r\nError: exit status 1\r\n```", "@zijianjoy\r\n\r\nThank you so much!!\r\n\r\nFor 2.(Transform output) It seems like an inconsistency in TFX implementation. The artifact from Transform was added recently and I never tried before. I'll talk with other TFX folks and get back to you.\r\n\r\nFor 3.(Evaluator), We need to change the `preprocessing_function_names` in the evaluator config in 1.2.0, because the example was changed in 1.2.0. Please see https://github.com/tensorflow/tfx/blob/34bdbc8c0f7c2d0da36559c9cb7afd603e44a5e3/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_native_keras.py#L126\r\n\r\n", "Document the conversation:\r\n\r\nNow TFX moves the information in Execution to Context with type `node`. see code: https://github.com/tensorflow/tfx/blob/master/tfx/orchestration/metadata.py#L428-L435.\r\n\r\nKFP will consider the possibility to pulling context for TFX.", "@zijianjoy Was the TFX > 1.0.0 fix included in the KFP 1.7.0 release?\r\n", "@ConverJens I can confirm it has.\r\n\r\nTFX 1.2.0 and Pipelines 1.7.0 work perfectly with no patches.", "@vaskozl Great news, thank you for the information!", "Yes @ConverJens , it is as confirmed by @vaskozl .\r\n\r\nBTW, @Bobgy is working on a compatibility matrix for KFP and TFX (and more) which shows the working version combinations in the future.", "@zijianjoy Great! A compatibility matrix would be awsome!", "Hello @ConverJens , you can check out the compatibility matrix in https://www.kubeflow.org/docs/components/pipelines/installation/compatibility-matrix/ now.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "Hi, there.\r\nI am using kfp v.1.5.1 with tfx v0.28.0. \r\nI faced the issue of loading ML metadata in the Runs page, although in the compatibility matrix it is \"Fully Compatible\".\r\nThat was because of the contextName inconsistency between what frontend wants to find and that stored in the MySQL. 
\r\nThe frontend wanted to find something like \"my_pipeline.my-pipeline-xaew1\" (a hash is appended in the second part), but what is stored in MySQL is \"my-pipeline.my-pipeline-xaew1\", so an error happened.\r\nSee: https://github.com/kubeflow/pipelines/blob/1.5.1/frontend/src/lib/MlmdUtils.ts#L66\r\n![image](https://user-images.githubusercontent.com/37242439/193180832-6746895e-a851-4b6c-84d6-15226d8d3ae1.png)\r\n![image](https://user-images.githubusercontent.com/37242439/193180913-154d8683-f3ef-4862-bc4f-e0cbbe1d2020.png)\r\nAfter changing it from .join('_') to .join('-'), it works.\r\n```\r\n const pipelineName = argoWorkflowName\r\n .split('-')\r\n .slice(0, -1)\r\n .join('-');\r\n```\r\n\r\nIt also had an issue with getKfpPod using KFP_POD_NAME, because kfpRun wasn't written into the DB.\r\nSee: \r\nhttps://github.com/kubeflow/pipelines/blob/1.5.1/frontend/src/lib/MlmdUtils.ts#L146\r\nAs a workaround, using POD_NAME made it work. \r\n```\r\nexport enum KfpExecutionProperties {\r\n KFP_POD_NAME = 'kfp_pod_name',\r\n POD_NAME = 'pod_name',\r\n}\r\n...\r\n getKfpPod(execution: Execution): string | number | undefined {\r\n return (\r\n getResourceProperty(execution, KfpExecutionProperties.POD_NAME, true) ||\r\n getResourceProperty(execution, KfpExecutionProperties.KFP_POD_NAME) ||\r\n getResourceProperty(execution, KfpExecutionProperties.KFP_POD_NAME, true) ||\r\n undefined\r\n );\r\n },\r\n```\r\n![image](https://user-images.githubusercontent.com/37242439/193181625-2f9f2b23-a198-45c7-9356-d178b8a90cdd.png)\r\n" ]
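A note on the fallback strategy agreed in the thread above (look up the newer context first, then fall back to the older one): the same lookup can be sketched with the `ml-metadata` Python client. This is a minimal sketch, not the actual frontend implementation; the context type names and the gRPC host/port are assumptions to be verified against your deployment (e.g. via `store.get_context_types()`).

```python
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2

# Host/port are assumed defaults for an in-cluster MLMD gRPC service.
config = metadata_store_pb2.MetadataStoreClientConfig(
    host='metadata-grpc-service.kubeflow', port=8080)
store = metadata_store.MetadataStore(config)

def find_run_context(run_name: str):
    """Try the 1.0-style context type first, then fall back to the 0.X style.

    The type names below are illustrative placeholders, not confirmed TFX
    type names.
    """
    for type_name in ('pipeline_run', 'run'):
        context = store.get_context_by_type_and_name(type_name, run_name)
        if context is not None:
            return context
    return None
```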
2021-07-26T10:37:03
2022-09-30T03:29:40
null
CONTRIBUTOR
null
/kind bug I upgraded Kubeflow from 1.4.0 to 1.7.0-rc1 with the platform-agnostic manifests. While I now see correct visualizations of statistics from runs that happened before upgrading to 1.7.0-rc1, new runs only display the markdown details. The TFX pipelines I submit are exactly the same. On the new runs the ML Metadata tab of the components prints: "Corresponding ML Metadata not found." Furthermore, I don't see any new executions on the executions page despite running many pipelines since upgrading. I don't see anything special in the logs of the TFX pods except: ``` WARNING:absl:metadata_connection_config is not provided by IR. ``` But that was present before upgrading to 1.7.0-rc1. The only error I see in the metadata-grpc-deployment pod is: ``` name: "sp-lstm-rh6xt" Internal: mysql_query failed: errno: 1062, error: Duplicate entry '48-sp-lstm-rh6xt' for key 'type_id' Cannot create node for type_id: 48 name: "sp-lstm-rh6xt" ``` Which I also think is normal? Basically I don't think executions and artifacts are getting written to the DB for some reason in 1.7.0-rc1. Not sure how to debug this (a probe sketch follows below). This causes the visualizations to not show up as far as I can see. Metadata in the TFX pipelines is configured via the tfx.orchestration.kubeflow function get_default_kubeflow_metadata_config. Environment: Kubeflow version: 1.4.0 -> 1.7.0-rc1 kfctl version: Not used. Using tfx.orchestration.kubeflow to submit pipelines. Kubernetes platform: Upstream kubeadm: k8s v1.20.5 Kubernetes version: (use kubectl version): OS (e.g. from /etc/os-release): Centos 8 Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
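A quick way to confirm the "executions and artifacts are not written to the DB" suspicion in this report is to query MLMD directly, bypassing the UI. This is a hedged sketch using the `ml-metadata` Python client; the service host and port are assumptions based on the default KFP manifests, so adjust them for your deployment.

```python
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2

config = metadata_store_pb2.MetadataStoreClientConfig(
    host='metadata-grpc-service.kubeflow', port=8080)  # assumed defaults
store = metadata_store.MetadataStore(config)

# If these counts stop growing after new runs, the writer side is broken,
# not the UI.
print('execution types:', [t.name for t in store.get_execution_types()])
print('executions:', len(store.get_executions()))
print('artifacts:', len(store.get_artifacts()))
```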
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6138/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6138/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6134
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6134/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6134/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6134/events
https://github.com/kubeflow/pipelines/issues/6134
952,460,879
MDU6SXNzdWU5NTI0NjA4Nzk=
6,134
[sdk] v2 compiler - duplicate component definitions when used twice
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619535, "node_id": "MDU6TGFiZWw5MzA2MTk1MzU=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/priority/p2", "name": "priority/p2", "color": "fc9915", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "neuromage", "id": 206520, "node_id": "MDQ6VXNlcjIwNjUyMA==", "avatar_url": "https://avatars.githubusercontent.com/u/206520?v=4", "gravatar_id": "", "url": "https://api.github.com/users/neuromage", "html_url": "https://github.com/neuromage", "followers_url": "https://api.github.com/users/neuromage/followers", "following_url": "https://api.github.com/users/neuromage/following{/other_user}", "gists_url": "https://api.github.com/users/neuromage/gists{/gist_id}", "starred_url": "https://api.github.com/users/neuromage/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/neuromage/subscriptions", "organizations_url": "https://api.github.com/users/neuromage/orgs", "repos_url": "https://api.github.com/users/neuromage/repos", "events_url": "https://api.github.com/users/neuromage/events{/privacy}", "received_events_url": "https://api.github.com/users/neuromage/received_events", "type": "User", "site_admin": false }, { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @chensun @neuromage ", "This is a known issues, and it was a regression we introduced when workaround the limitation that IR doesn't support optional inputs.\r\nI left a TODO in the code:\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/42e13ebc41c6d318eebe797047ccd0d83641c0d8/sdk/python/kfp/dsl/_component_bridge.py#L593\r\n\r\n", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-26T03:25:11
2022-03-03T02:05:14
null
CONTRIBUTOR
null
### Environment * KFP version: 1.7.0-rc.2 <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP SDK version: master <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> Compile this pipeline in v2 mode: ``` from kfp.v2 import dsl from kfp.v2 import components @components.create_component_from_func def hello_world(text: str): print(text) return text @dsl.pipeline(name='component-used-twice') def pipeline_component_used_twice(text: str = 'hi there'): task1 = hello_world(text) task2 = hello_world(text) ``` ### Expected result <!-- What should the correct behavior be? --> In generated pipeline spec JSON, hello_world component should be generated only once. But I got `comp-hello-world` and `comp-hello-world-2`, and `exec-hello-world`, `exec-hello-world-2`. ### Materials and Reference Here's the generated JSON pipeline spec: ``` { "pipelineSpec": { "components": { "comp-hello-world": { "executorLabel": "exec-hello-world", "inputDefinitions": { "parameters": { "text": { "type": "STRING" } } } }, "comp-hello-world-2": { "executorLabel": "exec-hello-world-2", "inputDefinitions": { "parameters": { "text": { "type": "STRING" } } } } }, "deploymentSpec": { "executors": { "exec-hello-world": { "container": { "args": [ "--text", "{{$.inputs.parameters['text']}}" ], "command": [ "sh", "-ec", "program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n", "def hello_world(text):\n print(text)\n return text\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Hello world', description='')\n_parser.add_argument(\"--text\", dest=\"text\", type=str, required=True, default=argparse.SUPPRESS)\n_parsed_args = vars(_parser.parse_args())\n\n_outputs = hello_world(**_parsed_args)\n" ], "image": "python:3.7" } }, "exec-hello-world-2": { "container": { "args": [ "--text", "{{$.inputs.parameters['text']}}" ], "command": [ "sh", "-ec", "program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n", "def hello_world(text):\n print(text)\n return text\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Hello world', description='')\n_parser.add_argument(\"--text\", dest=\"text\", type=str, required=True, default=argparse.SUPPRESS)\n_parsed_args = vars(_parser.parse_args())\n\n_outputs = hello_world(**_parsed_args)\n" ], "image": "python:3.7" } } } }, "pipelineInfo": { "name": "component-used-twice" }, "root": { "dag": { "tasks": { "hello-world": { "cachingOptions": { "enableCache": true }, "componentRef": { "name": "comp-hello-world" }, "inputs": { "parameters": { "text": { "componentInputParameter": "text" } } }, "taskInfo": { "name": "hello-world" } }, "hello-world-2": { "cachingOptions": { "enableCache": true }, "componentRef": { "name": "comp-hello-world-2" }, "inputs": { "parameters": { "text": { "componentInputParameter": "text" } } }, "taskInfo": { "name": "hello-world-2" } } } }, "inputDefinitions": { "parameters": { "text": { "type": "STRING" } } } }, "schemaVersion": "2.0.0", "sdkVersion": "kfp-1.6.6" }, "runtimeConfig": { "parameters": { "text": { "stringValue": "hi 
there" } } } } ``` <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6134/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6134/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6133
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6133/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6133/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6133/events
https://github.com/kubeflow/pipelines/issues/6133
952,455,983
MDU6SXNzdWU5NTI0NTU5ODM=
6,133
v2 compatible mode -- known caveats & breaking changes
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[ "Closing because v2 compatible is no longer supported, we are working on the complete KFP v2.\n\n/cc @chensun \nWe used to have this issue for v2 compatible, a lot of them are still unsolved for v2 engine mode as well." ]
2021-07-26T03:14:40
2023-01-18T08:21:42
2022-01-20T12:22:00
CONTRIBUTOR
null
This issue is a living tracker for known caveats & breaking changes of v2 compatible mode, to facilitate visibility. The information is current as of KFP 1.7.0. Breaking changes: * https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility/#building-pipelines-using-the-kubeflow-pipelines-sdk-v2 * `command` must be specified in the [Kubeflow Pipelines component specification](https://www.kubeflow.org/docs/components/pipelines/reference/component-spec/). * https://github.com/kubeflow/pipelines/issues/5834 * https://github.com/kubeflow/pipelines/issues/5711 * v2 compatible mode requires KFP backend 1.7.0+, but v1 mode does not have any prerequisites on the KFP backend version. Known caveats: * [ ] Pipelines do not run on [the full Kubeflow] distributions with Multi-User Kubeflow Pipelines. [#5680] * [x] https://github.com/kubeflow/pipelines/issues/5831 * [ ] https://github.com/kubeflow/pipelines/issues/5673 * [x] https://github.com/kubeflow/pipelines/issues/6132 * [ ] https://github.com/kubeflow/pipelines/issues/5835 * [ ] v2 compatible mode and v1 mode have different perf / scalability characteristics, which we haven't verified at large scale. * [ ] Because there isn't enough usage yet, it's very likely there are more changes we haven't found or thought of. * [ ] When consuming an input artifact by URI, its file content is still downloaded to the container regardless. [#5671] * [ ] https://github.com/kubeflow/pipelines/issues/6429 Fixed caveats: * [x] https://github.com/kubeflow/pipelines/issues/6293 * [x] Pipelines using [v1 mlpipeline-ui-metadata visualization](/docs/components/pipelines/sdk/output-viewer/) do not compile. [#5666] * [x] UI integration is implemented. * [x] Caching is work-in-progress. [#5667] [the full Kubeflow]: /docs/components/pipelines/installation/overview/#full-kubeflow [#5680]: https://github.com/kubeflow/pipelines/issues/5680 [#5666]: https://github.com/kubeflow/pipelines/issues/5666 [#5671]: https://github.com/kubeflow/pipelines/issues/5671 [#5667]: https://github.com/kubeflow/pipelines/issues/5667
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6133/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6133/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6132
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6132/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6132/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6132/events
https://github.com/kubeflow/pipelines/issues/6132
952,446,102
MDU6SXNzdWU5NTI0NDYxMDI=
6,132
[v2compat] v1 metrics support
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
closed
false
null
[]
null
[ "Hello, on our side this is one of the most important feature for v2. \r\nBeing able to run multiple pipelines and then easily compare the metrics of each pipeline in a single view is key in our workflow. \r\nWe loved that v1 offered this and we cannot wait for it to be available on v2. \r\nCheers!", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "Closing as v2 compatible mode is no longer supported." ]
2021-07-26T02:54:20
2023-01-18T08:21:42
2023-01-18T08:21:42
CONTRIBUTOR
null
KFP v1 supports the metrics feature: https://www.kubeflow.org/docs/components/pipelines/sdk/pipelines-metrics/. V2 compatible mode does not yet support it, because supporting this feature is relatively hard. I'd like some feedback on how important this feature is for you. Please give the issue a thumbs up and, ideally, explain your use case to help us triage. ## Idea To support this feature, we need to update the persistence agent so that it also parses MLMD data, extracts the metrics artifacts, and persists them in the KFP DB (a rough sketch follows below).
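A rough sketch of the idea above with the `ml-metadata` Python client: scan for v2 metrics artifacts and hand their scalar properties to whatever persists v1-style run metrics. The type name `system.Metrics` and the service address are assumptions about a typical v2 deployment; verify them with `store.get_artifact_types()` on yours.

```python
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2

config = metadata_store_pb2.MetadataStoreClientConfig(
    host='metadata-grpc-service.kubeflow', port=8080)  # assumed defaults
store = metadata_store.MetadataStore(config)

for artifact in store.get_artifacts_by_type('system.Metrics'):
    # Scalar metrics live in custom_properties as double values.
    metrics = {key: value.double_value
               for key, value in artifact.custom_properties.items()
               if value.HasField('double_value')}
    print(artifact.id, metrics)  # a real agent would write these to the KFP DB
```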
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6132/reactions", "total_count": 13, "+1": 13, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6132/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6130
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6130/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6130/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6130/events
https://github.com/kubeflow/pipelines/issues/6130
951,987,762
MDU6SXNzdWU5NTE5ODc3NjI=
6,130
[pH] broken build at buildGcpGenericComponent
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619511, "node_id": "MDU6TGFiZWw5MzA2MTk1MTE=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/priority/p0", "name": "priority/p0", "color": "db1203", "default": false, "description": "" } ]
closed
false
{ "login": "IronPan", "id": 2348602, "node_id": "MDQ6VXNlcjIzNDg2MDI=", "avatar_url": "https://avatars.githubusercontent.com/u/2348602?v=4", "gravatar_id": "", "url": "https://api.github.com/users/IronPan", "html_url": "https://github.com/IronPan", "followers_url": "https://api.github.com/users/IronPan/followers", "following_url": "https://api.github.com/users/IronPan/following{/other_user}", "gists_url": "https://api.github.com/users/IronPan/gists{/gist_id}", "starred_url": "https://api.github.com/users/IronPan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/IronPan/subscriptions", "organizations_url": "https://api.github.com/users/IronPan/orgs", "repos_url": "https://api.github.com/users/IronPan/repos", "events_url": "https://api.github.com/users/IronPan/events{/privacy}", "received_events_url": "https://api.github.com/users/IronPan/received_events", "type": "User", "site_admin": false }
[ { "login": "IronPan", "id": 2348602, "node_id": "MDQ6VXNlcjIzNDg2MDI=", "avatar_url": "https://avatars.githubusercontent.com/u/2348602?v=4", "gravatar_id": "", "url": "https://api.github.com/users/IronPan", "html_url": "https://github.com/IronPan", "followers_url": "https://api.github.com/users/IronPan/followers", "following_url": "https://api.github.com/users/IronPan/following{/other_user}", "gists_url": "https://api.github.com/users/IronPan/gists{/gist_id}", "starred_url": "https://api.github.com/users/IronPan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/IronPan/subscriptions", "organizations_url": "https://api.github.com/users/IronPan/orgs", "repos_url": "https://api.github.com/users/IronPan/repos", "events_url": "https://api.github.com/users/IronPan/events{/privacy}", "received_events_url": "https://api.github.com/users/IronPan/received_events", "type": "User", "site_admin": false }, { "login": "SinaChavoshi", "id": 20114005, "node_id": "MDQ6VXNlcjIwMTE0MDA1", "avatar_url": "https://avatars.githubusercontent.com/u/20114005?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SinaChavoshi", "html_url": "https://github.com/SinaChavoshi", "followers_url": "https://api.github.com/users/SinaChavoshi/followers", "following_url": "https://api.github.com/users/SinaChavoshi/following{/other_user}", "gists_url": "https://api.github.com/users/SinaChavoshi/gists{/gist_id}", "starred_url": "https://api.github.com/users/SinaChavoshi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SinaChavoshi/subscriptions", "organizations_url": "https://api.github.com/users/SinaChavoshi/orgs", "repos_url": "https://api.github.com/users/SinaChavoshi/repos", "events_url": "https://api.github.com/users/SinaChavoshi/events{/privacy}", "received_events_url": "https://api.github.com/users/SinaChavoshi/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @IronPan ", "/assign @SinaChavoshi ", "P0, because this breaks all postsubmit builds, thus blocking us to release", "Pasting error messages:\r\n\r\n```\r\nStep 4/9 : RUN python3 -m pip install -r requirements.txt --quiet --no-cache-dir && rm -f requirements.txt\r\n ---> Running in ac5c4843faba\r\n ERROR: Command errored out with exit status 1:\r\n command: /usr/local/bin/python3 -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-3gpukc09/google-cloud-profiler_717d01c8a0174005b25a65673c363257/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-3gpukc09/google-cloud-profiler_717d01c8a0174005b25a65673c363257/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' bdist_wheel -d /tmp/pip-wheel-6ld2iia3\r\n cwd: /tmp/pip-install-3gpukc09/google-cloud-profiler_717d01c8a0174005b25a65673c363257/\r\n```\r\n\r\nComplete logs\r\n```\r\ncreating build/temp.linux-x86_64-3.7\r\n creating build/temp.linux-x86_64-3.7/googlecloudprofiler\r\n creating build/temp.linux-x86_64-3.7/googlecloudprofiler/src\r\n gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -Igooglecloudprofiler/src -I/usr/local/include/python3.7m -c googlecloudprofiler/src/log.cc -o build/temp.linux-x86_64-3.7/googlecloudprofiler/src/log.o -std=c++11\r\n unable to execute 'gcc': No such file or directory\r\n error: command 'gcc' failed with exit status 1\r\n ----------------------------------------\r\n ERROR: Failed building wheel for google-cloud-profiler\r\n ERROR: Command errored out with exit status 1:\r\n command: /usr/local/bin/python3 -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-3gpukc09/google-cloud-profiler_717d01c8a0174005b25a65673c363257/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-3gpukc09/google-cloud-profiler_717d01c8a0174005b25a65673c363257/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record /tmp/pip-record-4ynx33up/install-record.txt --single-version-externally-managed --compile --install-headers /usr/local/include/python3.7m/google-cloud-profiler\r\n cwd: /tmp/pip-install-3gpukc09/google-cloud-profiler_717d01c8a0174005b25a65673c363257/\r\n Complete output (22 lines):\r\n running install\r\n running build\r\n running build_py\r\n creating build\r\n creating build/lib.linux-x86_64-3.7\r\n creating build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n copying googlecloudprofiler/cpu_profiler.py -> build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n copying googlecloudprofiler/profile_pb2.py -> build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n copying googlecloudprofiler/__version__.py -> build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n copying googlecloudprofiler/backoff.py -> build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n copying googlecloudprofiler/__init__.py -> build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n copying googlecloudprofiler/client.py -> build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n copying googlecloudprofiler/builder.py -> build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n copying 
googlecloudprofiler/pythonprofiler.py -> build/lib.linux-x86_64-3.7/googlecloudprofiler\r\n running build_ext\r\n building 'googlecloudprofiler._profiler' extension\r\n creating build/temp.linux-x86_64-3.7\r\n creating build/temp.linux-x86_64-3.7/googlecloudprofiler\r\n creating build/temp.linux-x86_64-3.7/googlecloudprofiler/src\r\n gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -Igooglecloudprofiler/src -I/usr/local/include/python3.7m -c googlecloudprofiler/src/log.cc -o build/temp.linux-x86_64-3.7/googlecloudprofiler/src/log.o -std=c++11\r\n unable to execute 'gcc': No such file or directory\r\n error: command 'gcc' failed with exit status 1\r\n ----------------------------------------\r\nERROR: Command errored out with exit status 1: /usr/local/bin/python3 -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-3gpukc09/google-cloud-profiler_717d01c8a0174005b25a65673c363257/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-3gpukc09/google-cloud-profiler_717d01c8a0174005b25a65673c363257/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record /tmp/pip-record-4ynx33up/install-record.txt --single-version-externally-managed --compile --install-headers /usr/local/include/python3.7m/google-cloud-profiler Check the logs for full command output.\r\nThe command '/bin/sh -c python3 -m pip install -r requirements.txt --quiet --no-cache-dir && rm -f requirements.txt' returned a non-zero code: 1\r\n```", "Fixed by https://github.com/kubeflow/pipelines/pull/6141" ]
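The failure above is `google-cloud-profiler` needing to compile a C++ extension on a base image that has no compiler. The usual fix is to install build tools before `pip install`; the Dockerfile sketch below is my assumption of the shape of the fix, not necessarily the exact diff in https://github.com/kubeflow/pipelines/pull/6141.

```dockerfile
FROM python:3.7

# google-cloud-profiler builds a native extension, so gcc/g++ are required.
RUN apt-get update \
    && apt-get install -y --no-install-recommends gcc g++ \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .
RUN python3 -m pip install -r requirements.txt --quiet --no-cache-dir \
    && rm -f requirements.txt
```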
2021-07-24T03:08:00
2021-07-27T00:51:45
2021-07-27T00:51:45
CONTRIBUTOR
null
The first failure happened at https://github.com/kubeflow/pipelines/runs/3137853377, but the failure itself seems unrelated to the PR, so I think it's caused by some external dependency change.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6130/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6130/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6126
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6126/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6126/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6126/events
https://github.com/kubeflow/pipelines/issues/6126
951,898,493
MDU6SXNzdWU5NTE4OTg0OTM=
6,126
Create KFP v2 DAG POC.
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "POC is available at this branch\r\n\r\nhttps://github.com/zijianjoy/pipelines/tree/dag-poc" ]
2021-07-23T21:10:00
2021-08-01T06:33:11
2021-08-01T06:33:11
COLLABORATOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6126/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6126/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6121
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6121/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6121/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6121/events
https://github.com/kubeflow/pipelines/issues/6121
951,707,276
MDU6SXNzdWU5NTE3MDcyNzY=
6,121
ML-Pipelines API Server and Metadata Writer in CrashLoopBackoff
{ "login": "ReggieCarey", "id": 10270182, "node_id": "MDQ6VXNlcjEwMjcwMTgy", "avatar_url": "https://avatars.githubusercontent.com/u/10270182?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ReggieCarey", "html_url": "https://github.com/ReggieCarey", "followers_url": "https://api.github.com/users/ReggieCarey/followers", "following_url": "https://api.github.com/users/ReggieCarey/following{/other_user}", "gists_url": "https://api.github.com/users/ReggieCarey/gists{/gist_id}", "starred_url": "https://api.github.com/users/ReggieCarey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ReggieCarey/subscriptions", "organizations_url": "https://api.github.com/users/ReggieCarey/orgs", "repos_url": "https://api.github.com/users/ReggieCarey/repos", "events_url": "https://api.github.com/users/ReggieCarey/events{/privacy}", "received_events_url": "https://api.github.com/users/ReggieCarey/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @ReggieCarey!\r\nCan you give output of `kubectl logs ml-pipeline-9b68d49cb-x67mp ml-pipeline-api-server --previous`? that should include logs from last failed container.", "For metadata-writer, what is status of `metadata-grpc-server`? metadata-writer fails to connect to the store", "Thanks Yuan Gong,\r\n\r\nAs per request:\r\n```bash\r\nThu Aug 05 15:43:51 $ kubectl logs ml-pipeline-9b68d49cb-6z2tq ml-pipeline-api-server --previous\r\nI0806 00:55:18.818889 7 client_manager.go:154] Initializing client manager\r\nI0806 00:55:18.818963 7 config.go:57] Config DBConfig.ExtraParams not specified, skipping\r\n```\r\nSadly, that's it.\r\n\r\nReggie", "For metadata-writer issue: The status of metadata-grpc-deployment is stable(ish). It has restarted 533 times.\r\n\r\nHere is output from \"describe\", the logs for \"container\" are empty\r\n\r\n```\r\nThu Aug 05 15:42:03 $ kubectl describe pod metadata-grpc-deployment-c8f784fdf-hdvgr \r\nmetacontroller-0\r\nmetadata-envoy-deployment-6756c995c9-fl7gb\r\nmetadata-grpc-deployment-c8f784fdf-hdvgr\r\nmetadata-writer-6bf5cfd7d8-v7fzb\r\nThu Aug 05 15:42:03 $ kubectl describe pod metadata-grpc-deployment-c8f784fdf-hdvgr \r\nName: metadata-grpc-deployment-c8f784fdf-hdvgr\r\nNamespace: kubeflow\r\nPriority: 0\r\nNode: cpu-compute-05/10.164.208.183\r\nStart Time: Tue, 20 Jul 2021 21:27:57 -0400\r\nLabels: application-crd-id=kubeflow-pipelines\r\n component=metadata-grpc-server\r\n istio.io/rev=default\r\n pod-template-hash=c8f784fdf\r\n security.istio.io/tlsMode=istio\r\n service.istio.io/canonical-name=metadata-grpc-deployment\r\n service.istio.io/canonical-revision=latest\r\nAnnotations: kubectl.kubernetes.io/default-logs-container: container\r\n prometheus.io/path: /stats/prometheus\r\n prometheus.io/port: 15020\r\n prometheus.io/scrape: true\r\n sidecar.istio.io/status:\r\n {\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-data\",\"istio-podinfo\",\"istio-token\",\"istiod-...\r\nStatus: Running\r\nIP: 10.0.0.135\r\nIPs:\r\n IP: 10.0.0.135\r\nControlled By: ReplicaSet/metadata-grpc-deployment-c8f784fdf\r\nInit Containers:\r\n istio-init:\r\n Container ID: docker://b6c7ccd562a209578a835ec5f5a0b3799e150f2c3be4e428be03788591c707e7\r\n Image: docker.io/istio/proxyv2:1.9.0\r\n Image ID: docker-pullable://istio/proxyv2@sha256:286b821197d7a9233d1d889119f090cd9a9394468d3a312f66ea24f6e16b2294\r\n Port: <none>\r\n Host Port: <none>\r\n Args:\r\n istio-iptables\r\n -p\r\n 15001\r\n -z\r\n 15006\r\n -u\r\n 1337\r\n -m\r\n REDIRECT\r\n -i\r\n *\r\n -x\r\n \r\n -b\r\n *\r\n -d\r\n 15090,15021,15020\r\n State: Terminated\r\n Reason: Completed\r\n Exit Code: 0\r\n Started: Thu, 29 Jul 2021 18:15:54 -0400\r\n Finished: Thu, 29 Jul 2021 18:15:54 -0400\r\n Ready: True\r\n Restart Count: 0\r\n Limits:\r\n cpu: 2\r\n memory: 1Gi\r\n Requests:\r\n cpu: 10m\r\n memory: 40Mi\r\n Environment: <none>\r\n Mounts:\r\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4qdbv (ro)\r\nContainers:\r\n container:\r\n Container ID: docker://b5c4b93b8034fdfed2181814fd9b37ff9fe2b154cf3163a988bc4ab4f29e202c\r\n Image: gcr.io/tfx-oss-public/ml_metadata_store_server:0.25.1\r\n Image ID: docker-pullable://gcr.io/tfx-oss-public/ml_metadata_store_server@sha256:01691247116fe048e0761ae8033efaad3ddd82438d0198f2235afb37c1757d48\r\n Port: 8080/TCP\r\n Host Port: 0/TCP\r\n Command:\r\n /bin/metadata_store_server\r\n Args:\r\n --grpc_port=8080\r\n --mysql_config_database=$(MYSQL_DATABASE)\r\n 
--mysql_config_host=$(MYSQL_HOST)\r\n --mysql_config_port=$(MYSQL_PORT)\r\n --mysql_config_user=$(DBCONFIG_USER)\r\n --mysql_config_password=$(DBCONFIG_PASSWORD)\r\n --enable_database_upgrade=true\r\n State: Running\r\n Started: Thu, 05 Aug 2021 20:36:26 -0400\r\n Last State: Terminated\r\n Reason: Error\r\n Exit Code: 139\r\n Started: Thu, 05 Aug 2021 19:36:25 -0400\r\n Finished: Thu, 05 Aug 2021 20:36:25 -0400\r\n Ready: True\r\n Restart Count: 532\r\n Liveness: tcp-socket :grpc-api delay=3s timeout=2s period=5s #success=1 #failure=3\r\n Readiness: tcp-socket :grpc-api delay=3s timeout=2s period=5s #success=1 #failure=3\r\n Environment:\r\n DBCONFIG_USER: <set to the key 'username' in secret 'mysql-secret'> Optional: false\r\n DBCONFIG_PASSWORD: <set to the key 'password' in secret 'mysql-secret'> Optional: false\r\n MYSQL_DATABASE: <set to the key 'mlmdDb' of config map 'pipeline-install-config'> Optional: false\r\n MYSQL_HOST: <set to the key 'dbHost' of config map 'pipeline-install-config'> Optional: false\r\n MYSQL_PORT: <set to the key 'dbPort' of config map 'pipeline-install-config'> Optional: false\r\n Mounts:\r\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4qdbv (ro)\r\n istio-proxy:\r\n Container ID: docker://82b1c17880c0ec145d97cd26ca16526552122e7362e3fe4a0956555ebd63635b\r\n Image: docker.io/istio/proxyv2:1.9.0\r\n Image ID: docker-pullable://istio/proxyv2@sha256:286b821197d7a9233d1d889119f090cd9a9394468d3a312f66ea24f6e16b2294\r\n Port: 15090/TCP\r\n Host Port: 0/TCP\r\n Args:\r\n proxy\r\n sidecar\r\n --domain\r\n $(POD_NAMESPACE).svc.cluster.local\r\n --serviceCluster\r\n metadata-grpc-deployment.kubeflow\r\n --proxyLogLevel=warning\r\n --proxyComponentLogLevel=misc:error\r\n --log_output_level=default:info\r\n --concurrency\r\n 2\r\n State: Running\r\n Started: Thu, 29 Jul 2021 18:16:00 -0400\r\n Last State: Terminated\r\n Reason: Error\r\n Exit Code: 255\r\n Started: Tue, 20 Jul 2021 21:28:03 -0400\r\n Finished: Thu, 29 Jul 2021 17:51:17 -0400\r\n Ready: True\r\n Restart Count: 1\r\n Limits:\r\n cpu: 2\r\n memory: 1Gi\r\n Requests:\r\n cpu: 10m\r\n memory: 40Mi\r\n Readiness: http-get http://:15021/healthz/ready delay=1s timeout=3s period=2s #success=1 #failure=30\r\n Environment:\r\n JWT_POLICY: third-party-jwt\r\n PILOT_CERT_PROVIDER: istiod\r\n CA_ADDR: istiod.istio-system.svc:15012\r\n POD_NAME: metadata-grpc-deployment-c8f784fdf-hdvgr (v1:metadata.name)\r\n POD_NAMESPACE: kubeflow (v1:metadata.namespace)\r\n INSTANCE_IP: (v1:status.podIP)\r\n SERVICE_ACCOUNT: (v1:spec.serviceAccountName)\r\n HOST_IP: (v1:status.hostIP)\r\n CANONICAL_SERVICE: (v1:metadata.labels['service.istio.io/canonical-name'])\r\n CANONICAL_REVISION: (v1:metadata.labels['service.istio.io/canonical-revision'])\r\n PROXY_CONFIG: {}\r\n \r\n ISTIO_META_POD_PORTS: [\r\n {\"name\":\"grpc-api\",\"containerPort\":8080,\"protocol\":\"TCP\"}\r\n ]\r\n ISTIO_META_APP_CONTAINERS: container\r\n ISTIO_META_CLUSTER_ID: Kubernetes\r\n ISTIO_META_INTERCEPTION_MODE: REDIRECT\r\n ISTIO_META_WORKLOAD_NAME: metadata-grpc-deployment\r\n ISTIO_META_OWNER: kubernetes://apis/apps/v1/namespaces/kubeflow/deployments/metadata-grpc-deployment\r\n ISTIO_META_MESH_ID: cluster.local\r\n TRUST_DOMAIN: cluster.local\r\n Mounts:\r\n /etc/istio/pod from istio-podinfo (rw)\r\n /etc/istio/proxy from istio-envoy (rw)\r\n /var/lib/istio/data from istio-data (rw)\r\n /var/run/secrets/istio from istiod-ca-cert (rw)\r\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4qdbv (ro)\r\n 
/var/run/secrets/tokens from istio-token (rw)\r\nConditions:\r\n Type Status\r\n Initialized True \r\n Ready True \r\n ContainersReady True \r\n PodScheduled True \r\nVolumes:\r\n istio-envoy:\r\n Type: EmptyDir (a temporary directory that shares a pod's lifetime)\r\n Medium: Memory\r\n SizeLimit: <unset>\r\n istio-data:\r\n Type: EmptyDir (a temporary directory that shares a pod's lifetime)\r\n Medium: \r\n SizeLimit: <unset>\r\n istio-podinfo:\r\n Type: DownwardAPI (a volume populated by information about the pod)\r\n Items:\r\n metadata.labels -> labels\r\n metadata.annotations -> annotations\r\n limits.cpu -> cpu-limit\r\n requests.cpu -> cpu-request\r\n istio-token:\r\n Type: Projected (a volume that contains injected data from multiple sources)\r\n TokenExpirationSeconds: 43200\r\n istiod-ca-cert:\r\n Type: ConfigMap (a volume populated by a ConfigMap)\r\n Name: istio-ca-root-cert\r\n Optional: false\r\n kube-api-access-4qdbv:\r\n Type: Projected (a volume that contains injected data from multiple sources)\r\n TokenExpirationSeconds: 3607\r\n ConfigMapName: kube-root-ca.crt\r\n ConfigMapOptional: <nil>\r\n DownwardAPI: true\r\nQoS Class: Burstable\r\nNode-Selectors: <none>\r\nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\r\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\r\nEvents:\r\n Type Reason Age From Message\r\n ---- ------ ---- ---- -------\r\n Normal Pulled 31m (x350 over 7d2h) kubelet Container image \"gcr.io/tfx-oss-public/ml_metadata_store_server:0.25.1\" already present on machine\r\n Normal Created 31m (x350 over 7d2h) kubelet Created container container\r\n Normal Started 31m (x350 over 7d2h) kubelet Started container container\r\n```\r\n", "Both servers are part of the istio service mesh. In the past, the mysql process was implicated. 
That process remains up and stable with 0 restarts.\r\n\r\n<img width=\"1052\" alt=\"image\" src=\"https://user-images.githubusercontent.com/10270182/128441376-53624383-92a0-45d4-bf59-83927864bebd.png\">\r\n\r\nBoth pods go to a state of Running, then fail, but they are not synchronized in this failure.\r\n\r\nI should also add that cache-server-* has restarted some 132 times in 15 days.\r\n```\r\n[mysql] 2021/08/05 17:34:16 packets.go:36: unexpected EOF\r\n[mysql] 2021/08/05 18:34:16 packets.go:36: unexpected EOF\r\n[mysql] 2021/08/05 19:34:16 packets.go:36: unexpected EOF\r\n[mysql] 2021/08/05 20:34:16 packets.go:36: unexpected EOF\r\nF0805 20:34:16.890285 1 error.go:325] driver: bad connection\r\ngoroutine 1 [running]:\r\ngithub.com/golang/glog.stacks(0xc00043e300, 0xc000168000, 0x43, 0x96)\r\n\t/go/pkg/mod/github.com/golang/glog@v0.0.0-20160126235308-23def4e6c14b/glog.go:769 +0xb8\r\ngithub.com/golang/glog.(*loggingT).output(0x28daaa0, 0xc000000003, 0xc000412000, 0x280d034, 0x8, 0x145, 0x0)\r\n\t/go/pkg/mod/github.com/golang/glog@v0.0.0-20160126235308-23def4e6c14b/glog.go:720 +0x372\r\ngithub.com/golang/glog.(*loggingT).printf(0x28daaa0, 0x3, 0x199ee22, 0x2, 0xc0002f38d8, 0x1, 0x1)\r\n\t/go/pkg/mod/github.com/golang/glog@v0.0.0-20160126235308-23def4e6c14b/glog.go:655 +0x14b\r\ngithub.com/golang/glog.Fatalf(...)\r\n\t/go/pkg/mod/github.com/golang/glog@v0.0.0-20160126235308-23def4e6c14b/glog.go:1148\r\ngithub.com/kubeflow/pipelines/backend/src/common/util.TerminateIfError(...)\r\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:325\r\nmain.initMysql(0x7fffe3a0c170, 0x5, 0x7fffe3a0c180, 0x5, 0x7fffe3a0c190, 0x4, 0x7fffe3a0c19f, 0x7, 0x7fffe3a0c1b1, 0x4, ...)\r\n\t/go/src/github.com/kubeflow/pipelines/backend/src/cache/client_manager.go:157 +0x4f1\r\nmain.initDBClient(0x7fffe3a0c170, 0x5, 0x7fffe3a0c180, 0x5, 0x7fffe3a0c190, 0x4, 0x7fffe3a0c19f, 0x7, 0x7fffe3a0c1b1, 0x4, ...)\r\n\t/go/src/github.com/kubeflow/pipelines/backend/src/cache/client_manager.go:71 +0x681\r\nmain.(*ClientManager).init(0xc0002f3e98, 0x7fffe3a0c170, 0x5, 0x7fffe3a0c180, 0x5, 0x7fffe3a0c190, 0x4, 0x7fffe3a0c19f, 0x7, 0x7fffe3a0c1b1, ...)\r\n\t/go/src/github.com/kubeflow/pipelines/backend/src/cache/client_manager.go:57 +0x80\r\nmain.NewClientManager(...)\r\n\t/go/src/github.com/kubeflow/pipelines/backend/src/cache/client_manager.go:169\r\nmain.main()\r\n\t/go/src/github.com/kubeflow/pipelines/backend/src/cache/main.go:77 +0x66e\r\n```\r\n\r\nMaybe this is an issue with istio and service mesh configuration on bare metal.\r\n\r\nIn past incarnations of this bug, there was a repair offered up to establish PeerAuthentication - this resource does not exist in my cluster. The old suggestion was to apply:\r\n```yaml\r\napiVersion: security.istio.io/v1beta1\r\nkind: PeerAuthentication\r\nmetadata:\r\n name: \"default\"\r\nspec:\r\n mtls:\r\n mode: STRICT\r\n```\r\n\r\nI do not know if this is still applicable in the Istio 1.9.0 world.\r\n\r\n", "Any progress or ideas or things to try? KFP probably represents one of the most beneficial parts of Kubeflow 1.3.0 to us. As of now all I have is Jupyter notebooks with a Kubeflow Dashboard wrapper. I can't use KFP at all.", "It's been 17 days and I have not heard any movement on this bug. I really want to get this resolved. I have switched my ISTIO service mesh to require mTLS (STRICT) everywhere. 
I have verified that the processes listed here, as well as the mysql process, are all within the service mesh - as evidenced by istio-proxy and istio-init being injected into the three pods.\r\n\r\nThis really does appear to be a problem with access to the mysql store.\r\n\r\nMy next experiment will be to stand up an Ubuntu container with a mysql client in the kubeflow namespace. From there I hope to be able to validate connectivity (a probe sketch follows after this thread).\r\n\r\nI can see two outcomes:\r\n\r\n1) Connectivity works\r\n2) Connectivity fails\r\n\r\nIn both cases the next step is still: What do I do given this additional information? \r\n\r\nAs an FYI, the mysql process' istio-proxy shows the following in the logs:\r\n```\r\n2021-08-26T15:15:46.314059Z\tinfo\txdsproxy\tconnected to upstream XDS server: istiod.istio-system.svc:15012\r\n2021-08-26T15:44:53.280933Z\twarning\tenvoy config\tStreamAggregatedResources gRPC config stream closed: 0, \r\n2021-08-26T15:44:53.515221Z\tinfo\txdsproxy\tconnected to upstream XDS server: istiod.istio-system.svc:15012\r\n2021-08-26T16:12:54.444630Z\twarning\tenvoy config\tStreamAggregatedResources gRPC config stream closed: 0, \r\n2021-08-26T16:12:54.806062Z\tinfo\txdsproxy\tconnected to upstream XDS server: istiod.istio-system.svc:15012\r\n2021-08-26T16:45:50.518921Z\twarning\tenvoy config\tStreamAggregatedResources gRPC config stream closed: 0, \r\n2021-08-26T16:45:50.786400Z\tinfo\txdsproxy\tconnected to upstream XDS server: istiod.istio-system.svc:15012\r\n```\r\n", "UPDATE:\r\n\r\nI was able to get to partial success.\r\n\r\nWhat I did was to edit KubeFlow/v1.3.0/manifests/apps/pipeline/upstream/third-party/mysql/base/mysql-deployment.yaml\r\nand add in:\r\n```\r\nspec:\r\n template:\r\n metadata:\r\n annotations:\r\n sidecar.istio.io/inject: \"false\"\r\n```\r\n\r\n(NOTE: I also changed to use image: mysql:8 - but I don't think this is the issue)\r\n\r\nAnd then, I undeployed and redeployed KFP - I know I could have just applied the changes.\r\n```\r\n$ kustomize build apps/pipeline/upstream/env/platform-agnostic-multi-user | kubectl delete -f -\r\n$ kustomize build apps/pipeline/upstream/env/platform-agnostic-multi-user | kubectl apply -f -\r\n```\r\n\r\nThe downside is that metadata-grpc-deployment and metadata-writer now fail.\r\n\r\nFor metadata-grpc-deployment, the log reads:\r\n```\r\n2021-08-26 21:43:31.894833: F ml_metadata/metadata_store/metadata_store_server_main.cc:220] Non-OK-status: status status: Internal: mysql_real_connect failed: errno: 2059, error: Plugin caching_sha2_password could not be loaded: lib/mariadb/plugin/caching_sha2_password.so: cannot open shared object file: No such file or directoryMetadataStore cannot be created with the given connection config.\r\n```\r\n\r\nFor metadata-writer, the logs read:\r\n```\r\nFailed to access the Metadata store. Exception: \"no healthy upstream\"\r\n```\r\n\r\nNext I tried to use \"platform-agnostic-multi-user-legacy\"...\r\n\r\n```\r\n$ kustomize build apps/pipeline/upstream/env/platform-agnostic-multi-user-legacy | kubectl apply -f -\r\n```\r\n\r\nAnd all processes are now running - except this now shows up:\r\n\r\n![image](https://user-images.githubusercontent.com/10270182/131041900-6c40173c-6178-45c7-bc2b-f3695976ab7b.png)\r\n\r\n\r\nAgain: Any suggestions and assistance are highly appreciated.", "I can confirm that this issue is seen with KF 1.3.1 too. The bug is very annoying as KFP remains inaccessible. ", "Ohh, sorry this fell through the cracks. 
Let me take a look tomorrow.", "/assign @zijianjoy \nThis looks like an Istio compatibility issue.", "When you disable sidecar injection, also find all the destination rules and delete the destination rule for mysql. Otherwise, all other clients will fail to access MySQL, assuming mTLS is turned on.\n\nEdit: this is a workaround by pulling MySQL out of the mesh.", "If you want MySQL in the mesh, you need to check the Istio documentation for troubleshooting instructions. I agree Istio is very hard to troubleshoot; I got the same frustration when configuring these.", "I had a similar problem with metadata-writer; after the pod of the `metadata-grpc-deployment` deployment got an Envoy sidecar, metadata-writer stopped crash-looping.\r\n\r\nSo please check that every component in the `kubeflow` namespace has the required Envoy sidecar, especially when you have enforced strict mTLS via PeerAuthentication.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "I also encountered this issue. \r\nMy env:\r\nk8s: 1.19.16\r\nkubeflow manifests: v1.5.1 tag\r\nkustomize: 3.2.0\r\n\r\ninstall command:\r\n```\r\nwhile ! kustomize build example | kubectl apply -f -; do echo \"Retrying to apply resources\"; sleep 10; done\r\n```\r\n\r\nmetadata-grpc-deployment-6f6f7776c5-pq6hq log:\r\n```\r\nWARNING: Logging before InitGoogleLogging() is written to STDERR\r\nF0321 09:45:37.213575 1 metadata_store_server_main.cc:236] Check failed: absl::OkStatus() == status (OK vs. INTERNAL: mysql_real_connect failed: errno: , error: [mysql-error-info='']) MetadataStore cannot be created with the given connection config.\r\n*** Check failure stack trace: ***\r\n```\r\n\r\nmysql-f7b9b7dd4-5m4tb logs:\r\n```\r\n2023-03-21 09:47:24+00:00 [Note] [Entrypoint]: Entrypoint script for MySQL Server 5.7.33-1debian10 started.\r\n2023-03-21 09:47:27+00:00 [ERROR] [Entrypoint]: mysqld failed while attempting to check config\r\n command was: mysqld --ignore-db-dir=lost+found --datadir /var/lib/mysql --verbose --help --log-bin-index=/tmp/tmp.fbqLrtywDf\r\n\r\n```\r\n\r\nmetadata-writer-d7ff8d4bc-8thjn logs:\r\n```\r\nFailed to access the Metadata store. Exception: \"no healthy upstream\"\r\nFailed to access the Metadata store. Exception: \"no healthy upstream\"\r\nFailed to access the Metadata store. Exception: \"no healthy upstream\"\r\nTraceback (most recent call last):\r\n File \"/kfp/metadata_writer/metadata_writer.py\", line 63, in <module>\r\n mlmd_store = connect_to_mlmd()\r\n File \"/kfp/metadata_writer/metadata_helpers.py\", line 62, in connect_to_mlmd\r\n raise RuntimeError('Could not connect to the Metadata store.')\r\nRuntimeError: Could not connect to the Metadata store.\r\n```\r\n\r\n\r\nI also tried k8s 1.21.2, which still has the same problem.", "NOTE: Everything I wrote below is totally not useful to this thread about cloud deployments. I just picked this issue out of the five I was looking at. I seriously thought it was contextually appropriate but it is not. The below information may be useful to you, but it was put here on accident essentially. I also think it's useful info and I don't want to just delete it. I will move what I've said below into its own issue tomorrow. \r\n\r\n-------\r\n\r\nI have this problem on some systems and believe it is because of the max_user_{instance,watches} settings, which can be fixed by setting this: 
\r\n\r\n```\r\nsudo sysctl fs.inotify.max_user_instances=1280 \r\nsudo sysctl fs.inotify.max_user_watches=655360\r\n```\r\n\r\nI have found that it should be set before I deploy the cluster and kubeflow but if a cluster is running you can kill the pods that get hung but in my experience that has been a waste of time compared to just starting over from a clean slate. \r\n\r\nI also had this issue on a machine that was running off of an SSD booted over USB-C and setting the max_user_* didn't fix it. I still don't really \"know\" why I couldn't get that system to work but basically I think this problem amounts to any situation where the disk throughput gets maxed out. \r\n\r\nKubeflow is a massive beast that does a LOT of stuff and on many machines you will hit limits where it can't function and it usually looks like what you've posted here. I know that's not the most technical solution to an open issue, but the problem is kind of squishy and I think can occur for multiple reasons. ", "I believe this problem is consistently produced by following the Charmed Kubeflow documentation on a fresh Ubuntu 22.04 or 20.04 system. I would be surprised if anyone can follow the current installation instructions with any success as the range of systems I'm testing on right now is some pretty high end server stuff and a couple consumer sort of desktop machines. \r\n\r\nNot that this likely matters but I am doing GPU stuff so I've enabled GPU with microk8s. I can easily pass validation and push up a container to do `nvidia-smi` so i don't think that would be causing the issue. I mention this because one of my systems had an infoROM issue on one GPU for a while and if I didn't disable it containers would fail to launch and the failures would look a lot like this issue sometimes (depending on a lot of factors). If anyone has an infoROM issue on a GPU, when you search for that Nvidia's official folks tell you it's unserviceable which is untrue. You just have to use `nvflash` a windows tool and you can't download it directly from nvidia for some reason it's only available from some websites that IMO look pretty sketchy. Use at your own risk.\r\n\r\nSince I do have a running microk8s 1.7 kubeflow setup, I think I should be able to solve this and commit back but I'm pretty certain the current Charmed Kubeflow setup instructions are very broken. \r\n\r\n", "The numbers I used above were not sufficient to solve the `too many open files` problem. I just ripped them from this issue and at one point a couple months ago, they did work for me. I would be very interested to know what changed in the packages that made the instances/watches requirements increase but I'm very much too busy to formally care. Here's the issue I got these numbers from. \r\n\r\nhttps://github.com/kubeflow/manifests/issues/2087\r\n\r\nI have no idea how to calculate what to raise the numbers to or the consequences of raising the limit too high. I just upped the largest digit by one. \r\n\r\n```\r\nsudo sysctl fs.inotify.max_user_instances=2280 \r\nsudo sysctl fs.inotify.max_user_watches=755360\r\n```\r\n\r\nAnd I totally forgot about this other issue I've had. GVFS backends! There is a client that runs on a default Ubuntu Desktop install for each user that's logged in. It is monitoring the disk and when you fire up Kubeflow it flips out and takes up 120-200% of the cpu for each user. I do not need this tool for my deployments. 
I'm unsure if this is a problem with a barebones server install but this is critical to solving the problem of launching kubeflow on a fresh installed Ubuntu Desktop 20.04/22.04 system.\r\n\r\n`sudo apt-get remove gvfs-backends -y`\r\n\r\nHooray. I think this solves a reproducibility problem I've had for over a month. I haven't quite figured out if there are licensing issues with distributing this, but I've built an ubuntu LiveCD with CUDA, docker, and kubeflow (microk8s). I'll ping folks on slack and see if there's any interest in it. I've got a solid pipeline for doing the CI/CD for releases and man, the little things to get going are really a big barrier. \r\n\r\nIt is very possible that the max_user_* doesn't need to be raised so high if the `gvfs-backends` are removed from the start but I will not be trying to figure that out explicitly in the near term.", "Also `tensorboard-controller` was failing because the ingress wasn't coming up correctly. That has been covered in a ton of issues. \r\n\r\nhttps://discourse.charmhub.io/t/charmed-kubeflow-upgrade-error/7007/7\r\n\r\n`juju run --unit istio-pilot/0 -- \"export JUJU_DISPATCH_PATH=hooks/config-changed; ./dispatch\"`\r\n\r\nSolves that. I've tested this on multiple machines now and raising the max_user_* limits, uninstalling gvfs-backends, and fixing the ingress with the above command solves all of the problems consistently. I'm working on a Zero to Kubeflow tutorial, but I'll submit a PR for the Charmed Kubeflow instructions that covers these things if someone can point me at where to submit it.\r\n\r\nI am realizing after some review that in this situation and the other issues I've read relating to a similar failure, most people are running in the cloud and not on bare metal. I do think the gist of what I've pointed out is still valid though but on this thread what I've posted is just not directly useful. It's squishy. These problems usually are related to disk throughput but that gets weird to sort in the cloud. Anyway.... All of what I said above has nothing to do with this issue I am realizing. Sorry for posting all this in the wrong place. I don't know where else to put all this info, so I'm going to leave it here for now. \r\n\r\nI'll collect it and put it into it's own issue tomorrow and remove it from this thread. Sorry if this confused anyone. It's been a long day.", "If you are using an external MySQL database (especially if its MySQL 8.0), you are likely experiencing this issue around support for `caching_sha2_password` authentication, see here for more:\r\n- https://dev.mysql.com/blog-archive/upgrading-to-mysql-8-0-default-authentication-plugin-considerations/\r\n\r\nFYI, Kubeflow Pipelines itself[ fixed MySQL 8.0 and `caching_sha2_password` support](https://github.com/kubeflow/pipelines/pull/8351) in `2.0.0-alpha.7` (ships with Kubeflow 1.7), but there is still an issue with the upstream `ml-metadata` project:\r\n- https://github.com/google/ml-metadata/issues/178", "I had the same problem. For me it was a cilium networking provider compatibility issue. I had to move to kubenet and it worked.\r\n", "> \r\n\r\nCan you elaborate the compatibility issue please? As I'm using cilium as well." ]
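Most of the failure modes in this thread reduce to whether a given pod can reach MySQL, and with which authentication plugin. A quick in-namespace probe can separate mesh/networking problems from the `caching_sha2_password` plugin errors shown in the logs above. The sketch below is hypothetical and not part of the original report: it assumes the manifest defaults (service `mysql.kubeflow`, root user with an empty password, database `mlpipeline`) and needs `pymysql` plus `cryptography` installed in the debug pod.

```python
# Hypothetical connectivity probe; run it from a throwaway pod in the
# kubeflow namespace so it takes the same mesh path as the failing
# components. Host/user/database are the KFP manifest defaults.
import pymysql

try:
    conn = pymysql.connect(
        host="mysql.kubeflow.svc.cluster.local",
        user="root",
        password="",
        database="mlpipeline",
        connect_timeout=5,
    )
    with conn.cursor() as cur:
        cur.execute("SELECT VERSION()")
        print("connected, server version:", cur.fetchone()[0])
except pymysql.MySQLError as e:
    # An authentication error here (rather than a timeout) points at the
    # caching_sha2_password mismatch seen above, not at mesh/networking.
    print("connection failed:", e)
```

If this succeeds from a plain pod while the meshed components still fail, the Istio DestinationRule/mTLS configuration remains the prime suspect, which matches the workaround discussed earlier in the thread.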
2021-07-23T16:05:49
2023-08-17T11:28:08
null
NONE
null
### What steps did you take I deployed Kubeflow 1.3 by using the manifests approach. I then repaired an issue with dex running on K8s v1.21 ### What happened: The installation succeeded. All processes started up except the two. Both Metadata writer and ml-pipeline crash constantly and are restarted. ML-Pipeline always reports 1 of 2 running. Metadata-writer sometimes appears to be fully running then fails. No other kubeflow pods are having problems like this - even the mysql pod seems stable. I can only assume the failure of the metadata writer is due to a continued failure in ml-pipeline api-server. The pod keeps getting terminated by something with a reason code of 137. See last image provided for details on the cycle time. ### What did you expect to happen: I expect that the pipeline tools install and operate normally. This has been a consistent problem going back to KF 1.1 with no adequate resolution ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> I use the kubeflow 1.3 manifests deployment approach * KFP version: <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> This install is via the kubeflow 1.3. * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> NOT APPLICABLE ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> kubectl logs ml-pipeline-9b68d49cb-x67mp ml-pipeline-api-server ``` I0723 15:20:24.692579 8 client_manager.go:154] Initializing client manager I0723 15:20:24.692646 8 config.go:57] Config DBConfig.ExtraParams not specified, skipping ``` kubectl describe pod ml-pipeline-9b68d49cb-x67mp ``` Name: ml-pipeline-9b68d49cb-x67mp Namespace: kubeflow Priority: 0 Node: cpu-compute-09/10.164.208.67 Start Time: Tue, 20 Jul 2021 21:27:57 -0400 Labels: app=ml-pipeline app.kubernetes.io/component=ml-pipeline app.kubernetes.io/name=kubeflow-pipelines application-crd-id=kubeflow-pipelines istio.io/rev=default pod-template-hash=9b68d49cb security.istio.io/tlsMode=istio service.istio.io/canonical-name=kubeflow-pipelines service.istio.io/canonical-revision=latest Annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: true kubectl.kubernetes.io/default-logs-container: ml-pipeline-api-server prometheus.io/path: /stats/prometheus prometheus.io/port: 15020 prometheus.io/scrape: true sidecar.istio.io/status: {"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-... 
Status: Running IP: 10.0.4.21 IPs: IP: 10.0.4.21 Controlled By: ReplicaSet/ml-pipeline-9b68d49cb Init Containers: istio-init: Container ID: docker://db62120288183c6d962e0bfb60db7780fa7bb8c9e231bc9f48976a10c1b29587 Image: docker.io/istio/proxyv2:1.9.0 Image ID: docker-pullable://istio/proxyv2@sha256:286b821197d7a9233d1d889119f090cd9a9394468d3a312f66ea24f6e16b2294 Port: <none> Host Port: <none> Args: istio-iptables -p 15001 -z 15006 -u 1337 -m REDIRECT -i * -x -b * -d 15090,15021,15020 State: Terminated Reason: Completed Exit Code: 0 Started: Tue, 20 Jul 2021 21:28:19 -0400 Finished: Tue, 20 Jul 2021 21:28:19 -0400 Ready: True Restart Count: 0 Limits: cpu: 2 memory: 1Gi Requests: cpu: 10m memory: 40Mi Environment: <none> Mounts: /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8csrh (ro) Containers: ml-pipeline-api-server: Container ID: docker://0a4b0d31179f67cc38ddb5ebb8eb31b32344c80fe9e4789ef20c073b02c5335b Image: gcr.io/ml-pipeline/api-server:1.5.0 Image ID: docker-pullable://gcr.io/ml-pipeline/api-server@sha256:0d90705712e201ca7102336e4bd6ff794e7f76facdac2c6e82134294706d78ca Ports: 8888/TCP, 8887/TCP Host Ports: 0/TCP, 0/TCP State: Waiting Reason: CrashLoopBackOff Last State: Terminated Reason: Error Exit Code: 137 Started: Fri, 23 Jul 2021 11:13:49 -0400 Finished: Fri, 23 Jul 2021 11:14:34 -0400 Ready: False Restart Count: 1117 Requests: cpu: 250m memory: 500Mi Liveness: exec [wget -q -S -O - http://localhost:8888/apis/v1beta1/healthz] delay=3s timeout=2s period=5s #success=1 #failure=3 Readiness: exec [wget -q -S -O - http://localhost:8888/apis/v1beta1/healthz] delay=3s timeout=2s period=5s #success=1 #failure=3 Environment Variables from: pipeline-api-server-config-dc9hkg52h6 ConfigMap Optional: false Environment: KUBEFLOW_USERID_HEADER: kubeflow-userid KUBEFLOW_USERID_PREFIX: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION: <set to the key 'autoUpdatePipelineDefaultVersion' of config map 'pipeline-install-config'> Optional: false POD_NAMESPACE: kubeflow (v1:metadata.namespace) OBJECTSTORECONFIG_SECURE: false OBJECTSTORECONFIG_BUCKETNAME: <set to the key 'bucketName' of config map 'pipeline-install-config'> Optional: false DBCONFIG_USER: <set to the key 'username' in secret 'mysql-secret'> Optional: false DBCONFIG_PASSWORD: <set to the key 'password' in secret 'mysql-secret'> Optional: false DBCONFIG_DBNAME: <set to the key 'pipelineDb' of config map 'pipeline-install-config'> Optional: false DBCONFIG_HOST: <set to the key 'dbHost' of config map 'pipeline-install-config'> Optional: false DBCONFIG_PORT: <set to the key 'dbPort' of config map 'pipeline-install-config'> Optional: false OBJECTSTORECONFIG_ACCESSKEY: <set to the key 'accesskey' in secret 'mlpipeline-minio-artifact'> Optional: false OBJECTSTORECONFIG_SECRETACCESSKEY: <set to the key 'secretkey' in secret 'mlpipeline-minio-artifact'> Optional: false Mounts: /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8csrh (ro) istio-proxy: Container ID: docker://6cd34842733729c0743c0ce153a6b15614da748e72a2352616cdf6d10eb9a997 Image: docker.io/istio/proxyv2:1.9.0 Image ID: docker-pullable://istio/proxyv2@sha256:286b821197d7a9233d1d889119f090cd9a9394468d3a312f66ea24f6e16b2294 Port: 15090/TCP Host Port: 0/TCP Args: proxy sidecar --domain $(POD_NAMESPACE).svc.cluster.local --serviceCluster ml-pipeline.$(POD_NAMESPACE) --proxyLogLevel=warning --proxyComponentLogLevel=misc:error --log_output_level=default:info --concurrency 2 State: Running Started: Tue, 20 Jul 2021 21:28:36 -0400 Ready: True Restart Count: 0 Limits: 
cpu: 2 memory: 1Gi Requests: cpu: 10m memory: 40Mi Readiness: http-get http://:15021/healthz/ready delay=1s timeout=3s period=2s #success=1 #failure=30 Environment: JWT_POLICY: third-party-jwt PILOT_CERT_PROVIDER: istiod CA_ADDR: istiod.istio-system.svc:15012 POD_NAME: ml-pipeline-9b68d49cb-x67mp (v1:metadata.name) POD_NAMESPACE: kubeflow (v1:metadata.namespace) INSTANCE_IP: (v1:status.podIP) SERVICE_ACCOUNT: (v1:spec.serviceAccountName) HOST_IP: (v1:status.hostIP) CANONICAL_SERVICE: (v1:metadata.labels['service.istio.io/canonical-name']) CANONICAL_REVISION: (v1:metadata.labels['service.istio.io/canonical-revision']) PROXY_CONFIG: {} ISTIO_META_POD_PORTS: [ {"name":"http","containerPort":8888,"protocol":"TCP"} ,{"name":"grpc","containerPort":8887,"protocol":"TCP"} ] ISTIO_META_APP_CONTAINERS: ml-pipeline-api-server ISTIO_META_CLUSTER_ID: Kubernetes ISTIO_META_INTERCEPTION_MODE: REDIRECT ISTIO_METAJSON_ANNOTATIONS: {"cluster-autoscaler.kubernetes.io/safe-to-evict":"true"} ISTIO_META_WORKLOAD_NAME: ml-pipeline ISTIO_META_OWNER: kubernetes://apis/apps/v1/namespaces/kubeflow/deployments/ml-pipeline ISTIO_META_MESH_ID: cluster.local TRUST_DOMAIN: cluster.local Mounts: /etc/istio/pod from istio-podinfo (rw) /etc/istio/proxy from istio-envoy (rw) /var/lib/istio/data from istio-data (rw) /var/run/secrets/istio from istiod-ca-cert (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8csrh (ro) /var/run/secrets/tokens from istio-token (rw) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: istio-envoy: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: Memory SizeLimit: <unset> istio-data: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: SizeLimit: <unset> istio-podinfo: Type: DownwardAPI (a volume populated by information about the pod) Items: metadata.labels -> labels metadata.annotations -> annotations limits.cpu -> cpu-limit requests.cpu -> cpu-request istio-token: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 43200 istiod-ca-cert: Type: ConfigMap (a volume populated by a ConfigMap) Name: istio-ca-root-cert Optional: false kube-api-access-8csrh: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: <nil> DownwardAPI: true QoS Class: Burstable Node-Selectors: <none> Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning BackOff 12m (x13622 over 2d13h) kubelet Back-off restarting failed container Warning Unhealthy 8m2s (x9931 over 2d13h) kubelet Readiness probe failed: Normal Pulled 2m58s (x1116 over 2d13h) kubelet Container image "gcr.io/ml-pipeline/api-server:1.5.0" already present on machine ``` ![image](https://user-images.githubusercontent.com/10270182/126809124-0dd430dc-9b85-4896-a3f2-d6a7cbc38cb1.png) ![image](https://user-images.githubusercontent.com/10270182/126809730-39e395a6-37a6-413a-976c-eeae02b71400.png) ## Metadata-Writer Logs: ``` Failed to access the Metadata store. Exception: "upstream connect error or disconnect/reset before headers. reset reason: connection failure" Failed to access the Metadata store. Exception: "upstream connect error or disconnect/reset before headers. reset reason: connection failure" Failed to access the Metadata store. 
Exception: "upstream connect error or disconnect/reset before headers. reset reason: connection failure" Failed to access the Metadata store. Exception: "upstream connect error or disconnect/reset before headers. reset reason: connection failure" Traceback (most recent call last): File "/kfp/metadata_writer/metadata_writer.py", line 63, in <module> mlmd_store = connect_to_mlmd() File "/kfp/metadata_writer/metadata_helpers.py", line 62, in connect_to_mlmd raise RuntimeError('Could not connect to the Metadata store.') RuntimeError: Could not connect to the Metadata store. ``` ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6121/reactions", "total_count": 7, "+1": 7, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6121/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6120
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6120/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6120/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6120/events
https://github.com/kubeflow/pipelines/issues/6120
951,277,889
MDU6SXNzdWU5NTEyNzc4ODk=
6,120
Kubeflow SDK - error in client.list_experiments()
{ "login": "TanjaDuPlessis", "id": 61677130, "node_id": "MDQ6VXNlcjYxNjc3MTMw", "avatar_url": "https://avatars.githubusercontent.com/u/61677130?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TanjaDuPlessis", "html_url": "https://github.com/TanjaDuPlessis", "followers_url": "https://api.github.com/users/TanjaDuPlessis/followers", "following_url": "https://api.github.com/users/TanjaDuPlessis/following{/other_user}", "gists_url": "https://api.github.com/users/TanjaDuPlessis/gists{/gist_id}", "starred_url": "https://api.github.com/users/TanjaDuPlessis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TanjaDuPlessis/subscriptions", "organizations_url": "https://api.github.com/users/TanjaDuPlessis/orgs", "repos_url": "https://api.github.com/users/TanjaDuPlessis/repos", "events_url": "https://api.github.com/users/TanjaDuPlessis/events{/privacy}", "received_events_url": "https://api.github.com/users/TanjaDuPlessis/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "IIRC, Kubeflow v1.3 enables multi-user isolation by default. @Bobgy, is that right?\r\n\r\nIn that case, `list_experiments()` requires passing a namespace: https://github.com/kubeflow/pipelines/blob/24c551d3fc0b955f48af1206db5c80ec011db3dd/sdk/python/kfp/_client.py#L452-L461", "@chensun I think that is correct yes. I did try passing a namespace, but still get the error. \r\n`client.list_experiments(namespace='kubeflow')`\r\n\r\nI did also try passing the namespace 'kubeflow-example-user-com' which is what is visible from the UI. Neither works. I'm not sure whether I'm passing it incorrectly?\r\n\r\n```\r\nApiException: (500)\r\nReason: Internal Server Error\r\nHTTP response headers: HTTPHeaderDict({'X-Powered-By': 'Express', 'content-type': 'application/json', 'trailer': 'Grpc-Trailer-Content-Type', 'date': 'Fri, 23 Jul 2021 12:42:14 GMT', 'x-envoy-upstream-service-time': '11', 'server': 'envoy', 'connection': 'close', 'transfer-encoding': 'chunked'})\r\nHTTP response body: {\"error\":\"Internal error: Unauthenticated: Request header error: there is no user identity header.: Request header error: there is no user identity header.\\nFailed to authorize with API resource references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).canAccessExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:249\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:148\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\\nFailed to authorize with API resource 
references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:150\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\",\"code\":13,\"message\":\"Internal error: Unauthenticated: Request header error: there is no user identity header.: Request header error: there is no user identity header.\\nFailed to authorize with API resource references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).canAccessExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:249\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:148\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\\nFailed to authorize with API resource 
references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:150\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\",\"details\":[{\"@type\":\"type.googleapis.com/api.Error\",\"error_message\":\"Internal error: Unauthenticated: Request header error: there is no user identity header.: Request header error: there is no user identity header.\\nFailed to authorize with API resource references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).canAccessExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:249\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:148\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\\nFailed to authorize with API resource 
references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:150\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\",\"error_details\":\"Internal error: Unauthenticated: Request header error: there is no user identity header.: Request header error: there is no user identity header.\\nFailed to authorize with API resource references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).canAccessExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:249\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:148\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\\nFailed to authorize with API resource 
references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:150\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\"}]}\r\n```", "Hi @TanjaDuPlessis, I wonder if you may missed any setup required for multi tenancy.\r\nCan you please check if you went through all the steps: https://www.kubeflow.org/docs/components/multi-tenancy/getting-started/\r\n\r\n/cc @Bobgy ", "@TanjaDuPlessis refer to https://www.kubeflow.org/docs/components/pipelines/multi-user/#in-cluster-api-request-authentication, it's one of the current caveats of multi-user mode.\r\n\r\nWe are now supporting this usage through https://github.com/kubeflow/pipelines/issues/5138.", "@chensun this is expected behavior, because KFP API server is authorizing requests.", "hello @Bobgy , if it's ecpected does that mean we can't use it ? \r\nI have the same issue but from notebook, should i add username/password somewhere .\r\nthanks for your time !\r\nI saw there is some kind of fix was it included in 1.3 ?", "@rexad yes, and it's now documented in https://www.kubeflow.org/docs/components/pipelines/sdk/connect-api/#connect-to-kubeflow-pipelines-from-the-same-cluster" ]
2021-07-23T06:03:56
2021-10-15T04:56:57
2021-07-30T14:37:31
NONE
null
I'm following the example in docs (https://www.kubeflow.org/docs/components/pipelines/sdk/connect-api/) but am getting an error when trying to access the experiments or runs (when trying to list and/or create them) I've port-forwarded ml-pipeline-ui: `kubectl port-forward svc/ml-pipeline-ui 3000:80 --namespace kubeflow` Trying to list the experiments results in an error: ``` import kfp client = kfp.Client(host='http://localhost:3000') print(client.list_experiments()) ``` The error I get from client.list_experiments(): > ApiException: (500) Reason: Internal Server Error HTTP response > headers: HTTPHeaderDict({'X-Powered-By': 'Express', 'content-type': > 'application/json', 'trailer': 'Grpc-Trailer-Content-Type', 'date': > 'Thu, 22 Jul 2021 21:09:12 GMT', 'x-envoy-upstream-service-time': '7', > 'server': 'envoy', 'connection': 'close', 'transfer-encoding': > 'chunked'}) HTTP response body: {"error":"Internal error: > Unauthenticated: Request header error: there is no user identity > header.: Request header error: there is no user identity > header.\nFailed to authorize with API resource > references\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).canAccessExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:249\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:148\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\nmain.apiServerInterceptor\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\ngoogle.golang.org/grpc.(*Server).handleStream\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1357\nFailed > to authorize with API resource > 
references\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:150\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\nmain.apiServerInterceptor\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\ngoogle.golang.org/grpc.(*Server).handleStream\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1357","code":13,"message":"Internal > error: Unauthenticated: Request header error: there is no user > identity header.: Request header error: there is no user identity > header.\nFailed to authorize with API resource > references\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).canAccessExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:249\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:148\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\nmain.apiServerInterceptor\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\ngoogle.golang.org/grpc.(*Server).handleStream\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1357\nFailed > to authorize with API resource > 
references\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:150\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\nmain.apiServerInterceptor\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\ngoogle.golang.org/grpc.(*Server).handleStream\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1357","details":[{"@type":"type.googleapis.com/api.Error","error_message":"Internal > error: Unauthenticated: Request header error: there is no user > identity header.: Request header error: there is no user identity > header.\nFailed to authorize with API resource > references\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).canAccessExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:249\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:148\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\nmain.apiServerInterceptor\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\ngoogle.golang.org/grpc.(*Server).handleStream\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1357\nFailed > to authorize with API resource > 
references\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:150\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\nmain.apiServerInterceptor\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\ngoogle.golang.org/grpc.(*Server).handleStream\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1357","error_details":"Internal > error: Unauthenticated: Request header error: there is no user > identity header.: Request header error: there is no user identity > header.\nFailed to authorize with API resource > references\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).canAccessExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:249\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:148\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\nmain.apiServerInterceptor\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\ngoogle.golang.org/grpc.(*Server).handleStream\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1357\nFailed > to authorize with API resource > 
references\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\n\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*ExperimentServer).ListExperiment\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/experiment_server.go:150\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler.func1\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:748\nmain.apiServerInterceptor\n\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\ngithub.com/kubeflow/pipelines/backend/api/go_client._ExperimentService_ListExperiment_Handler\n\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/experiment.pb.go:750\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\ngoogle.golang.org/grpc.(*Server).handleStream\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\n\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1357"}]} However, listing the pipelines works: ``` import kfp client = kfp.Client(host='http://localhost:3000') print(client.list_pipelines()) ``` I have verified that the port-forwarding worked - I am able to access the UI at http://localhost:3000/ and can see the pipelines. I am working with Kubeflow v1.3. I have also tried adding the additional namespace argument as suggested for multi-user (https://www.kubeflow.org/docs/components/pipelines/multi-user/#when-using-the-sdk). Any ideas on where I am going wrong?
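For completeness, the comments on this issue also link to the other documented path: calling the API from inside the cluster (for example from a Kubeflow notebook), where newer KFP versions can authenticate with a projected ServiceAccount token instead of a user session cookie. A rough sketch, assuming the in-cluster token setup (the PodDefault from the KFP multi-user docs) is already applied to the notebook:

```python
# In-cluster sketch: no port-forward needed. kfp.Client() discovers the
# in-cluster endpoint and uses the mounted ServiceAccount token, assuming
# the PodDefault from the multi-user docs is in place (an assumption).
import kfp

client = kfp.Client()
print(client.list_experiments(namespace="kubeflow-user-example-com"))
```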
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6120/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6120/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6118
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6118/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6118/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6118/events
https://github.com/kubeflow/pipelines/issues/6118
951,203,620
MDU6SXNzdWU5NTEyMDM2MjA=
6,118
[bug] Pipeline UI run status does not update
{ "login": "liguodongiot", "id": 13220186, "node_id": "MDQ6VXNlcjEzMjIwMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/13220186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/liguodongiot", "html_url": "https://github.com/liguodongiot", "followers_url": "https://api.github.com/users/liguodongiot/followers", "following_url": "https://api.github.com/users/liguodongiot/following{/other_user}", "gists_url": "https://api.github.com/users/liguodongiot/gists{/gist_id}", "starred_url": "https://api.github.com/users/liguodongiot/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/liguodongiot/subscriptions", "organizations_url": "https://api.github.com/users/liguodongiot/orgs", "repos_url": "https://api.github.com/users/liguodongiot/repos", "events_url": "https://api.github.com/users/liguodongiot/events{/privacy}", "received_events_url": "https://api.github.com/users/liguodongiot/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "Is this a duplicate of https://github.com/kubeflow/pipelines/issues/3763?\r\n@liguodongIOT ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "你好,我是李国冬,我已收到你的邮件,我会尽快给您回复。" ]
2021-07-23T02:49:51
2022-03-03T00:05:32
null
NONE
null
I deployed KFP 1.5 to Kubernetes. When I execute a run, the pipeline UI never updates the run status. ![image](https://user-images.githubusercontent.com/13220186/126731820-c0f77592-8f7c-4be0-8a84-6d79d9582c74.png) --- I checked, and the pod has finished running: ![image](https://user-images.githubusercontent.com/13220186/126731965-5d48bf63-8757-4999-b445-2b1b71684bf6.png) After I restarted Kubernetes and KFP, the status updated normally.
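One way to narrow this down (a hypothetical diagnostic, not from the report): the Argo Workflow object on the cluster is the source of truth, and `ml-pipeline-persistenceagent` is the component that copies its status into the KFP database, so comparing the two tells you whether the sync is what's stuck. The host assumes the `ml-pipeline-ui` port-forward used elsewhere in this document, and the workflow-name lookup is an assumption that may differ on a given install:

```python
# Compare what KFP reports with what Argo reports for the same run.
import kfp
from kubernetes import client, config

RUN_ID = "<run-id-from-the-ui>"  # placeholder

kfp_client = kfp.Client(host="http://localhost:3000")  # assumed port-forward
run = kfp_client.get_run(RUN_ID).run
print("KFP status:", run.status)

config.load_kube_config()
crd = client.CustomObjectsApi()
wf = crd.get_namespaced_custom_object(
    group="argoproj.io", version="v1alpha1", namespace="kubeflow",
    plural="workflows", name=run.name,  # workflow name: assumption, may differ
)
print("Argo phase:", wf["status"]["phase"])
# If Argo says Succeeded while KFP still says Running, inspect or restart
# the ml-pipeline-persistenceagent deployment.
```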
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6118/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6118/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6116
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6116/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6116/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6116/events
https://github.com/kubeflow/pipelines/issues/6116
951,116,471
MDU6SXNzdWU5NTExMTY0NzE=
6,116
How to output Artifacts (Model, Metrics, Dataset, etc.) without using a Python-based component?
{ "login": "parthmishra", "id": 3813311, "node_id": "MDQ6VXNlcjM4MTMzMTE=", "avatar_url": "https://avatars.githubusercontent.com/u/3813311?v=4", "gravatar_id": "", "url": "https://api.github.com/users/parthmishra", "html_url": "https://github.com/parthmishra", "followers_url": "https://api.github.com/users/parthmishra/followers", "following_url": "https://api.github.com/users/parthmishra/following{/other_user}", "gists_url": "https://api.github.com/users/parthmishra/gists{/gist_id}", "starred_url": "https://api.github.com/users/parthmishra/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/parthmishra/subscriptions", "organizations_url": "https://api.github.com/users/parthmishra/orgs", "repos_url": "https://api.github.com/users/parthmishra/repos", "events_url": "https://api.github.com/users/parthmishra/events{/privacy}", "received_events_url": "https://api.github.com/users/parthmishra/received_events", "type": "User", "site_admin": false }
[ { "id": 930619540, "node_id": "MDU6TGFiZWw5MzA2MTk1NDA=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/docs", "name": "area/docs", "color": "d2b48c", "default": false, "description": null }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "Taking HTML visualization as an example, the usage on V2 will look like:\r\n\r\n```\r\n@component \r\ndef write_html(html_artifact: Output[HTML]):\r\n html_content = '<!DOCTYPE html><html><body><h1>Hello world</h1></body></html>'\r\n with open(html_artifact.path, 'w') as f:\r\n f.write(html_content)\r\n```\r\n\r\nHere you specify `Output[]` with artifact type `HTML`. Similarly, you can do it for Model, Metrics, etc.\r\nFor example: https://github.com/kubeflow/pipelines/blob/master/samples/test/metrics_visualization_v2.py#L28\r\n\r\nDoes it work to your usecase?\r\n\r\ncc @chensun ", "> Taking HTML visualization as an example, the usage on V2 will look like:\n> \n> \n> \n> ```\n> \n> @component \n> \n> def write_html(html_artifact: Output[HTML]):\n> \n> html_content = '<!DOCTYPE html><html><body><h1>Hello world</h1></body></html>'\n> \n> with open(html_artifact.path, 'w') as f:\n> \n> f.write(html_content)\n> \n> ```\n> \n> \n> \n> Here you specify `Output[]` with artifact type `HTML`. Similarly, you can do it for Model, Metrics, etc.\n> \n> For example: https://github.com/kubeflow/pipelines/blob/master/samples/test/metrics_visualization_v2.py#L28\n> \n> \n> \n> Does it work to your usecase?\n> \n> \n> \n> cc @chensun \n\nI'm aware that you can do this when creating a component with the v2 decorator, I'm asking if it's possible to do so in a component that was not generated with the v2 decorator. Like a completely separate component that is loaded in by its YAML definition (perhaps even a different language)", "Hi @parthmishra, \r\n\r\nAt this moment, it would be quite challenging for a user to replicate the supports for `Input[Model]`, `Output[Metrics]`, etc. in their custom container.\r\n\r\nHere's a sample code of what the container interface would look like using the v2 `@component` decorator: https://github.com/kubeflow/pipelines/blob/d69b6ae82a8c4afcf4bd3e7d444089302ba23e28/sdk/python/kfp/v2/compiler_cli_tests/test_data/lightweight_python_functions_v2_pipeline.json#L143-L149 (The Python code is inlined in the container commands, but they could be moved to inside the container).\r\n\r\nIf you were able to inspect that code sample, you would find that, other than the user code, the code also contains the entire code from following files:\r\nhttps://github.com/kubeflow/pipelines/blob/d69b6ae82a8c4afcf4bd3e7d444089302ba23e28/sdk/python/kfp/components/executor.py\r\nhttps://github.com/kubeflow/pipelines/blob/d69b6ae82a8c4afcf4bd3e7d444089302ba23e28/sdk/python/kfp/components/executor_main.py\r\nhttps://github.com/kubeflow/pipelines/blob/d69b6ae82a8c4afcf4bd3e7d444089302ba23e28/sdk/python/kfp/dsl/io_types.py\r\n\r\nSo technically, you could implement your own version following this code. But we are not expecting users to do so. \r\n\r\nIt is on our roadmap that we would help users include such code in their own components by packaging and installing the code into their custom container -- assuming the container is Python based. For non-Python based container, we would document the expected interface so that users can follow that to implement their own. ", "@chensun following from this, is it possible to pass something like a pandas DataFrame (as a csv file probably) throughout **custom** components? For example, let's assume there are four components that all do different types of preprocessing for our data; how can we pass the data through without specifying an outside filepath (i.e. 
string that is a GCS path)?", "@chensun \r\n\r\nThanks for the explanation, I think the v2 SDK docs for \"regular\" component building should state that these Artifact types are not useable and that users wishing to implement these inputs/outputs should instead write them using Python-function based components. The [current docs](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/component-development/#designing-a-pipeline-component) are misleading in this regard and make it seem like there is equal feature parity between the two methods of implementing Components. ", "> @chensun following from this, is it possible to pass something like a pandas DataFrame (as a csv file probably) throughout **custom** components? For example, let's assume there are four components that all do different types of preprocessing for our data; how can we pass the data through without specifying an outside filepath (i.e. string that is a GCS path)?\r\n\r\n@nicokuzak \r\nYes, you can pass a DataFrame as a file throughout custom components. And you don't need to provide a GCS path yourself, the system generates such a path.\r\nIf you write component.yaml, you need `{outputPath: output_name}` placeholder, or if you write Python-function based component, you need to type annotate the output like such: `output_name: OutputPath('CSV')`. At runtime, your code should expect a local file path, and you need to dump the DataFrame object into the file. The downstream component can take such output as an input using `{inputPath: input_name}` or `input_name: InputPath('CSV')`. Then you can read from the file, and load it into a DataFrame object.\r\n\r\nOn Vertex Pipelines, the local path is backed by GCS Fuse, meaning it maps to a GCS location like `gs://my-bucket/some-blob`, whatever content you write to the local file will be \"synced\" to the GCS location. ", "> @chensun\r\n> \r\n> Thanks for the explanation, I think the v2 SDK docs for \"regular\" component building should state that these Artifact types are not useable and that users wishing to implement these inputs/outputs should instead write them using Python-function based components. The [current docs](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/component-development/#designing-a-pipeline-component) are misleading in this regard and make it seem like there is equal feature parity between the two methods of implementing Components.\r\n\r\n@parthmishra Thank you for your feedback. Agree that our current doc isn't in a good shape. We will continuously improve our documentation. Meanwhile, we're designing the next generation of component authoring experience that could make these Artifacts types available for custom container components as well.", "I would like to also add that it would be nice to be able to use the v2 Metrics capabilities with the manually written `component.yaml` approach. Since this is not possible, I have to create op from python functions directly if I want to use the Metrics capability (for display in Vertex AI). As mentioned here in v1, this was possible by dumping to a specified JSON file and the Metrics were read from there. If there are any workarounds please let us know, thank you. \r\n\r\nPreferred File structure example as noted:\r\n-build.sh\r\n-component.yaml\r\n-Dockerfile\r\n-src/train.py\r\n\r\nThis structure is optimal for more complex code and portability, however, without Metrics capability, cannot use this approach if Metrics needed. 
", "> I would like to also add that it would be nice to be able to use the v2 Metrics capabilities with the manually written `component.yaml` approach. Since this is not possible, I have to create op from python functions directly if I want to use the Metrics capability (for display in Vertex AI). As mentioned here in v1, this was possible by dumping to a specified JSON file and the Metrics were read from there. If there are any workarounds please let us know, thank you.\r\n> \r\n> Preferred File structure example as noted: -build.sh -component.yaml -Dockerfile -src/train.py\r\n> \r\n> This structure is optimal for more complex code and portability, however, without Metrics capability, cannot use this approach if Metrics needed.\r\n\r\n@vemqar I could be wrong, but If you're using Python for your component, you can use the component decorator to output a `component.yaml` definition file (and specify custom base image as output of `build.sh`). The function you decorate can essentially just be used for serializing inputs, passing them to the rest of your code (e.g. `src/train.py`) and then serializing the outputs. Not ideal as you clutter up the `component.yaml` file with in-lined code, but I don't see why it wouldn't work.\r\n\r\nPerhaps you could also just directly use the KFP SDK to serialize Artifacts, essentially what the in-lined code is doing for Python-function based components anyways.", "Thanks @parthmishra for the advice. I did try to essentially import the `Metrics` class definition into my src/ code , but it doesn't work because it needs to be initialized. I realized that the kfp `Artifact` needs to be initialized with GCP paths which will specify where the Metrics Artifacts will be stored. So what I understand, essentially I would have to myself initialize `Artifact` with a `uri` path then call `Metrics` . If you have a working example though, would appreciate it. I did inspect the `component.yaml` of a function with Metrics Output and didn't see an obvious way to integrate that into a custom written component file. ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "I found a somewhat hacky solution to this problem. I'm using Kubeflow's `Executor` class (which is the one used by function-based components) to easily instantiate the Artifact objects. 
I could iterate through `executor_input` and create all the objects myself, but I think it's a lot more convenient to use `Executor`, even if I'm not using it for what it was designed for.\r\n\r\nYou need to include `{executorInput: null}` in your component.yaml file and your Python script would look something like this:\r\n\r\n```python\r\nfrom kfp.v2.components.executor import Executor\r\nfrom kfp.v2.dsl import Metrics, Model\r\nimport argparse\r\nimport json\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--executor-input\", type=str, required=True)\r\n\r\nargs = parser.parse_args()\r\n\r\n# load the executor arguments\r\nexecutor_input = json.loads(args.executor_input)\r\n\r\n# load the configuration\r\nexecutor = Executor(executor_input, lambda x: x)\r\n\r\n# get the Kubeflow artifact objects\r\nmetrics:Metrics = executor._output_artifacts['metrics']\r\nmodel:Model = executor._output_artifacts['model']\r\n\r\n# log metrics\r\nmetrics.log_metric(\"accuracy\", 0.9)\r\n\r\n# save the model\r\nwith open(model.path, \"w\") as f:\r\n f.write(\"data\")\r\n\r\n# write the executor outputs\r\nexecutor._write_executor_output()\r\n```\r\n\r\nI'm also attaching all the files necessary to run this example, as well as some screenshots to show you that it works (at least on Vertex AI pipelines). Just so we don't have to build and publish a Docker image, I included the Python script in the component.yaml file.\r\n\r\n## Code:\r\n[code.zip](https://github.com/kubeflow/pipelines/files/8186268/code.zip)\r\n\r\n## Screenshots:\r\n\r\n<img width=\"360\" alt=\"Captura de Tela 2022-03-04 às 10 38 54\" src=\"https://user-images.githubusercontent.com/5862030/156774012-459291ae-1bd9-4989-b620-021716356269.png\">\r\n<img width=\"479\" alt=\"Captura de Tela 2022-03-04 às 10 39 07\" src=\"https://user-images.githubusercontent.com/5862030/156774016-3f55ab91-49f5-4cc7-8b48-ebe0d3ab79db.png\">\r\n\r\nEdit: after commenting I realized what I did was kind of what was suggested in https://github.com/kubeflow/pipelines/issues/6116#issuecomment-885506281 . So I just wanted to give them credit.", "@jordyantunes \r\nMan, you're a genius!! Thank you so much!\r\n\r\nWith your permission, I made a few changes to your code to also accept input artifacts and input parameters (that part is a little rusty, but I did it in a rush haha), and put it in a repo so anyone can use it as an example (I'm planning to put it in a Medium article explaining how to implement CI/CD in Vertex Pipelines and of course I will mention you :D) \r\n\r\nhttps://github.com/juansebashr/VertexPipelinesCICD" ]
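For reference, here is a minimal sketch of the CSV/DataFrame hand-off pattern chensun describes in the comments above, written with the v1-style function-to-component API; the component names, base image, and `packages_to_install` pin are illustrative assumptions, not values from the thread:

```python
from kfp.components import create_component_from_func, InputPath, OutputPath

def make_table(table_path: OutputPath('CSV')):
    # Dump the DataFrame to the local file path the system provides;
    # the backend passes the file on to downstream steps.
    import pandas as pd
    pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]}).to_csv(table_path, index=False)

def describe_table(table_path: InputPath('CSV')):
    # Read the upstream output from the local file path the system provides.
    import pandas as pd
    print(pd.read_csv(table_path).describe())

make_table_op = create_component_from_func(
    make_table, base_image='python:3.7', packages_to_install=['pandas'])
describe_table_op = create_component_from_func(
    describe_table, base_image='python:3.7', packages_to_install=['pandas'])
```

The key point is that neither function ever sees a GCS path directly: each receives a local file path chosen by the system, and the backend handles moving the bytes between steps.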
2021-07-22T22:50:35
2022-05-20T15:16:06
null
CONTRIBUTOR
null
Using the v2 SDK in a Vertex Pipelines environment, is it possible to create a reusable component (i.e. manually write a `component.yaml` file) that consumes and/or generates the new Artifact types such as Model, Metrics, Dataset, etc.? My understanding of these Artifact types is that they are a value/path/reference along with associated metadata. When passing or consuming these in a non-Python-based component, I can only reference or generate an Artifact's path and nothing else, it seems. For example, in the v1 SDK, it was possible to generate metrics that could be visualized just by [dumping a JSON object](https://www.kubeflow.org/docs/components/pipelines/sdk/pipelines-metrics/#export-the-metrics-dictionary) to the given output path. This allowed the possibility of using non-Python-based components to generate metrics and other metadata. Is such a thing possible in v2/Vertex Pipelines? If not, is it on the roadmap, or is the recommendation to port all components to lightweight Python components?
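For context, a sketch of the v1-era pattern the question refers to: any container, in any language, could surface metrics by writing a JSON document with this shape to the output path the system supplies for the metrics output. The metric name and value here are illustrative:

```python
import json

def write_metrics(mlpipeline_metrics_path: str) -> None:
    # v1 metrics schema: a top-level 'metrics' list of named entries.
    metrics = {
        'metrics': [
            {
                'name': 'accuracy-score',   # illustrative metric name
                'numberValue': 0.9,         # illustrative value
                'format': 'PERCENTAGE',
            },
        ]
    }
    with open(mlpipeline_metrics_path, 'w') as f:
        json.dump(metrics, f)
```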
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6116/reactions", "total_count": 8, "+1": 8, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6116/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6114
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6114/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6114/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6114/events
https://github.com/kubeflow/pipelines/issues/6114
950,861,735
MDU6SXNzdWU5NTA4NjE3MzU=
6,114
Cache is not reused when component output was previously consumed as file, but is now consumed as value
{ "login": "boarder7395", "id": 37314943, "node_id": "MDQ6VXNlcjM3MzE0OTQz", "avatar_url": "https://avatars.githubusercontent.com/u/37314943?v=4", "gravatar_id": "", "url": "https://api.github.com/users/boarder7395", "html_url": "https://github.com/boarder7395", "followers_url": "https://api.github.com/users/boarder7395/followers", "following_url": "https://api.github.com/users/boarder7395/following{/other_user}", "gists_url": "https://api.github.com/users/boarder7395/gists{/gist_id}", "starred_url": "https://api.github.com/users/boarder7395/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/boarder7395/subscriptions", "organizations_url": "https://api.github.com/users/boarder7395/orgs", "repos_url": "https://api.github.com/users/boarder7395/repos", "events_url": "https://api.github.com/users/boarder7395/events{/privacy}", "received_events_url": "https://api.github.com/users/boarder7395/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1682634265, "node_id": "MDU6TGFiZWwxNjgyNjM0MjY1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/status/wontfix", "name": "status/wontfix", "color": "4d48b5", "default": false, "description": "" } ]
closed
false
{ "login": "Ark-kun", "id": 1829149, "node_id": "MDQ6VXNlcjE4MjkxNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/1829149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ark-kun", "html_url": "https://github.com/Ark-kun", "followers_url": "https://api.github.com/users/Ark-kun/followers", "following_url": "https://api.github.com/users/Ark-kun/following{/other_user}", "gists_url": "https://api.github.com/users/Ark-kun/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ark-kun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ark-kun/subscriptions", "organizations_url": "https://api.github.com/users/Ark-kun/orgs", "repos_url": "https://api.github.com/users/Ark-kun/repos", "events_url": "https://api.github.com/users/Ark-kun/events{/privacy}", "received_events_url": "https://api.github.com/users/Ark-kun/received_events", "type": "User", "site_admin": false }
[ { "login": "Ark-kun", "id": 1829149, "node_id": "MDQ6VXNlcjE4MjkxNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/1829149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ark-kun", "html_url": "https://github.com/Ark-kun", "followers_url": "https://api.github.com/users/Ark-kun/followers", "following_url": "https://api.github.com/users/Ark-kun/following{/other_user}", "gists_url": "https://api.github.com/users/Ark-kun/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ark-kun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ark-kun/subscriptions", "organizations_url": "https://api.github.com/users/Ark-kun/orgs", "repos_url": "https://api.github.com/users/Ark-kun/repos", "events_url": "https://api.github.com/users/Ark-kun/events{/privacy}", "received_events_url": "https://api.github.com/users/Ark-kun/received_events", "type": "User", "site_admin": false } ]
null
[ "Hello. Thank you for finding this issue.\r\n\r\nDue to bandwidth, we do not currently prioritize issue where the cache is NOT reused. (The issues where the cache is reused incorrectly have high priority though.)\r\n\r\nThis issue is difficult to solve given the architecture of the KFP compiler. Even the file passing features `inputPath`/`outputPath` were extremely hard to hack-in.\r\n\r\nIt would be pretty hard for cache server to detect the case and download the artifact value to return as the output value.\r\n\r\nP.S. There are much more serious issues with cache reuse. For example, the cache cannot be reused in the same pipeline due to the unique names of all outputs (another issue of the KFP compiler).", "@Ark-kun For the issue you referenced:\r\n___\r\nP.S. There are much more serious issues with cache reuse. For example, the cache cannot be reused in the same pipeline due to the unique names of all outputs (another issue of the KFP compiler).\r\n___\r\nIs there an existing issue for that I could use for more context? I don't understand what you mean by \"unique names of all outputs\". ", "We solved this issue in v2 compatible mode(fyi: bit.ly/kfp-v2-compatible\r\n). No matter what you use for downstream components, the cache result is the same. We have not fully released kfp v2 compatible mode. You need to wait for Kubeflow Pipeline 1.7.0" ]
2021-07-22T16:53:14
2021-07-30T01:03:09
2021-07-30T00:53:35
CONTRIBUTOR
null
### What steps did you take Start with a pipeline where a component does not pass its output (outputPath) to a subsequent component. Add a new pipeline where an additional component passes its output (outputPath) to a subsequent component that takes the output as an input (inputValue). In this event the subsequent pipeline does not use the cached component because the output Parameters now includes the valueFrom parameter. ### What happened: Cache Key was different resulting in pipeline not using cached component output. ### What did you expect to happen: The pipeline should still use the cached component output. ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? Kubeflow 1.2 * KFP version: 1.0.4 (also tested 1.6.0 looks to still be an issue there as well). * KFP SDK version: 1.6.3 ### Anything else you would like to add: Workflow 1 -------------------- ``` apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: annotations: pipelines.kubeflow.org/kfp_sdk_version: 1.6.5 pipelines.kubeflow.org/pipeline_compilation_time: 2021-07-22T11:05:01.675812 pipelines.kubeflow.org/pipeline_spec: '{"description": "Runs the dask preprocessing of the Misty dataset and then trains a model.", "inputs": [{"default": "s3://athn-ai-ds-mltp-misty-dev/datasets/enron", "name": "s3path", "optional": true, "type": "String"}], "name": "End to End of Misty Pipeline"}' pipelines.kubeflow.org/run_name: Run of misty.dsmltp-359.endtoend (8f03e) creationTimestamp: "2021-07-22T15:05:25Z" generateName: end-to-end-of-misty-pipeline- generation: 4 labels: pipeline/runid: 2c3189c6-ade6-48b5-932f-1f2c9cacdf62 pipelines.kubeflow.org/kfp_sdk_version: 1.6.5 workflows.argoproj.io/phase: Running name: end-to-end-of-misty-pipeline-h5hsq namespace: tkalbach resourceVersion: "2254518" selfLink: /apis/argoproj.io/v1alpha1/namespaces/tkalbach/workflows/end-to-end-of-misty-pipeline-h5hsq uid: 0cc2f56f-3aaa-4326-99f9-9ebc3adcff17 spec: arguments: parameters: - name: s3path value: s3://athn-ai-ds-mltp-misty-dev/datasets/enron entrypoint: end-to-end-of-misty-pipeline serviceAccountName: default-editor templates: - dag: tasks: - arguments: parameters: - name: s3path value: '{{inputs.parameters.s3path}}' name: preprocess template: preprocess - arguments: parameters: - name: preprocess-TFRecords value: '{{tasks.preprocess.outputs.parameters.preprocess-TFRecords}}' dependencies: - preprocess name: train template: train inputs: parameters: - name: s3path metadata: annotations: sidecar.istio.io/inject: "false" labels: pipelines.kubeflow.org/cache_enabled: "true" name: end-to-end-of-misty-pipeline outputs: {} - container: args: - --raw-data - '{{inputs.parameters.s3path}}' - --tfrecords - /tmp/outputs/TFRecords/data - --labels - /tmp/outputs/Labels/data - --vocab - /tmp/outputs/Vocab/data - --dask-image - 473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/dask_preprocess:591db56 - --workers - "5" command: - run_preprocess image: 473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/dask_preprocess:591db56 name: "" resources: {} inputs: parameters: - name: s3path metadata: annotations: pipelines.kubeflow.org/arguments.parameters: '{"Raw Data": "{{inputs.parameters.s3path}}", "Workers": "5"}' pipelines.kubeflow.org/component_ref: '{"digest": "bbb3a746e8bd1c3e71d6fdbc64d9c54df47ecbbdaead27bc982a1f6f6c6030b6"}' pipelines.kubeflow.org/component_spec: '{"description": "Run the preprocessing of Enron dataset.", "implementation": {"container": {"args": ["--raw-data", 
{"inputValue": "Raw Data"}, "--tfrecords", {"outputPath": "TFRecords"}, "--labels", {"outputPath": "Labels"}, "--vocab", {"outputPath": "Vocab"}, "--dask-image", "473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/dask_preprocess:591db56", "--workers", {"inputValue": "Workers"}], "command": ["run_preprocess"], "image": "473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/dask_preprocess:591db56"}}, "inputs": [{"description": "Path where raw data is stored.", "name": "Raw Data", "type": "String"}, {"description": "The number of workers to use for this run.", "name": "Workers", "type": "Integer"}], "name": "Preprocess", "outputs": [{"description": "The TFRecord files.", "name": "TFRecords", "type": "String"}, {"description": "The indexed label file.", "name": "Labels", "type": "String"}, {"description": "The vocab JSON file.", "name": "Vocab", "type": "String"}]}' pipelines.kubeflow.org/task_display_name: Preprocess Dataset. sidecar.istio.io/inject: "false" labels: clean: "true" pipelines.kubeflow.org/cache_enabled: "true" pipelines.kubeflow.org/enable_caching: "true" pipelines.kubeflow.org/kfp_sdk_version: 1.6.5 pipelines.kubeflow.org/pipeline-sdk-type: kfp name: preprocess nodeSelector: instanceType: m5.xlarge procurement: ondemand tenant: misty outputs: artifacts: - name: preprocess-Labels path: /tmp/outputs/Labels/data - name: preprocess-TFRecords path: /tmp/outputs/TFRecords/data - name: preprocess-Vocab path: /tmp/outputs/Vocab/data parameters: - name: preprocess-TFRecords valueFrom: path: /tmp/outputs/TFRecords/data tolerations: - effect: NoSchedule key: tenant operator: Equal value: misty - container: args: - --input - '{{inputs.parameters.preprocess-TFRecords}}' - --epochs - "1" - --learning-rate - "0.001" - --model-dir - /tmp/outputs/Model_dir/data - --tensorboard-dir - /tmp/outputs/Tensorboard_dir/data - --port - "8080" command: - run_train image: 473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/train:c662f3f name: "" ports: - containerPort: 8080 hostPort: 8080 resources: limits: nvidia.com/gpu: "1" inputs: parameters: - name: preprocess-TFRecords metadata: annotations: pipelines.kubeflow.org/arguments.parameters: '{"Dataset": "{{inputs.parameters.preprocess-TFRecords}}", "Epochs": "1", "Learning Rate": "0.001", "Port": "8080"}' pipelines.kubeflow.org/component_ref: '{"digest": "1e57e72eb3cb918ff2519cd4e0e828b72521453ad98b397e58866b16b395018d"}' pipelines.kubeflow.org/component_spec: '{"description": "Run the training of Enron Model.", "implementation": {"container": {"args": ["--input", {"inputValue": "Dataset"}, "--epochs", {"inputValue": "Epochs"}, "--learning-rate", {"inputValue": "Learning Rate"}, "--model-dir", {"outputPath": "Model dir"}, "--tensorboard-dir", {"outputPath": "Tensorboard dir"}, "--port", {"inputValue": "Port"}], "command": ["run_train"], "image": "473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/train:c662f3f"}}, "inputs": [{"description": "Path where raw data is stored.", "name": "Dataset", "type": "String"}, {"description": "Number of epochs to train for.", "name": "Epochs", "type": "Integer"}, {"description": "Learning rate to use while training.", "name": "Learning Rate", "type": "Float"}, {"description": "The port to start tensorboard on.", "name": "Port", "type": "Integer"}], "name": "Train", "outputs": [{"description": "Path to save the model.", "name": "Model dir", "type": "String"}, {"description": "Path to save the tensorboard logs to.", "name": "Tensorboard dir", "type": "String"}]}' 
pipelines.kubeflow.org/task_display_name: Train Model. sidecar.istio.io/inject: "false" labels: clean: "true" pipelines.kubeflow.org/cache_enabled: "true" pipelines.kubeflow.org/enable_caching: "true" pipelines.kubeflow.org/kfp_sdk_version: 1.6.5 pipelines.kubeflow.org/pipeline-sdk-type: kfp name: train nodeSelector: instanceType: p2.xlarge procurement: ondemand tenant: misty outputs: artifacts: - name: train-Model-dir path: /tmp/outputs/Model_dir/data - name: train-Tensorboard-dir path: /tmp/outputs/Tensorboard_dir/data tolerations: - effect: NoSchedule key: tenant operator: Equal value: misty - effect: NoSchedule key: processing operator: Equal value: gpu status: finishedAt: null nodes: end-to-end-of-misty-pipeline-h5hsq: children: - end-to-end-of-misty-pipeline-h5hsq-2090586888 displayName: end-to-end-of-misty-pipeline-h5hsq finishedAt: null id: end-to-end-of-misty-pipeline-h5hsq inputs: parameters: - name: s3path value: s3://athn-ai-ds-mltp-misty-dev/datasets/enron name: end-to-end-of-misty-pipeline-h5hsq phase: Running startedAt: "2021-07-22T15:05:25Z" templateName: end-to-end-of-misty-pipeline type: DAG end-to-end-of-misty-pipeline-h5hsq-2090586888: boundaryID: end-to-end-of-misty-pipeline-h5hsq displayName: preprocess finishedAt: null id: end-to-end-of-misty-pipeline-h5hsq-2090586888 inputs: parameters: - name: s3path value: s3://athn-ai-ds-mltp-misty-dev/datasets/enron name: end-to-end-of-misty-pipeline-h5hsq.preprocess phase: Running startedAt: "2021-07-22T15:05:25Z" templateName: preprocess type: Pod phase: Running startedAt: "2021-07-22T15:05:25Z" ``` Workflow 2 ----------------- ``` apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: annotations: pipelines.kubeflow.org/kfp_sdk_version: 1.6.5 pipelines.kubeflow.org/pipeline_compilation_time: 2021-07-22T11:05:01.675812 pipelines.kubeflow.org/pipeline_spec: '{"description": "Runs the dask preprocessing of the Misty dataset and then trains a model.", "inputs": [{"default": "s3://athn-ai-ds-mltp-misty-dev/datasets/enron", "name": "s3path", "optional": true, "type": "String"}], "name": "End to End of Misty Pipeline"}' pipelines.kubeflow.org/run_name: Run of misty.dsmltp-359.endtoend (8f03e) creationTimestamp: "2021-07-22T15:05:25Z" generateName: end-to-end-of-misty-pipeline- generation: 4 labels: pipeline/runid: 2c3189c6-ade6-48b5-932f-1f2c9cacdf62 pipelines.kubeflow.org/kfp_sdk_version: 1.6.5 workflows.argoproj.io/phase: Running name: end-to-end-of-misty-pipeline-h5hsq namespace: tkalbach resourceVersion: "2254518" selfLink: /apis/argoproj.io/v1alpha1/namespaces/tkalbach/workflows/end-to-end-of-misty-pipeline-h5hsq uid: 0cc2f56f-3aaa-4326-99f9-9ebc3adcff17 spec: arguments: parameters: - name: s3path value: s3://athn-ai-ds-mltp-misty-dev/datasets/enron entrypoint: end-to-end-of-misty-pipeline serviceAccountName: default-editor templates: - dag: tasks: - arguments: parameters: - name: s3path value: '{{inputs.parameters.s3path}}' name: preprocess template: preprocess - arguments: parameters: - name: preprocess-TFRecords value: '{{tasks.preprocess.outputs.parameters.preprocess-TFRecords}}' dependencies: - preprocess name: train template: train inputs: parameters: - name: s3path metadata: annotations: sidecar.istio.io/inject: "false" labels: pipelines.kubeflow.org/cache_enabled: "true" name: end-to-end-of-misty-pipeline outputs: {} - container: args: - --raw-data - '{{inputs.parameters.s3path}}' - --tfrecords - /tmp/outputs/TFRecords/data - --labels - /tmp/outputs/Labels/data - --vocab - /tmp/outputs/Vocab/data - --dask-image - 
473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/dask_preprocess:591db56 - --workers - "5" command: - run_preprocess image: 473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/dask_preprocess:591db56 name: "" resources: {} inputs: parameters: - name: s3path metadata: annotations: pipelines.kubeflow.org/arguments.parameters: '{"Raw Data": "{{inputs.parameters.s3path}}", "Workers": "5"}' pipelines.kubeflow.org/component_ref: '{"digest": "bbb3a746e8bd1c3e71d6fdbc64d9c54df47ecbbdaead27bc982a1f6f6c6030b6"}' pipelines.kubeflow.org/component_spec: '{"description": "Run the preprocessing of Enron dataset.", "implementation": {"container": {"args": ["--raw-data", {"inputValue": "Raw Data"}, "--tfrecords", {"outputPath": "TFRecords"}, "--labels", {"outputPath": "Labels"}, "--vocab", {"outputPath": "Vocab"}, "--dask-image", "473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/dask_preprocess:591db56", "--workers", {"inputValue": "Workers"}], "command": ["run_preprocess"], "image": "473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/dask_preprocess:591db56"}}, "inputs": [{"description": "Path where raw data is stored.", "name": "Raw Data", "type": "String"}, {"description": "The number of workers to use for this run.", "name": "Workers", "type": "Integer"}], "name": "Preprocess", "outputs": [{"description": "The TFRecord files.", "name": "TFRecords", "type": "String"}, {"description": "The indexed label file.", "name": "Labels", "type": "String"}, {"description": "The vocab JSON file.", "name": "Vocab", "type": "String"}]}' pipelines.kubeflow.org/task_display_name: Preprocess Dataset. sidecar.istio.io/inject: "false" labels: clean: "true" pipelines.kubeflow.org/cache_enabled: "true" pipelines.kubeflow.org/enable_caching: "true" pipelines.kubeflow.org/kfp_sdk_version: 1.6.5 pipelines.kubeflow.org/pipeline-sdk-type: kfp name: preprocess nodeSelector: instanceType: m5.xlarge procurement: ondemand tenant: misty outputs: artifacts: - name: preprocess-Labels path: /tmp/outputs/Labels/data - name: preprocess-TFRecords path: /tmp/outputs/TFRecords/data - name: preprocess-Vocab path: /tmp/outputs/Vocab/data parameters: - name: preprocess-TFRecords valueFrom: path: /tmp/outputs/TFRecords/data tolerations: - effect: NoSchedule key: tenant operator: Equal value: misty - container: args: - --input - '{{inputs.parameters.preprocess-TFRecords}}' - --epochs - "1" - --learning-rate - "0.001" - --model-dir - /tmp/outputs/Model_dir/data - --tensorboard-dir - /tmp/outputs/Tensorboard_dir/data - --port - "8080" command: - run_train image: 473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/train:c662f3f name: "" ports: - containerPort: 8080 hostPort: 8080 resources: limits: nvidia.com/gpu: "1" inputs: parameters: - name: preprocess-TFRecords metadata: annotations: pipelines.kubeflow.org/arguments.parameters: '{"Dataset": "{{inputs.parameters.preprocess-TFRecords}}", "Epochs": "1", "Learning Rate": "0.001", "Port": "8080"}' pipelines.kubeflow.org/component_ref: '{"digest": "1e57e72eb3cb918ff2519cd4e0e828b72521453ad98b397e58866b16b395018d"}' pipelines.kubeflow.org/component_spec: '{"description": "Run the training of Enron Model.", "implementation": {"container": {"args": ["--input", {"inputValue": "Dataset"}, "--epochs", {"inputValue": "Epochs"}, "--learning-rate", {"inputValue": "Learning Rate"}, "--model-dir", {"outputPath": "Model dir"}, "--tensorboard-dir", {"outputPath": "Tensorboard dir"}, "--port", {"inputValue": "Port"}], "command": ["run_train"], "image": 
"473391520281.dkr.ecr.us-east-1.amazonaws.com/misty/dsmltp-359/train:c662f3f"}}, "inputs": [{"description": "Path where raw data is stored.", "name": "Dataset", "type": "String"}, {"description": "Number of epochs to train for.", "name": "Epochs", "type": "Integer"}, {"description": "Learning rate to use while training.", "name": "Learning Rate", "type": "Float"}, {"description": "The port to start tensorboard on.", "name": "Port", "type": "Integer"}], "name": "Train", "outputs": [{"description": "Path to save the model.", "name": "Model dir", "type": "String"}, {"description": "Path to save the tensorboard logs to.", "name": "Tensorboard dir", "type": "String"}]}' pipelines.kubeflow.org/task_display_name: Train Model. sidecar.istio.io/inject: "false" labels: clean: "true" pipelines.kubeflow.org/cache_enabled: "true" pipelines.kubeflow.org/enable_caching: "true" pipelines.kubeflow.org/kfp_sdk_version: 1.6.5 pipelines.kubeflow.org/pipeline-sdk-type: kfp name: train nodeSelector: instanceType: p2.xlarge procurement: ondemand tenant: misty outputs: artifacts: - name: train-Model-dir path: /tmp/outputs/Model_dir/data - name: train-Tensorboard-dir path: /tmp/outputs/Tensorboard_dir/data tolerations: - effect: NoSchedule key: tenant operator: Equal value: misty - effect: NoSchedule key: processing operator: Equal value: gpu status: finishedAt: null nodes: end-to-end-of-misty-pipeline-h5hsq: children: - end-to-end-of-misty-pipeline-h5hsq-2090586888 displayName: end-to-end-of-misty-pipeline-h5hsq finishedAt: null id: end-to-end-of-misty-pipeline-h5hsq inputs: parameters: - name: s3path value: s3://athn-ai-ds-mltp-misty-dev/datasets/enron name: end-to-end-of-misty-pipeline-h5hsq phase: Running startedAt: "2021-07-22T15:05:25Z" templateName: end-to-end-of-misty-pipeline type: DAG end-to-end-of-misty-pipeline-h5hsq-2090586888: boundaryID: end-to-end-of-misty-pipeline-h5hsq displayName: preprocess finishedAt: null id: end-to-end-of-misty-pipeline-h5hsq-2090586888 inputs: parameters: - name: s3path value: s3://athn-ai-ds-mltp-misty-dev/datasets/enron name: end-to-end-of-misty-pipeline-h5hsq.preprocess phase: Running startedAt: "2021-07-22T15:05:25Z" templateName: preprocess type: Pod phase: Running startedAt: "2021-07-22T15:05:25Z" ``` ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6114/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6114/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6113
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6113/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6113/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6113/events
https://github.com/kubeflow/pipelines/issues/6113
950,778,064
MDU6SXNzdWU5NTA3NzgwNjQ=
6,113
[sdk] get_user_namespace method returns user not namespace
{ "login": "rawc0der", "id": 1760897, "node_id": "MDQ6VXNlcjE3NjA4OTc=", "avatar_url": "https://avatars.githubusercontent.com/u/1760897?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rawc0der", "html_url": "https://github.com/rawc0der", "followers_url": "https://api.github.com/users/rawc0der/followers", "following_url": "https://api.github.com/users/rawc0der/following{/other_user}", "gists_url": "https://api.github.com/users/rawc0der/gists{/gist_id}", "starred_url": "https://api.github.com/users/rawc0der/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rawc0der/subscriptions", "organizations_url": "https://api.github.com/users/rawc0der/orgs", "repos_url": "https://api.github.com/users/rawc0der/repos", "events_url": "https://api.github.com/users/rawc0der/events{/privacy}", "received_events_url": "https://api.github.com/users/rawc0der/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "That's strange. Do you have any other code between `client.set_user_namespace('my-namespace')` and `client.get_user_namespace()`?\r\n\r\nIf you look at the code, they just set/read a dictionary entry using the same key. (The dictionary can be dump to or load from a local file).\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/27051ab9d1c9e8ca1464c17b10d12ed73c8227f1/sdk/python/kfp/_client.py#L335-L347\r\n\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/27051ab9d1c9e8ca1464c17b10d12ed73c8227f1/sdk/python/kfp/_client.py#L372-L378", "Hey @chensun,\r\n\r\nThanks for the reply. I discovered the source for this, it's in my custom scripts .. \r\n\r\nThere was a faulty line overriding the namespace as the username.\r\n```\r\nnamespace = user or default_data.get(\"namespace\")\r\n```\r\n\r\nClosing this as it's not relevant! Thanks again." ]
2021-07-22T15:15:04
2021-07-23T13:40:36
2021-07-23T13:40:36
NONE
null
### Environment * KFP version: 1.6.0 <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP SDK version: 1.6.3 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> **All dependencies version:** <!-- Specify the output of the following shell command: $pip list | grep kfp --> * kfp-pipeline-spec 0.1.8 * kfp-server-api 1.6.0 ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> Creating a client using the standard constructor and calling the method `get_user_namespace` actually returns the user for the session and not the namespace. ``` client = kfp.Client() client.set_user_namespace('my-namespace') client.get_user_namespace() != 'my-namespace' ``` ### Expected result <!-- What should the correct behavior be? --> The `get_user_namespace()` method should return the namespace set for the current authenticated session. ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> I am authenticating with a custom script that uses a cookie-based session, `kfp.Client(host=kfp_host_url, cookies=cookie)`, and the username is added in Dex as a static member. The namespace is different from the username (in my case the user is `ci@company.com` and the namespace is `ci`). The `set_user_namespace` method doesn't seem to be changing session state, and somehow `get_user_namespace` is hardwired to return the user of the session; _maybe some other bug is responsible_, trying to wrap my head around the source of this. Has anybody else encountered this 🐝 havior? @Ark-kun any ideas? Thanks! --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6113/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6113/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6112
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6112/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6112/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6112/events
https://github.com/kubeflow/pipelines/issues/6112
950,775,279
MDU6SXNzdWU5NTA3NzUyNzk=
6,112
[bug] Kubeflow building component tutorial get error when compiling with kfp.v2.compiler
{ "login": "botingw", "id": 33360357, "node_id": "MDQ6VXNlcjMzMzYwMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/33360357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/botingw", "html_url": "https://github.com/botingw", "followers_url": "https://api.github.com/users/botingw/followers", "following_url": "https://api.github.com/users/botingw/following{/other_user}", "gists_url": "https://api.github.com/users/botingw/gists{/gist_id}", "starred_url": "https://api.github.com/users/botingw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/botingw/subscriptions", "organizations_url": "https://api.github.com/users/botingw/orgs", "repos_url": "https://api.github.com/users/botingw/repos", "events_url": "https://api.github.com/users/botingw/events{/privacy}", "received_events_url": "https://api.github.com/users/botingw/received_events", "type": "User", "site_admin": false }
[ { "id": 930619540, "node_id": "MDU6TGFiZWw5MzA2MTk1NDA=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/docs", "name": "area/docs", "color": "d2b48c", "default": false, "description": null }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[]
2021-07-22T15:12:23
2021-08-17T14:57:43
2021-08-17T14:57:43
NONE
null
### What steps did you take <!-- A clear and concise description of what the bug is.--> kfp = 1.6.4 I am trying to test using my custom component in Vertex AI pipelines. I followed the steps in this [link](https://www.kubeflow.org/docs/components/pipelines/sdk/component-development/) to build my container and upload it to Google Container Registry. Then I use the following code to build my pipeline ` def my_pipeline(): get_lines_step = create_step_get_lines( # Input name "Input 1" is converted to pythonic parameter name "input_1" input_1='one\ntwo\nthree\nfour\nfive\nsix\nseven\neight\nnine\nten', parameter_1='5', ) ` and compile the pipeline ` from kfp.v2 import compiler if __name__ == "__main__": pipeline_func = my_pipeline pipeline_path = 'workflow_get_lines.py' + '.json' compiler.Compiler().compile( pipeline_func=pipeline_func, package_path=pipeline_path ) ` Then I cannot compile the pipeline. Using kfp.compiler does not work either, as the Vertex AI code ` from kfp.v2.google.client import AIPlatformClient # noqa: F811 api_client = AIPlatformClient( project_id=PROJECT_ID, region=REGION, ) ` ` response = api_client.create_run_from_job_spec( pipeline_path, pipeline_root=PIPELINE_ROOT, ) ` does not support it. I also cannot find any Vertex AI tutorial that builds a component with a script and a Dockerfile. The only way the tutorials show is building a component from a Python function with @component. I think it is difficult to build a complicated component using only functions. ### What happened: I get the following error: ...... /opt/conda/lib/python3.7/site-packages/kfp/dsl/_component_bridge.py in _input_artifact_path_placeholder(input_key) 377 raise TypeError('Input "{}" with type "{}" cannot be paired with ' 378 'InputPathPlaceholder.'.format( --> 379 input_key, inputs_dict[input_key].type)) 380 else: 381 return "{{{{$.inputs.artifacts['{}'].path}}}}".format(input_key) TypeError: Input "Input 1" with type "String" cannot be paired with InputPathPlaceholder. ### What did you expect to happen: The pipeline to compile. ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> I try to use the Google Vertex AI Python SDK to deploy the pipeline to Vertex AI pipelines * KFP version: <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> kfp = 1.6.4 ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> /area sdk <!-- /area testing --> <!-- /area samples --> /area components --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
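The traceback says the v2 compiler refuses to pair a `String`-typed input with `{inputPath: ...}`. Below is a hedged sketch of a component spec that compiles by binding the string inputs with `{inputValue: ...}` instead; the image, script path, and flag names are placeholders, not the tutorial's actual values:

```python
from kfp.components import load_component_from_text

# String/Integer inputs are bound by value; only the output keeps a path
# placeholder, which is what the v2 compiler accepts for parameter types.
create_step_get_lines = load_component_from_text("""
name: Get Lines
inputs:
- {name: input_1, type: String}
- {name: parameter_1, type: Integer}
outputs:
- {name: output_1}
implementation:
  container:
    image: gcr.io/my-project/get-lines:latest
    command: [python3, /pipelines/component/src/program.py]
    args: [
      --text, {inputValue: input_1},
      --lines, {inputValue: parameter_1},
      --output-path, {outputPath: output_1},
    ]
""")
```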
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6112/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6112/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6110
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6110/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6110/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6110/events
https://github.com/kubeflow/pipelines/issues/6110
950,635,054
MDU6SXNzdWU5NTA2MzUwNTQ=
6,110
KFP v2 tracker
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 2152751095, "node_id": "MDU6TGFiZWwyMTUyNzUxMDk1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/frozen", "name": "lifecycle/frozen", "color": "ededed", "default": false, "description": null } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-22T13:03:58
2023-08-15T17:03:58
null
CONTRIBUTOR
null
Design: [bit.ly/kfp-v2](https://bit.ly/kfp-v2). Project: https://github.com/kubeflow/pipelines/projects/9 Tracking by major release milestones: * [x] KFP 2.0 Alpha * [x] #8697 * [x] #8698 * [x] #8802 * [x] #8815 * [x] #8818 * [ ] #8858 * v2 API integration * [x] #8703 * [x] #8706 * [ ] #8705 * Feature: Volume * [x] #8707 * [x] #8758 * [x] #8708 * [x] #8715 * Feature: Secret * [x] #8707 * [x] #8758 * [x] #8709 * Feature: User log * [x] #8714 * Feature: Optional inputs & default value * [x] #8716 * Feature: Environment variable * [x] #8704 * Feature: Accelerator config * [x] #7043 * Feature: resource requests * [x] #7047 * Feature: nodeSelector * [x] #8959 * Non-feature: Documentation * [ ] #8713 * Non-feature: Testing & Release * [ ] #6168 * [ ] #6184 * Non-feature: Backend data model * [x] #8736 * Bugs: * [x] #8724 * [x] #8725 * Security * [x] #9136 * [ ] KFP 2.0 Post-GA * Non-feature: Telemetry * [ ] #8712 * Feature: Status-IR and MLMD calls * [x] #8960 * [x] #8961 * [ ] #8964 * [ ] #8965 * [ ] #8966 * [ ] #8710 * [x] #8711 * [ ] #8721 * [ ] #6155 * [ ] #8717 * [ ] #8718 * [ ] #8719 * [ ] #8720 * [ ] #8833 * [ ] TODO(chesu): ... -------- Tracking by feature/area: * [ ] v2 orchestration engine * [x] #6149 * [x] #6150 * [x] #6156 * [x] #6157 * [x] #6163 * [ ] v2 e2e testing * [x] #6167 * [ ] #6168 * [ ] misc * [x] #6354 * [x] #6183 * [ ] #6184 * [x] https://github.com/kubeflow/pipelines/issues/6185 * [ ] #7058 * [ ] #6198 * [x] #6199 * [x] #7104 * [x] #8832
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6110/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6110/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6108
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6108/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6108/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6108/events
https://github.com/kubeflow/pipelines/issues/6108
950,416,013
MDU6SXNzdWU5NTA0MTYwMTM=
6,108
[feature] Support input artifact value placeholder in v2 compatible mode.
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[ "Sure, I can take this.", "I'm scheduling to 1.8 release, do you want it earlier?", "Closing this feature request, as we no longer want to move forward with https://github.com/kubeflow/pipelines/pull/6080." ]
2021-07-22T08:21:42
2021-07-26T22:37:14
2021-07-26T22:37:14
COLLABORATOR
null
Follow-up on https://github.com/kubeflow/pipelines/pull/6080 To make legacy data passing work in v2 compatible mode, we need to support the `{{{{$.inputs.artifacts['{}'].value}}}}` placeholder in the v2 launcher.
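For orientation, a sketch of what resolving that placeholder could look like inside a launcher: find each rendered `{{$.inputs.artifacts['name'].value}}` occurrence (the quadruple braces above come from a Python format string) and substitute the content of the corresponding local artifact file. The regex and path map are illustrative, not the launcher's actual code:

```python
import re

PLACEHOLDER = re.compile(r"\{\{\$\.inputs\.artifacts\['([^']+)'\]\.value\}\}")

def resolve_artifact_values(arg: str, artifact_paths: dict) -> str:
    # Substitute each placeholder with the content of its artifact file.
    def substitute(match):
        with open(artifact_paths[match.group(1)]) as f:
            return f.read()
    return PLACEHOLDER.sub(substitute, arg)

# e.g. resolve_artifact_values("--msg={{$.inputs.artifacts['text'].value}}",
#                              {'text': '/tmp/inputs/text/data'})
```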
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6108/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6108/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6107
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6107/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6107/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6107/events
https://github.com/kubeflow/pipelines/issues/6107
950,205,865
MDU6SXNzdWU5NTAyMDU4NjU=
6,107
[sdk] Can't access local files on local KF deployment
{ "login": "Pedrohgv", "id": 27008096, "node_id": "MDQ6VXNlcjI3MDA4MDk2", "avatar_url": "https://avatars.githubusercontent.com/u/27008096?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Pedrohgv", "html_url": "https://github.com/Pedrohgv", "followers_url": "https://api.github.com/users/Pedrohgv/followers", "following_url": "https://api.github.com/users/Pedrohgv/following{/other_user}", "gists_url": "https://api.github.com/users/Pedrohgv/gists{/gist_id}", "starred_url": "https://api.github.com/users/Pedrohgv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Pedrohgv/subscriptions", "organizations_url": "https://api.github.com/users/Pedrohgv/orgs", "repos_url": "https://api.github.com/users/Pedrohgv/repos", "events_url": "https://api.github.com/users/Pedrohgv/events{/privacy}", "received_events_url": "https://api.github.com/users/Pedrohgv/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Kubernetes containers running on the usually remote compute nodes are not expected to get access to any local files.\r\n\r\nPlease read the Data-passing tutorial for [command-line programs](https://github.com/Ark-kun/kfp_samples/blob/ae1a5b6/2019-10%20Kubeflow%20summit/106%20-%20Creating%20components%20from%20command-line%20programs/106%20-%20Creating%20components%20from%20command-line%20programs.ipynb) and [Python-based components]( https://github.com/kubeflow/pipelines/blob/d902cae/samples/tutorials/Data%20passing%20in%20python%20components.ipynb).\r\n\r\nIn general you always need to put your data files to some place where they have a URI, then use some \"downloader\" component to import that data into the pipeline.\r\n\r\nP.S. This naming here is not great. You seem to be getting a path of a path:\r\n```\r\n --input-path, {inputPath: input_path}, \r\n --output-path, {outputPath: output_path},\r\n```\r\nThis naming is better:\r\n```\r\n --input-path, {inputPath: input1}, \r\n --output-path, {outputPath: output1},\r\n```", "I see, will take a look at the tutorials.\r\n\r\nThanks!" ]
2021-07-22T00:31:47
2021-07-23T09:30:33
2021-07-23T09:30:33
NONE
null
### Environment * KFP version: 1.6.5 * KFP SDK version: 1.6.0 * All dependencies version: kfp 1.6.5 kfp-pipeline-spec 0.1.8 kfp-server-api 1.6.0 ### Steps to reproduce Running Kubeflow on a K3s cluster on my machine. I built a simple Python function that takes as input: - An input folder path that contains some files to be transformed - An output folder that will contain the transformed files - A parameter to the transformation itself. The inputs are passed to the Python code using the argparse library: parser.add_argument('--input-path', type=str, required=True) parser.add_argument('--output-path', type=str, required=True) parser.add_argument('--classes', nargs='+', required=True) I wrapped the Python code in a container, tested it with mounted volumes, and it works as expected. I wrote the .yaml file to build the pipeline component using the inputPath/outputPath placeholders for the input and output paths. My interface looks like this: ``` inputs: - {name: input_path, type: string} - {name: output_path, type: string} - {name: classes, type: list} outputs: - {name: output_path, type: string, description: 'Output path to save both images and Yolo converted annotations.'} ``` My command and args sections look like this: ``` command: [python3, ./src/pascal_2_yolo_converter.py] args: [ --input-path, {inputPath: input_path}, --output-path, {outputPath: output_path}, --classes, {inputValue: classes}, ] ``` I'm trying to make the pipeline (composed in this example of only this simple component) read the files from a local folder, transform them, and write the transformed files to another local folder. When uploading the pipeline to the Kubeflow Pipelines UI and filling the input fields, I do not get the expected results (the creation of the output folder with the transformed files). By modifying the code to print those inputs I can see in the logs that the 'classes' parameter is correctly managed, but the input and output paths are printed as '/tmp/inputs/input_path/data' and '/tmp/outputs/output_path/data' respectively. I expected that using local files with Kubeflow would be tricky because the applications are containerized, but I don't understand what's going on with the input/output. Am I doing something wrong? Is there a better way to work with files locally on a local K8s cluster? Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
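For clarity, here is a sketch of the container program's side of the contract: `--input-path` and `--output-path` are opaque local paths chosen by the backend (hence `/tmp/inputs/input_path/data`), and the program should simply read and write there. The sketch simplifies the reported component to a single input file and a stand-in transformation:

```python
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('--input-path', type=str, required=True)
parser.add_argument('--output-path', type=str, required=True)
args = parser.parse_args()

# The backend materializes the input at args.input_path and collects whatever
# is written to args.output_path; neither is a user-chosen local folder.
out_dir = os.path.dirname(args.output_path)
if out_dir:
    os.makedirs(out_dir, exist_ok=True)
with open(args.input_path) as src, open(args.output_path, 'w') as dst:
    dst.write(src.read().upper())  # stand-in for the real transformation
```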
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6107/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6107/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6103
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6103/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6103/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6103/events
https://github.com/kubeflow/pipelines/issues/6103
949,865,044
MDU6SXNzdWU5NDk4NjUwNDQ=
6,103
[feature] Post Publish Hooks to capture Lineage in external systems.
{ "login": "Nagarajj", "id": 135624, "node_id": "MDQ6VXNlcjEzNTYyNA==", "avatar_url": "https://avatars.githubusercontent.com/u/135624?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Nagarajj", "html_url": "https://github.com/Nagarajj", "followers_url": "https://api.github.com/users/Nagarajj/followers", "following_url": "https://api.github.com/users/Nagarajj/following{/other_user}", "gists_url": "https://api.github.com/users/Nagarajj/gists{/gist_id}", "starred_url": "https://api.github.com/users/Nagarajj/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Nagarajj/subscriptions", "organizations_url": "https://api.github.com/users/Nagarajj/orgs", "repos_url": "https://api.github.com/users/Nagarajj/repos", "events_url": "https://api.github.com/users/Nagarajj/events{/privacy}", "received_events_url": "https://api.github.com/users/Nagarajj/received_events", "type": "User", "site_admin": false }
[ { "id": 930476737, "node_id": "MDU6TGFiZWw5MzA0NzY3Mzc=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/help%20wanted", "name": "help wanted", "color": "db1203", "default": true, "description": "The community is welcome to contribute." }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @Nagarajj, thank you for drafting this detailed proposal!\r\n\r\n> Specified as a container which runs after publisher actions (Python Container rather than Go Binary).\r\n\r\nAre you OK with the perf implication? Because a container will have to be a separate Pod, and the overhead of scheduling & starting a Pod is at least 3~5 seconds from what I observe on GKE.\r\n\r\nMaybe we can improve using https://argoproj.github.io/argo-workflows/container-set-template/, but it's an Alpha feature and it's only supported by argo emissary executor.\r\n\r\nI was talking about go binary as an option, because it would have minimal perf overhead. A binary can be mounted in a volume to the same main container and executed after your main command (and typically it should finish really fast).", "> Synchronous execution for reliability reasons (Synchronous over asynchronous).\r\n\r\n+1, I've heard similar requirements before", "> output values/artifacts, input values/artifacts\r\n\r\nDo you need access to the artifact files? This will also be not very efficient if postpublish hook runs on a separate Pod.", "On the highest level, we have two objectives:\r\n\r\n1. Run container before and after each user component as part of our custom caching layer. This can be further optimized by the built-in caching logic because starting new containers lead to performance overhead. Is it possible to override the built-in caching logic with customized code? \r\n\r\n2. For each component, we want to capture where its arguments come from to build data lineage. This is particularly critical to reproducibility and debugging. Unfortunately, right now there's no way to achieve such things.\r\n", "> Hi @Nagarajj, thank you for drafting this detailed proposal!\r\n> \r\n> > Specified as a container which runs after publisher actions (Python Container rather than Go Binary).\r\n> \r\n> Are you OK with the perf implication? Because a container will have to be a separate Pod, and the overhead of scheduling & starting a Pod is at least 3~5 seconds from what I observe on GKE.\r\n> \r\n> Maybe we can improve using https://argoproj.github.io/argo-workflows/container-set-template/, but it's an Alpha feature and it's only supported by argo emissary executor.\r\n> \r\n> I was talking about go binary as an option, because it would have minimal perf overhead. A binary can be mounted in a volume to the same main container and executed after your main command (and typically it should finish really fast).\r\n\r\nThanks @Bobgy for looking into this. \r\n\r\nPerf should not be an implication in our case, as these are batch pipelines. The only concern with go binary is, Python has low barrier of entry/maintenance.\r\nYes, ContainerSet Template would do good for these scenarios. ", "> > output values/artifacts, input values/artifacts\r\n> \r\n> Do you need access to the artifact files? This will also be not very efficient if postpublish hook runs on a separate Pod.\r\n\r\n@Bobgy Would ContainerSet Template help here as well ?", "Yes, I believe ContainerSet template seems like a good fit.\r\nIt's just released as Alpha in argo, so encourage anyone interested to try it out and mature it.\r\n\r\n> Run container before and after each user component as part of our custom caching layer. This can be further optimized by the built-in caching logic because starting new containers lead to performance overhead. Is it possible to override the built-in caching logic with customized code?\r\n\r\nWill ContainerSet be enough for your case @zzxvictor? 
As far as I can tell, multiple containers in one Pod has less overhead than multiple Pods, because they can be scheduled only once and they share local volumes.\r\n\r\nAnother option I mentioned before is using go binaries, if you mount a go binary that KFP cacher can call into to get the caching decision -- that will be faster than container set I believe. However, as mentioned it has higher barrier for entry, because now language is limited to those that can be compiled to binaries.\r\n\r\nAs a last resort, you can always fork, we plan to build built-in caching logic as HTTP template in argo (a long running service receiving cache requests from argo workflows), so if you fork our handler code and replace it with your own caching code you can achieve the goal you want. However, I'm not sure there are enough people who wants to customize so deeply.", "> For each component, we want to capture where its arguments come from to build data lineage. This is particularly critical to reproducibility and debugging. Unfortunately, right now there's no way to achieve such things.\r\n\r\nIn [v2 compatible mode](https://bit.ly/kfp-v2-compatible) and KFP v2 we are building, these info are already captured in the ml-metadata store deployed with KFP. What's missing there?", "Just to clarify our process to move this forward.\r\nEither\r\n* a contributor drives the discussions and proposes a design\r\n* there's enough interest, so that KFP team wants to take over this\r\n\r\nWe are currently collecting feedback and different use-cases with this. Here are some questions that I am still unclear with:\r\n1. Who will be the person that configures post publish hooks? Pipeline authors or platform admins or there are use-cases for both?\r\n2. What will be the most appropriate interface for the post publish hook? golang binary or container?\r\n3. Following 2, what will be minimum perf requirement for the hook?", "If we can have post_publish be a Lifecycle Hook, similar to [PreCache check](https://github.com/kubeflow/pipelines/blob/c89ed71cf6339cdcdd957d4dca4b1f32c10db9c9/api/v2alpha1/pipeline_spec.proto#L546-L566) that should be an ideal experience. \r\n\r\n1. These LifeCycle hooks, would let platform admins extend the capabilities of Kubeflow Pipelines Runtime in interesting ways, without having to hack things like launcher (integration with existing metadata systems etc). \r\n2. If Platform admins are building this, it should be ok it being a golang binary.\r\n3. If these are built as optional/opt-in hooks (and in golang), platform teams should be able to decide if added performance overhead provides enough value. ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-21T15:53:05
2022-03-11T05:57:45
null
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area backend --> <!-- /area sdk --> <!-- /area components --> ### What feature would you like to see? The IR spec specifies Lifecycle [Hooks](https://github.com/kubeflow/pipelines/blob/c89ed71cf6339cdcdd957d4dca4b1f32c10db9c9/api/v2alpha1/pipeline_spec.proto#L546-L566) (pre-cache) which could be used to override the caching logic in the driver. A similar hook (post_publish), fired after the publisher, would be useful to help capture the entire lineage metadata: output values/artifacts, input values/artifacts, pipeline/task status, etc. It would be ideal if the hook could be 1. Specified as a container which runs after publisher actions (Python Container rather than Go Binary). 2. Synchronous execution for reliability reasons (Synchronous over asynchronous). ### What is the use case or pain point? Having these hooks will help integrate with existing external metadata management systems, which are already in use within the company for governance/compliance/audit reasons along with metadata management. ### Is there a workaround currently? It is not possible to achieve this with the current IR spec. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
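To make the request concrete, here is a purely hypothetical sketch of what a post_publish hook container entrypoint could look like. Nothing here exists in the IR spec today: the flag names, the payload shape, and the external lineage call are all invented for illustration.
```python
# Hypothetical post_publish hook entrypoint -- illustration only; the flags
# and payload shape are invented and are NOT part of the current IR spec.
import argparse
import json

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--executor-output', required=True,
                        help='Path to the outputs the publisher recorded (JSON).')
    parser.add_argument('--task-status', required=True,
                        help='Terminal status of the task, e.g. SUCCEEDED.')
    args = parser.parse_args()

    with open(args.executor_output) as f:
        outputs = json.load(f)

    # Stand-in for pushing lineage to an external metadata system:
    lineage_record = {'status': args.task_status, 'outputs': outputs}
    print(json.dumps(lineage_record))

if __name__ == '__main__':
    main()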
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6103/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6103/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6102
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6102/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6102/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6102/events
https://github.com/kubeflow/pipelines/issues/6102
949,847,022
MDU6SXNzdWU5NDk4NDcwMjI=
6,102
[frontend] Unable to upload pipeline by URL in KF 1.3
{ "login": "deepio-oc", "id": 55089906, "node_id": "MDQ6VXNlcjU1MDg5OTA2", "avatar_url": "https://avatars.githubusercontent.com/u/55089906?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deepio-oc", "html_url": "https://github.com/deepio-oc", "followers_url": "https://api.github.com/users/deepio-oc/followers", "following_url": "https://api.github.com/users/deepio-oc/following{/other_user}", "gists_url": "https://api.github.com/users/deepio-oc/gists{/gist_id}", "starred_url": "https://api.github.com/users/deepio-oc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/deepio-oc/subscriptions", "organizations_url": "https://api.github.com/users/deepio-oc/orgs", "repos_url": "https://api.github.com/users/deepio-oc/repos", "events_url": "https://api.github.com/users/deepio-oc/events{/privacy}", "received_events_url": "https://api.github.com/users/deepio-oc/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[ "Thank you for the report! Let me try to reproduce", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-21T15:33:14
2022-03-03T00:05:33
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Using KF 1.3 steps: while ! kustomize build example | kubectl apply -f -; do echo "Retrying to apply resources"; sleep 10; done * KFP version: Pipeline UI is running gcr.io/ml-pipeline/frontend:1.5.0 ### Steps to reproduce 1. Upload pipeline 2. Choose upload by URL 3. Provide a public HTTP URL to the pipeline file ### Expected result The pipeline should be uploaded. I am able to upload a pipeline using a local file; upload by URL should work the same way. ### Materials and Reference ![Screen Shot 2021-07-21 at 10 25 00 AM](https://user-images.githubusercontent.com/55089906/126516601-5f9bc97f-4703-491b-a38d-b8b3ae3f205b.png) --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
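As a workaround until the UI path is fixed, the same result can be reached programmatically; a minimal sketch, assuming the kfp SDK can reach the ml-pipeline API (the host and pipeline URL below are placeholders):
```python
# Workaround sketch: fetch the pipeline file yourself, then upload it with the
# SDK instead of the UI's "upload by URL" option. Host and URL are placeholders.
import urllib.request
import kfp

url = 'https://example.com/pipeline.yaml'  # public pipeline file (placeholder)
urllib.request.urlretrieve(url, 'pipeline.yaml')

client = kfp.Client(host='http://localhost:8080')  # adjust to your deployment
client.upload_pipeline('pipeline.yaml', pipeline_name='uploaded-via-sdk')
```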
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6102/reactions", "total_count": 5, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 1, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6102/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6101
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6101/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6101/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6101/events
https://github.com/kubeflow/pipelines/issues/6101
949,729,919
MDU6SXNzdWU5NDk3Mjk5MTk=
6,101
[bug] Exit handler crashes on termination
{ "login": "hahamark1", "id": 12664815, "node_id": "MDQ6VXNlcjEyNjY0ODE1", "avatar_url": "https://avatars.githubusercontent.com/u/12664815?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hahamark1", "html_url": "https://github.com/hahamark1", "followers_url": "https://api.github.com/users/hahamark1/followers", "following_url": "https://api.github.com/users/hahamark1/following{/other_user}", "gists_url": "https://api.github.com/users/hahamark1/gists{/gist_id}", "starred_url": "https://api.github.com/users/hahamark1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hahamark1/subscriptions", "organizations_url": "https://api.github.com/users/hahamark1/orgs", "repos_url": "https://api.github.com/users/hahamark1/repos", "events_url": "https://api.github.com/users/hahamark1/events{/privacy}", "received_events_url": "https://api.github.com/users/hahamark1/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[ "Facing same issue. Any update on this issue?", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "We still face the issue. Can I help in someway to fix the issue?" ]
2021-07-21T13:50:50
2022-10-14T17:35:47
null
NONE
null
### What happened: I'm running a pipeline with an exit handler. This setup works well when the pipeline crashes or completes, but on termination the exit handler fails with this error message: `This step is in Failed state with this message: Step exceeded its deadline` ### What did you expect to happen: I expected the exit handler to perform the same way it would when a pipeline crashes. ### Environment: KF 1.5 Ubuntu 18 * How do you deploy Kubeflow Pipelines (KFP)? KFP standalone on an Ubuntu server. * KFP version: KFP 1.5 * KFP SDK version: 1.6.2 ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> /area components --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
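For reference, a minimal sketch of the setup being described, assuming the kfp v1 SDK; the component bodies are placeholders. Terminating the run while the long-running step sleeps reproduces the situation in which the exit handler is expected to fire:
```python
# Minimal exit-handler pipeline sketch (kfp v1 SDK); component bodies are
# placeholders. Terminate the run mid-sleep to exercise the exit handler.
import kfp.dsl as dsl
from kfp.components import create_component_from_func

def notify():
    print('pipeline finished (success, failure, or termination)')

def work():
    import time
    time.sleep(600)  # long-running step so the run can be terminated

notify_op = create_component_from_func(notify)
work_op = create_component_from_func(work)

@dsl.pipeline(name='exit-handler-repro')
def pipeline():
    # Everything inside the handler runs first; notify runs on exit.
    with dsl.ExitHandler(notify_op()):
        work_op()
```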
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6101/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6101/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6100
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6100/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6100/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6100/events
https://github.com/kubeflow/pipelines/issues/6100
949,626,088
MDU6SXNzdWU5NDk2MjYwODg=
6,100
[pH] prepare for kpt v1.0.0-beta.1
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @zijianjoy ", "manifests presubmit test is already failing with kpt v1: https://oss-prow.knative.dev/view/gs/oss-prow/pr-logs/pull/kubeflow_pipelines/5926/kubeflow-pipelines-manifests/1417793964035018752", "```\r\n+ kpt cfg tree /home/prow/go/src/github.com/kubeflow/pipelines/manifests/kustomize/hack/..\r\nerror: unknown command \"cfg\" for \"kpt\"\r\nDid you mean this?\r\n\tfn\r\n\tpkg\r\n```", "FYI, I temporarily disabled the test that uses kpt in https://github.com/kubeflow/pipelines/commit/efe22ca0a4c8b051c4284d7fd83be2aac58162d0\r\n\r\nWe can re-enable after upgrade", "Candidate for Kubeflow 1.4 release", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "Validated the manifest test is now adopted to kpt v1.0.0-beta.13" ]
2021-07-21T11:50:37
2022-03-11T05:38:21
2022-03-11T05:38:20
CONTRIBUTOR
null
349.0.0 (2021-07-20) Breaking Changes * **(Kpt)** kpt updated to v1.0.0-beta.1. This version has several breaking changes from previous versions. Documentation is available at <https://kpt.dev> and migration instructions can be found at <https://kpt.dev/installation/migration>. Old versions are available at <https://github.com/GoogleContainerTools/kpt>.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6100/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6100/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6096
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6096/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6096/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6096/events
https://github.com/kubeflow/pipelines/issues/6096
949,350,424
MDU6SXNzdWU5NDkzNTA0MjQ=
6,096
How do I call Hosted KFP APIs programmatically when I have a Google Cloud token?
{ "login": "Ark-kun", "id": 1829149, "node_id": "MDQ6VXNlcjE4MjkxNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/1829149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ark-kun", "html_url": "https://github.com/Ark-kun", "followers_url": "https://api.github.com/users/Ark-kun/followers", "following_url": "https://api.github.com/users/Ark-kun/following{/other_user}", "gists_url": "https://api.github.com/users/Ark-kun/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ark-kun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ark-kun/subscriptions", "organizations_url": "https://api.github.com/users/Ark-kun/orgs", "repos_url": "https://api.github.com/users/Ark-kun/repos", "events_url": "https://api.github.com/users/Ark-kun/events{/privacy}", "received_events_url": "https://api.github.com/users/Ark-kun/received_events", "type": "User", "site_admin": false }
[ { "id": 1682717392, "node_id": "MDU6TGFiZWwxNjgyNzE3Mzky", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/question", "name": "kind/question", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }, { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "1. I think you can list the endpoint via kubectl `kubectl describe configmap inverse-proxy-config -n kubeflow | grep googleusercontent.com`, you probably need some script to query for all k8s context you have. Reference: https://www.kubeflow.org/docs/components/pipelines/installation/standalone-deployment/#deploying-kubeflow-pipelines\r\n2. I don't know approaches other than https://cloud.google.com/iap/docs/authentication-howto#authenticating_from_a_desktop_app, I would also be interested in any potential solution to it.", "Strange, I think you can just get the token by `gcloud auth print-access-token` and add it to header `Authorization: Bearer <Token>`.\r\n\r\nDid you make a typo there? It should be Authorization Bearer: https://swagger.io/docs/specification/authentication/bearer-authentication/", "> Can I programmatically list the Hosted KFP instances (as I see in the Google Cloud AI Platform UX) and get their inverse proxy URLs?\n\nIt's not a public API from my understanding", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-21T05:50:24
2022-03-03T00:05:24
null
CONTRIBUTOR
null
Suppose I have several instances of KFP installed from Google Cloud Marketplace. I also have a Google Cloud token with `cloud-platform` scopes (similar to the one given by `gcloud auth print-access-token`). I can call any Google Cloud API using that token. But how can I call the KFP API? 1) Can I programmatically list the Hosted KFP instances (as I see in the Google Cloud AI Platform UX) and get their inverse proxy URLs? 2) Given a KFP inverse proxy URL and token, how can I call any KFP API? I've tried to pass the token as `Authentication: Bearer: <token>`, but that did not quite work for me. I've read some KFP documentation which mentioned creating service accounts and client IDs, but I did not see a solution that would work with just a token.
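For question 2, a sketch of one approach that should work with the kfp SDK, assuming the inverse-proxy host is already known (the comments above show how to read it from the inverse-proxy-config configmap). Note the header is Authorization, not Authentication, and recent kfp.Client versions accept the token directly via existing_token:
```python
# Sketch: call a Hosted KFP instance with a Google Cloud access token.
# The host below is a placeholder; read the real one from the
# inverse-proxy-config configmap as described in the comments above.
import subprocess
import kfp

host = 'https://<deployment>.pipelines.googleusercontent.com'  # placeholder
token = subprocess.check_output(
    ['gcloud', 'auth', 'print-access-token']).decode().strip()

# kfp.Client sends "Authorization: Bearer <token>" when given existing_token.
client = kfp.Client(host=host, existing_token=token)
print(client.list_pipelines())
```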
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6096/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6096/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6093
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6093/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6093/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6093/events
https://github.com/kubeflow/pipelines/issues/6093
949,112,778
MDU6SXNzdWU5NDkxMTI3Nzg=
6,093
[bug] pipeline parameters in google_cloud_pipeline_components.aiplatform.TabularCreateDatasetOp
{ "login": "ckchow", "id": 3922740, "node_id": "MDQ6VXNlcjM5MjI3NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/3922740?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ckchow", "html_url": "https://github.com/ckchow", "followers_url": "https://api.github.com/users/ckchow/followers", "following_url": "https://api.github.com/users/ckchow/following{/other_user}", "gists_url": "https://api.github.com/users/ckchow/gists{/gist_id}", "starred_url": "https://api.github.com/users/ckchow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ckchow/subscriptions", "organizations_url": "https://api.github.com/users/ckchow/orgs", "repos_url": "https://api.github.com/users/ckchow/repos", "events_url": "https://api.github.com/users/ckchow/events{/privacy}", "received_events_url": "https://api.github.com/users/ckchow/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "IronPan", "id": 2348602, "node_id": "MDQ6VXNlcjIzNDg2MDI=", "avatar_url": "https://avatars.githubusercontent.com/u/2348602?v=4", "gravatar_id": "", "url": "https://api.github.com/users/IronPan", "html_url": "https://github.com/IronPan", "followers_url": "https://api.github.com/users/IronPan/followers", "following_url": "https://api.github.com/users/IronPan/following{/other_user}", "gists_url": "https://api.github.com/users/IronPan/gists{/gist_id}", "starred_url": "https://api.github.com/users/IronPan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/IronPan/subscriptions", "organizations_url": "https://api.github.com/users/IronPan/orgs", "repos_url": "https://api.github.com/users/IronPan/repos", "events_url": "https://api.github.com/users/IronPan/events{/privacy}", "received_events_url": "https://api.github.com/users/IronPan/received_events", "type": "User", "site_admin": false }
[ { "login": "IronPan", "id": 2348602, "node_id": "MDQ6VXNlcjIzNDg2MDI=", "avatar_url": "https://avatars.githubusercontent.com/u/2348602?v=4", "gravatar_id": "", "url": "https://api.github.com/users/IronPan", "html_url": "https://github.com/IronPan", "followers_url": "https://api.github.com/users/IronPan/followers", "following_url": "https://api.github.com/users/IronPan/following{/other_user}", "gists_url": "https://api.github.com/users/IronPan/gists{/gist_id}", "starred_url": "https://api.github.com/users/IronPan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/IronPan/subscriptions", "organizations_url": "https://api.github.com/users/IronPan/orgs", "repos_url": "https://api.github.com/users/IronPan/repos", "events_url": "https://api.github.com/users/IronPan/events{/privacy}", "received_events_url": "https://api.github.com/users/IronPan/received_events", "type": "User", "site_admin": false }, { "login": "SinaChavoshi", "id": 20114005, "node_id": "MDQ6VXNlcjIwMTE0MDA1", "avatar_url": "https://avatars.githubusercontent.com/u/20114005?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SinaChavoshi", "html_url": "https://github.com/SinaChavoshi", "followers_url": "https://api.github.com/users/SinaChavoshi/followers", "following_url": "https://api.github.com/users/SinaChavoshi/following{/other_user}", "gists_url": "https://api.github.com/users/SinaChavoshi/gists{/gist_id}", "starred_url": "https://api.github.com/users/SinaChavoshi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SinaChavoshi/subscriptions", "organizations_url": "https://api.github.com/users/SinaChavoshi/orgs", "repos_url": "https://api.github.com/users/SinaChavoshi/repos", "events_url": "https://api.github.com/users/SinaChavoshi/events{/privacy}", "received_events_url": "https://api.github.com/users/SinaChavoshi/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @IronPan @SinaChavoshi ", "Good find, seems like we are splitting on \"=\" instead of splitting on the first instance of '=' and taking the remaining argument as the input query string. Marking as P0 as directly query is broken with this issue. " ]
2021-07-20T21:28:35
2021-08-12T21:59:54
2021-08-12T21:59:54
NONE
null
### What steps did you take Create a simple pipeline that tries to create a tabular dataset from a BigQuery table. ``` @dsl.pipeline( name='train-churn', pipeline_root='gs://blah/test1' ) def pipeline(project_id: str, dataset_id: str, table_id: str): query = dedent(f""" <query elided> """) query_done = bigquery_query_op( query=query, project_id=project_id, dataset_id=dataset_id, table_id=table_id, dataset_location='us' ) # todo bq op unfortunately returns nothing. so we compose our own coordinates for dataset creation bq_source_rendered = f'bq://{str(project_id)}.{str(dataset_id)}.{str(table_id)}' dataset = gcc.aiplatform.TabularDatasetCreateOp( display_name='test_name_1', bq_source=bq_source_rendered ) dataset.after(query_done) <.... rest of pipeline .... > ``` ### What happened: GCP error log: ``` "__main__", mod_spec) File "/opt/python3.7/lib/python3.7/runpy.py", line 85, in _run_code exec(code, run_globals) File "/opt/python3.7/lib/python3.7/site-packages/google_cloud_pipeline_components/aiplatform/remote_runner.py", line 284, in <module> main() File "/opt/python3.7/lib/python3.7/site-packages/google_cloud_pipeline_components/aiplatform/remote_runner.py", line 268, in main key, value = arg[2:].split("=") ValueError: too many values to unpack (expected 2) ``` The AI Platform runner does not parse and unpack the parameters correctly and fails on dataset creation. ### What did you expect to happen: Pipeline should complete. ### Environment: KFP v2 using vertex. (locally compiled pipeline) * KFP SDK version: ``` kfp 1.6.5 kfp-pipeline-spec 0.1.8 kfp-server-api 1.6.0 ``` ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> /area sdk <!-- /area testing --> <!-- /area samples --> /area components --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
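As the triage comment above notes, the crash comes from splitting on every '=' rather than only the first, so values that themselves contain '=' (such as SQL queries) unpack into too many pieces. A minimal sketch of the fix (the loop is paraphrased from the traceback, not copied from the actual remote_runner.py source):
```python
# Sketch of the suggested fix: split on the first '=' only (maxsplit=1), so
# argument values that contain '=' themselves survive parsing intact.
def parse_args(argv):
    parsed = {}
    for arg in argv:
        if arg.startswith('--'):
            key, value = arg[2:].split('=', 1)  # was: arg[2:].split('=')
            parsed[key] = value
    return parsed

# A query containing '=' no longer raises "too many values to unpack":
assert parse_args(['--query=SELECT a FROM t WHERE b = 1']) == {
    'query': 'SELECT a FROM t WHERE b = 1'
}
```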
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6093/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6093/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6092
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6092/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6092/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6092/events
https://github.com/kubeflow/pipelines/issues/6092
948,750,179
MDU6SXNzdWU5NDg3NTAxNzk=
6,092
[sdk] Wrong type hint of create_run_from_job_spec
{ "login": "sfujiwara", "id": 2109535, "node_id": "MDQ6VXNlcjIxMDk1MzU=", "avatar_url": "https://avatars.githubusercontent.com/u/2109535?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sfujiwara", "html_url": "https://github.com/sfujiwara", "followers_url": "https://api.github.com/users/sfujiwara/followers", "following_url": "https://api.github.com/users/sfujiwara/following{/other_user}", "gists_url": "https://api.github.com/users/sfujiwara/gists{/gist_id}", "starred_url": "https://api.github.com/users/sfujiwara/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sfujiwara/subscriptions", "organizations_url": "https://api.github.com/users/sfujiwara/orgs", "repos_url": "https://api.github.com/users/sfujiwara/repos", "events_url": "https://api.github.com/users/sfujiwara/events{/privacy}", "received_events_url": "https://api.github.com/users/sfujiwara/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "NikeNano", "id": 22057410, "node_id": "MDQ6VXNlcjIyMDU3NDEw", "avatar_url": "https://avatars.githubusercontent.com/u/22057410?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NikeNano", "html_url": "https://github.com/NikeNano", "followers_url": "https://api.github.com/users/NikeNano/followers", "following_url": "https://api.github.com/users/NikeNano/following{/other_user}", "gists_url": "https://api.github.com/users/NikeNano/gists{/gist_id}", "starred_url": "https://api.github.com/users/NikeNano/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NikeNano/subscriptions", "organizations_url": "https://api.github.com/users/NikeNano/orgs", "repos_url": "https://api.github.com/users/NikeNano/repos", "events_url": "https://api.github.com/users/NikeNano/events{/privacy}", "received_events_url": "https://api.github.com/users/NikeNano/received_events", "type": "User", "site_admin": false }
[ { "login": "NikeNano", "id": 22057410, "node_id": "MDQ6VXNlcjIyMDU3NDEw", "avatar_url": "https://avatars.githubusercontent.com/u/22057410?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NikeNano", "html_url": "https://github.com/NikeNano", "followers_url": "https://api.github.com/users/NikeNano/followers", "following_url": "https://api.github.com/users/NikeNano/following{/other_user}", "gists_url": "https://api.github.com/users/NikeNano/gists{/gist_id}", "starred_url": "https://api.github.com/users/NikeNano/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NikeNano/subscriptions", "organizations_url": "https://api.github.com/users/NikeNano/orgs", "repos_url": "https://api.github.com/users/NikeNano/repos", "events_url": "https://api.github.com/users/NikeNano/events{/privacy}", "received_events_url": "https://api.github.com/users/NikeNano/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign", "You are always more than welcome to make a PR @sfujiwara if you find things like this. ", "@NikeNano Thanks. I made PR #6098. Could you please review it?" ]
2021-07-20T15:06:41
2021-07-22T07:21:25
2021-07-22T07:21:25
CONTRIBUTOR
null
### Environment * KFP version: - * KFP SDK version: 1.6.5 * All dependencies version: * kfp-pipeline-spec: 0.1.8 * kfp-server-api: 1.6.0 ### Steps to reproduce The type hint of `create_run_from_job_spec` says `str`, but it actually returns `dict`. https://github.com/kubeflow/pipelines/blob/74d27e7e7ec88c62154a3dc5fb19cb27fc2922e6/sdk/python/kfp/v2/google/client/client.py#L258-L268 ```python from kfp.v2 import compiler, dsl from kfp.v2.google.client import AIPlatformClient compiler.Compiler().compile(pipeline_func=pipeline_func, package_path=pipeline_json) api_client = AIPlatformClient(project_id=project_id, region="us-central1") # `response` is actually `dict`, but the type hint says `str`. response = api_client.create_run_from_job_spec(...) ``` ### Expected result - The return type of `create_run_from_job_spec` should be `dict` - The return type of `_submit_job` (called in `create_run_from_job_spec`) should be `dict` https://github.com/kubeflow/pipelines/blob/74d27e7e7ec88c62154a3dc5fb19cb27fc2922e6/sdk/python/kfp/v2/google/client/client.py#L207-L211 ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
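A sketch of the expected fix, assuming only the annotations change (parameter lists are elided here; the real signatures are at the client.py lines linked above):
```python
# Sketch only: the fix changes the return annotations from str to dict.
# Parameter lists are elided; see client.py for the real signatures.
class AIPlatformClient:
    def _submit_job(self, *args, **kwargs) -> dict:  # was: -> str
        ...

    def create_run_from_job_spec(self, *args, **kwargs) -> dict:  # was: -> str
        ...
```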
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6092/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6092/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6091
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6091/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6091/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6091/events
https://github.com/kubeflow/pipelines/issues/6091
948,620,611
MDU6SXNzdWU5NDg2MjA2MTE=
6,091
[backend] Sample pipeline (conditional-execution-pipeline)
{ "login": "Hmr-ramzi", "id": 11632681, "node_id": "MDQ6VXNlcjExNjMyNjgx", "avatar_url": "https://avatars.githubusercontent.com/u/11632681?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Hmr-ramzi", "html_url": "https://github.com/Hmr-ramzi", "followers_url": "https://api.github.com/users/Hmr-ramzi/followers", "following_url": "https://api.github.com/users/Hmr-ramzi/following{/other_user}", "gists_url": "https://api.github.com/users/Hmr-ramzi/gists{/gist_id}", "starred_url": "https://api.github.com/users/Hmr-ramzi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Hmr-ramzi/subscriptions", "organizations_url": "https://api.github.com/users/Hmr-ramzi/orgs", "repos_url": "https://api.github.com/users/Hmr-ramzi/repos", "events_url": "https://api.github.com/users/Hmr-ramzi/events{/privacy}", "received_events_url": "https://api.github.com/users/Hmr-ramzi/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "@Hmr-ramzi What is the distribution you use to deploy your Kubeflow cluster? https://www.kubeflow.org/docs/started/installing-kubeflow/#install-a-packaged-kubeflow-distribution", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-20T12:49:06
2022-03-03T02:05:18
null
NONE
null
### What steps did you take Deploy Kubeflow 1.3 using kustomize. Create a kubeflow user. Create a default experiment. Create a run with the default example conditional-execution-pipeline ### What happened: A pod was generated for the pipeline in the user namespace but remained in state ContainerCreating, failing with error: Warning FailedMount 80s (x46 over 168m) kubelet Unable to attach or mount volumes: unmounted volumes=[docker-sock], unattached volumes=[podmetadata docker-sock mlpipeline-minio-artifact default-editor-token-2jchv]: timed out waiting for the condition ### What did you expect to happen: The pipeline was expected to start normally ### Environment: * How do you deploy Kubeflow Pipelines (KFP)? using kustomize * KFP version: kubeflow 1.3 from the manifests repo * KFP SDK version: kubeflow 1.3 from the manifests repo ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> ### Labels /area backend --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6091/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6091/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6089
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6089/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6089/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6089/events
https://github.com/kubeflow/pipelines/issues/6089
948,138,176
MDU6SXNzdWU5NDgxMzgxNzY=
6,089
[backend] Standalone deployment on GKE version 1.19 with workload identity fails in v2 compatible mode due to lack of permissions
{ "login": "neuromage", "id": 206520, "node_id": "MDQ6VXNlcjIwNjUyMA==", "avatar_url": "https://avatars.githubusercontent.com/u/206520?v=4", "gravatar_id": "", "url": "https://api.github.com/users/neuromage", "html_url": "https://github.com/neuromage", "followers_url": "https://api.github.com/users/neuromage/followers", "following_url": "https://api.github.com/users/neuromage/following{/other_user}", "gists_url": "https://api.github.com/users/neuromage/gists{/gist_id}", "starred_url": "https://api.github.com/users/neuromage/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/neuromage/subscriptions", "organizations_url": "https://api.github.com/users/neuromage/orgs", "repos_url": "https://api.github.com/users/neuromage/repos", "events_url": "https://api.github.com/users/neuromage/events{/privacy}", "received_events_url": "https://api.github.com/users/neuromage/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "/cc @Bobgy ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-20T00:38:40
2022-03-03T02:05:24
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? I used option 2 in standalone deployment * KFP version: Deployed from master branch * KFP SDK version: 1.6.4 ### Steps to reproduce 1. Create a new GKE cluster (version 1.19.9-gke.1900) a. When creating the cluster, under `Security` options, enable `Workload Identity` b. Ensure you have a service account for using workload identity (e.g. `workload-identity@<PROJECT-ID>.iam.gserviceaccount.com`) c. Ensure this account is bound to KSA `[kubeflow/pipeline-runner]`. Example: ``` gcloud iam service-accounts add-iam-policy-binding --role roles/iam.workloadIdentityUser --member "serviceAccount:<PROJECT-ID>.svc.id.goog[kubeflow/pipeline-runner]" workload-identity@<PROJECT-ID>.iam.gserviceaccount.com ``` 2. Deploy KFP using option 2 as described [here](https://github.com/kubeflow/pipelines/tree/master/manifests/kustomize) 3. Run a v2-compatible pipeline with `pipeline_root` set to a GCS directory in which the workload identity service account above has write permissions. ### Expected result Pipeline completes successfully. ### What happened instead Pipeline failed, with launcher complaining about lacking permissions. ### Fix Enable GKE metadata server on the default node-pool using [these instructions](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#option_2_node_pool_modification), e.g. ``` gcloud container node-pools update default-pool --cluster=<CLUSTER-NAME> --region=<REGION> --workload-metadata=GKE_METADATA ``` <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6089/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6089/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6088
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6088/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6088/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6088/events
https://github.com/kubeflow/pipelines/issues/6088
948,136,803
MDU6SXNzdWU5NDgxMzY4MDM=
6,088
my new idea
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-07-20T00:35:00
2021-07-20T00:36:01
2021-07-20T00:36:01
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6088/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6088/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6087
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6087/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6087/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6087/events
https://github.com/kubeflow/pipelines/issues/6087
947,976,371
MDU6SXNzdWU5NDc5NzYzNzE=
6,087
[feature] Add a delete_schedule and update_schedule functions to AIPlatformClient
{ "login": "axelmagn", "id": 1660665, "node_id": "MDQ6VXNlcjE2NjA2NjU=", "avatar_url": "https://avatars.githubusercontent.com/u/1660665?v=4", "gravatar_id": "", "url": "https://api.github.com/users/axelmagn", "html_url": "https://github.com/axelmagn", "followers_url": "https://api.github.com/users/axelmagn/followers", "following_url": "https://api.github.com/users/axelmagn/following{/other_user}", "gists_url": "https://api.github.com/users/axelmagn/gists{/gist_id}", "starred_url": "https://api.github.com/users/axelmagn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/axelmagn/subscriptions", "organizations_url": "https://api.github.com/users/axelmagn/orgs", "repos_url": "https://api.github.com/users/axelmagn/repos", "events_url": "https://api.github.com/users/axelmagn/events{/privacy}", "received_events_url": "https://api.github.com/users/axelmagn/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
{ "login": "ji-yaqi", "id": 17338099, "node_id": "MDQ6VXNlcjE3MzM4MDk5", "avatar_url": "https://avatars.githubusercontent.com/u/17338099?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ji-yaqi", "html_url": "https://github.com/ji-yaqi", "followers_url": "https://api.github.com/users/ji-yaqi/followers", "following_url": "https://api.github.com/users/ji-yaqi/following{/other_user}", "gists_url": "https://api.github.com/users/ji-yaqi/gists{/gist_id}", "starred_url": "https://api.github.com/users/ji-yaqi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ji-yaqi/subscriptions", "organizations_url": "https://api.github.com/users/ji-yaqi/orgs", "repos_url": "https://api.github.com/users/ji-yaqi/repos", "events_url": "https://api.github.com/users/ji-yaqi/events{/privacy}", "received_events_url": "https://api.github.com/users/ji-yaqi/received_events", "type": "User", "site_admin": false }
[ { "login": "ji-yaqi", "id": 17338099, "node_id": "MDQ6VXNlcjE3MzM4MDk5", "avatar_url": "https://avatars.githubusercontent.com/u/17338099?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ji-yaqi", "html_url": "https://github.com/ji-yaqi", "followers_url": "https://api.github.com/users/ji-yaqi/followers", "following_url": "https://api.github.com/users/ji-yaqi/following{/other_user}", "gists_url": "https://api.github.com/users/ji-yaqi/gists{/gist_id}", "starred_url": "https://api.github.com/users/ji-yaqi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ji-yaqi/subscriptions", "organizations_url": "https://api.github.com/users/ji-yaqi/orgs", "repos_url": "https://api.github.com/users/ji-yaqi/repos", "events_url": "https://api.github.com/users/ji-yaqi/events{/privacy}", "received_events_url": "https://api.github.com/users/ji-yaqi/received_events", "type": "User", "site_admin": false } ]
null
[ "cc @sasha-gitg @SinaChavoshi @ji-yaqi ", "Close this since the client is moved to Vertex SDK. " ]
2021-07-19T19:55:16
2022-02-03T23:06:12
2022-02-03T23:06:12
NONE
null
### Feature Area /area sdk ### What feature would you like to see? I would like to be able to delete and update existing schedules created via `create_schedule_from_job_spec`. ### What is the use case or pain point? Pipelines are updated over time, and need to be redeployed. `create_schedule_from_job_spec` manages long-term resources in the form of cloud functions and schedules that will need to be updated as part of a redeployment event. At the moment, I must manage these resources manually, which makes it easier to implement everything myself rather than using `AIPlatformClient`. ### Is there a workaround currently? Circumvent AIPlatformClient. Create and update schedule / functions / pubsub manually. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6087/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6087/timeline
null
completed
null
null
false
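A minimal sketch of the workaround described in issue 6087 above (manually managing the Cloud Scheduler resources behind a pipeline schedule, since `AIPlatformClient` exposes no `delete_schedule` or `update_schedule`). This assumes the google-cloud-scheduler client library; the project, region, and job name are placeholders, not values taken from the issue:

```python
# Manage the Cloud Scheduler job that backs a pipeline schedule directly.
# Assumes the google-cloud-scheduler library; names below are placeholders.
from google.cloud import scheduler_v1
from google.protobuf import field_mask_pb2

client = scheduler_v1.CloudSchedulerClient()

# Fully qualified name: projects/<PROJECT>/locations/<REGION>/jobs/<JOB>
job_name = client.job_path("my-project", "us-central1", "my-pipeline-schedule")

# Update the cron expression on the existing schedule.
job = client.get_job(name=job_name)
job.schedule = "0 6 * * *"  # run daily at 06:00
client.update_job(
    job=job,
    update_mask=field_mask_pb2.FieldMask(paths=["schedule"]),
)

# Or delete the schedule entirely as part of a redeployment.
client.delete_job(name=job_name)
```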
https://api.github.com/repos/kubeflow/pipelines/issues/6086
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6086/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6086/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6086/events
https://github.com/kubeflow/pipelines/issues/6086
947,973,609
MDU6SXNzdWU5NDc5NzM2MDk=
6,086
[feature] Optionally disable API enablement when calling AIPlatformClient.create_schedule_from_job_spec
{ "login": "axelmagn", "id": 1660665, "node_id": "MDQ6VXNlcjE2NjA2NjU=", "avatar_url": "https://avatars.githubusercontent.com/u/1660665?v=4", "gravatar_id": "", "url": "https://api.github.com/users/axelmagn", "html_url": "https://github.com/axelmagn", "followers_url": "https://api.github.com/users/axelmagn/followers", "following_url": "https://api.github.com/users/axelmagn/following{/other_user}", "gists_url": "https://api.github.com/users/axelmagn/gists{/gist_id}", "starred_url": "https://api.github.com/users/axelmagn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/axelmagn/subscriptions", "organizations_url": "https://api.github.com/users/axelmagn/orgs", "repos_url": "https://api.github.com/users/axelmagn/repos", "events_url": "https://api.github.com/users/axelmagn/events{/privacy}", "received_events_url": "https://api.github.com/users/axelmagn/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "Hi Axel, do you mean to skip checking for permission for APIs, and check those permissions at the time of API usage? ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-19T19:50:57
2022-03-03T02:05:19
null
NONE
null
### Feature Area /area sdk ### What feature would you like to see? I would like to manipulate a flag in the function parameters of `AIPlatformClient.create_schedule_from_job_spec` in order to disable the logic which automatically tries to enable the necessary APIs. An acceptable alternative would be to expose a function at a lower level of abstraction that decouples API enablement from pipeline scheduling. ### What is the use case or pain point? This function call currently fails when run by service accounts in a Cloud Build context or similar due to insufficient permissions. Furthermore, I have been unable to identify the required role necessary to allow a service account to enable APIs. It would be useful to include a link to this role in the error when encountering this failure mode. I have already enabled all of the requisite APIs, and can verify functionality when run from a user account. ### Is there a workaround currently? Currently I enable admin permissions for my service account (in a dev environment). This is not ideal. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6086/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6086/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6085
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6085/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6085/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6085/events
https://github.com/kubeflow/pipelines/issues/6085
947,966,870
MDU6SXNzdWU5NDc5NjY4NzA=
6,085
[feature] Parameter parity in AIPlatformClient functions
{ "login": "axelmagn", "id": 1660665, "node_id": "MDQ6VXNlcjE2NjA2NjU=", "avatar_url": "https://avatars.githubusercontent.com/u/1660665?v=4", "gravatar_id": "", "url": "https://api.github.com/users/axelmagn", "html_url": "https://github.com/axelmagn", "followers_url": "https://api.github.com/users/axelmagn/followers", "following_url": "https://api.github.com/users/axelmagn/following{/other_user}", "gists_url": "https://api.github.com/users/axelmagn/gists{/gist_id}", "starred_url": "https://api.github.com/users/axelmagn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/axelmagn/subscriptions", "organizations_url": "https://api.github.com/users/axelmagn/orgs", "repos_url": "https://api.github.com/users/axelmagn/repos", "events_url": "https://api.github.com/users/axelmagn/events{/privacy}", "received_events_url": "https://api.github.com/users/axelmagn/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "cc @sasha-gitg @SinaChavoshi ", "Assigning to @chensun for comments/ triage. ", "This AIPlatformClient would be deprecated soon. Thanks for the FR, we have it in our options to add into Vertex SDK. ", "@ji-yaqi any updates on this?", "Hi @axelmagn, we will deprecate AIplatformClient in v2.0.0. Meanwhile, you can use scheduling here: https://cloud.google.com/vertex-ai/docs/pipelines/schedule-cloud-scheduler", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-19T19:40:33
2022-03-02T10:06:43
null
NONE
null
### Feature Area /area sdk ### What feature would you like to see? `AIPlatformClient.create_run_from_job_spec` accepts parameters such as `labels` and `network` that are not accepted by `AIPlatformClient.create_schedule_from_job_spec`. I think that any parameter I can specify in `create_run` should be specifiable in `create_schedule`. ### What is the use case or pain point? I would like to be able to specify `labels` for my scheduled jobs in order to enforce an organization regime on my pipeline jobs. ### Is there a workaround currently? I can implement this myself using Cloud Scheduler and Cloud Functions. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6085/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6085/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6073
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6073/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6073/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6073/events
https://github.com/kubeflow/pipelines/issues/6073
946,804,827
MDU6SXNzdWU5NDY4MDQ4Mjc=
6,073
chore(frontend): Create v2 feature flag on frontend
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[]
2021-07-17T11:25:33
2021-08-03T08:10:11
2021-08-03T08:10:11
COLLABORATOR
null
For implementing V2 features on the frontend, we need to introduce a feature flag. Reference: https://www.webtips.dev/feature-flags-in-react
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6073/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6073/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6068
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6068/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6068/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6068/events
https://github.com/kubeflow/pipelines/issues/6068
946,410,457
MDU6SXNzdWU5NDY0MTA0NTc=
6,068
[frontend] go.doc cron link not working properly
{ "login": "kelvins", "id": 1009397, "node_id": "MDQ6VXNlcjEwMDkzOTc=", "avatar_url": "https://avatars.githubusercontent.com/u/1009397?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kelvins", "html_url": "https://github.com/kelvins", "followers_url": "https://api.github.com/users/kelvins/followers", "following_url": "https://api.github.com/users/kelvins/following{/other_user}", "gists_url": "https://api.github.com/users/kelvins/gists{/gist_id}", "starred_url": "https://api.github.com/users/kelvins/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kelvins/subscriptions", "organizations_url": "https://api.github.com/users/kelvins/orgs", "repos_url": "https://api.github.com/users/kelvins/repos", "events_url": "https://api.github.com/users/kelvins/events{/privacy}", "received_events_url": "https://api.github.com/users/kelvins/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "On Firefox it is showing another error message:\r\n\r\n<img width=\"1440\" alt=\"Screen Shot 2021-07-17 at 09 09 51\" src=\"https://user-images.githubusercontent.com/1009397/126036414-8e763826-6a7b-4901-8ed2-e712bef5a44d.png\">\r\n" ]
2021-07-16T15:49:35
2021-07-19T03:44:18
2021-07-19T03:44:18
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Following the Kubeflow v1.2 documentation for GCP: https://v1-2-branch.kubeflow.org/docs/distributions/gke/ * KFP version: 1.0.4 ### Steps to reproduce - Access the `Experiments` page and click on the `+ Create run` button. - Select `Run Type` -> `Recurring`. - Set `Run Trigger` -> `Trigger type` to `Cron`. - It will show a link to the cron expression format: ![image](https://user-images.githubusercontent.com/1009397/125974079-3e2c6346-d139-4a41-bc03-7b33ac79b452.png) - Click the `here` link and it shows the following result: ![image](https://user-images.githubusercontent.com/1009397/125974261-b47db663-ed96-42ca-95c4-5c061cba926a.png) ### Expected result I would expect to properly load [this doc page](https://pkg.go.dev/github.com/robfig/cron?utm_source=godoc#hdr-CRON_Expression_Format). ### Materials and Reference I'm not a frontend developer so I'm not sure what is happening but I believe it is not redirecting/rendering the page properly, maybe because of the Shadow DOM!? Also, a suggestion would be to include a `target="_blank"` to the `a` tag. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6068/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6068/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6067
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6067/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6067/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6067/events
https://github.com/kubeflow/pipelines/issues/6067
946,248,926
MDU6SXNzdWU5NDYyNDg5MjY=
6,067
Istio injection always disabled
{ "login": "karlschriek", "id": 25316920, "node_id": "MDQ6VXNlcjI1MzE2OTIw", "avatar_url": "https://avatars.githubusercontent.com/u/25316920?v=4", "gravatar_id": "", "url": "https://api.github.com/users/karlschriek", "html_url": "https://github.com/karlschriek", "followers_url": "https://api.github.com/users/karlschriek/followers", "following_url": "https://api.github.com/users/karlschriek/following{/other_user}", "gists_url": "https://api.github.com/users/karlschriek/gists{/gist_id}", "starred_url": "https://api.github.com/users/karlschriek/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/karlschriek/subscriptions", "organizations_url": "https://api.github.com/users/karlschriek/orgs", "repos_url": "https://api.github.com/users/karlschriek/repos", "events_url": "https://api.github.com/users/karlschriek/events{/privacy}", "received_events_url": "https://api.github.com/users/karlschriek/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "capri-xiyue", "id": 52932582, "node_id": "MDQ6VXNlcjUyOTMyNTgy", "avatar_url": "https://avatars.githubusercontent.com/u/52932582?v=4", "gravatar_id": "", "url": "https://api.github.com/users/capri-xiyue", "html_url": "https://github.com/capri-xiyue", "followers_url": "https://api.github.com/users/capri-xiyue/followers", "following_url": "https://api.github.com/users/capri-xiyue/following{/other_user}", "gists_url": "https://api.github.com/users/capri-xiyue/gists{/gist_id}", "starred_url": "https://api.github.com/users/capri-xiyue/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/capri-xiyue/subscriptions", "organizations_url": "https://api.github.com/users/capri-xiyue/orgs", "repos_url": "https://api.github.com/users/capri-xiyue/repos", "events_url": "https://api.github.com/users/capri-xiyue/events{/privacy}", "received_events_url": "https://api.github.com/users/capri-xiyue/received_events", "type": "User", "site_admin": false }
[ { "login": "capri-xiyue", "id": 52932582, "node_id": "MDQ6VXNlcjUyOTMyNTgy", "avatar_url": "https://avatars.githubusercontent.com/u/52932582?v=4", "gravatar_id": "", "url": "https://api.github.com/users/capri-xiyue", "html_url": "https://github.com/capri-xiyue", "followers_url": "https://api.github.com/users/capri-xiyue/followers", "following_url": "https://api.github.com/users/capri-xiyue/following{/other_user}", "gists_url": "https://api.github.com/users/capri-xiyue/gists{/gist_id}", "starred_url": "https://api.github.com/users/capri-xiyue/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/capri-xiyue/subscriptions", "organizations_url": "https://api.github.com/users/capri-xiyue/orgs", "repos_url": "https://api.github.com/users/capri-xiyue/repos", "events_url": "https://api.github.com/users/capri-xiyue/events{/privacy}", "received_events_url": "https://api.github.com/users/capri-xiyue/received_events", "type": "User", "site_admin": false } ]
null
[ "Can it be resolved by adding istio injection label to true as https://github.com/kubeflow/manifests/pull/1153?\r\n\r\ncc @Bobgy to confirm the use case.", "@karlschriek which argo executor supports Istio sudecars?\n\nFrom what I learned from https://argoproj.github.io/argo-workflows/sidecar-injection/. It seems that Istio sidecar injection is still not supported.", "The Argo Workflows team seem to be of the opinion that it should work with v3 onward, though I also cannot find a PR or release note that specifically mentions it, but the issue https://github.com/argoproj/argo-workflows/issues/1282 was closed with a note that this should now be supported. @Bobgy I just saw that you also asked for clarification there!", "I've updated https://github.com/argoproj/argo-workflows/blob/master/docs/sidecar-injection.md with more detail for you." ]
2021-07-16T12:29:06
2021-08-19T13:23:54
2021-08-19T13:23:54
NONE
null
While trying to put together a Kubeflow distribution that includes as many elements as possible inside the Istio mesh, we found that for Kubeflow Pipelines steps the Istio sidecar injection is always overridden to "false". (This would for example make it impossible for a pipeline step to make calls to a KFServing model in the same namespace which has an Istio sidecar enabled). This seems to be hardcoded here: https://github.com/kubeflow/pipelines/blob/fff62e1521e89dd31c87eec03073b8f8627d73a3/backend/src/apiserver/resource/resource_manager.go#L384 I presume that was done because the Pipelines owners are very sure that KFP would not work with injection enabled. As far as I know, newer versions of Argo Workflows should now support it. Could we turn this into an ENV var?
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6067/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6067/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6065
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6065/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6065/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6065/events
https://github.com/kubeflow/pipelines/issues/6065
946,009,210
MDU6SXNzdWU5NDYwMDkyMTA=
6,065
[frontend] console errors like "Unable to find corresponding tab name for MANIFEST" in run details page
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "I think the problem happens in the line\r\n```\r\n for (let tab in SidePanelTab) {\r\n```\r\n\r\nIn typescript, an enum object also contains look up map for the string -> enum conversion, so 'MANIFEST', 'INPUT_OUTPUT', each enum key is also iterated here.", "That is a good catch, thank you @Bobgy !\r\n\r\nI created a PR to fix this: https://github.com/kubeflow/pipelines/pull/6077" ]
2021-07-16T07:09:16
2021-07-19T04:57:16
2021-07-19T04:57:16
CONTRIBUTOR
null
/assign @zijianjoy there is an error message for each enum ``` Unable to find corresponding tab name for ML_METADATA Unable to find corresponding tab name for POD ```
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6065/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6065/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6063
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6063/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6063/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6063/events
https://github.com/kubeflow/pipelines/issues/6063
945,987,899
MDU6SXNzdWU5NDU5ODc4OTk=
6,063
[pH] v2 sample test - enable kaniko caching
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-07-16T06:31:42
2021-08-01T08:58:37
2021-08-01T08:58:37
CONTRIBUTOR
null
https://cloud.google.com/build/docs/kaniko-cache: this will help improve local testing speed. https://github.com/kubeflow/pipelines/blob/master/v2/test/components/kaniko.yaml
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6063/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6063/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6059
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6059/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6059/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6059/events
https://github.com/kubeflow/pipelines/issues/6059
945,853,554
MDU6SXNzdWU5NDU4NTM1NTQ=
6,059
[sdk] building google_cloud_pipeline_components does not include YAML files
{ "login": "helinwang", "id": 1724178, "node_id": "MDQ6VXNlcjE3MjQxNzg=", "avatar_url": "https://avatars.githubusercontent.com/u/1724178?v=4", "gravatar_id": "", "url": "https://api.github.com/users/helinwang", "html_url": "https://github.com/helinwang", "followers_url": "https://api.github.com/users/helinwang/followers", "following_url": "https://api.github.com/users/helinwang/following{/other_user}", "gists_url": "https://api.github.com/users/helinwang/gists{/gist_id}", "starred_url": "https://api.github.com/users/helinwang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/helinwang/subscriptions", "organizations_url": "https://api.github.com/users/helinwang/orgs", "repos_url": "https://api.github.com/users/helinwang/repos", "events_url": "https://api.github.com/users/helinwang/events{/privacy}", "received_events_url": "https://api.github.com/users/helinwang/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "SinaChavoshi", "id": 20114005, "node_id": "MDQ6VXNlcjIwMTE0MDA1", "avatar_url": "https://avatars.githubusercontent.com/u/20114005?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SinaChavoshi", "html_url": "https://github.com/SinaChavoshi", "followers_url": "https://api.github.com/users/SinaChavoshi/followers", "following_url": "https://api.github.com/users/SinaChavoshi/following{/other_user}", "gists_url": "https://api.github.com/users/SinaChavoshi/gists{/gist_id}", "starred_url": "https://api.github.com/users/SinaChavoshi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SinaChavoshi/subscriptions", "organizations_url": "https://api.github.com/users/SinaChavoshi/orgs", "repos_url": "https://api.github.com/users/SinaChavoshi/repos", "events_url": "https://api.github.com/users/SinaChavoshi/events{/privacy}", "received_events_url": "https://api.github.com/users/SinaChavoshi/received_events", "type": "User", "site_admin": false }
[ { "login": "SinaChavoshi", "id": 20114005, "node_id": "MDQ6VXNlcjIwMTE0MDA1", "avatar_url": "https://avatars.githubusercontent.com/u/20114005?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SinaChavoshi", "html_url": "https://github.com/SinaChavoshi", "followers_url": "https://api.github.com/users/SinaChavoshi/followers", "following_url": "https://api.github.com/users/SinaChavoshi/following{/other_user}", "gists_url": "https://api.github.com/users/SinaChavoshi/gists{/gist_id}", "starred_url": "https://api.github.com/users/SinaChavoshi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SinaChavoshi/subscriptions", "organizations_url": "https://api.github.com/users/SinaChavoshi/orgs", "repos_url": "https://api.github.com/users/SinaChavoshi/repos", "events_url": "https://api.github.com/users/SinaChavoshi/events{/privacy}", "received_events_url": "https://api.github.com/users/SinaChavoshi/received_events", "type": "User", "site_admin": false }, { "login": "sasha-gitg", "id": 44654632, "node_id": "MDQ6VXNlcjQ0NjU0NjMy", "avatar_url": "https://avatars.githubusercontent.com/u/44654632?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sasha-gitg", "html_url": "https://github.com/sasha-gitg", "followers_url": "https://api.github.com/users/sasha-gitg/followers", "following_url": "https://api.github.com/users/sasha-gitg/following{/other_user}", "gists_url": "https://api.github.com/users/sasha-gitg/gists{/gist_id}", "starred_url": "https://api.github.com/users/sasha-gitg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sasha-gitg/subscriptions", "organizations_url": "https://api.github.com/users/sasha-gitg/orgs", "repos_url": "https://api.github.com/users/sasha-gitg/repos", "events_url": "https://api.github.com/users/sasha-gitg/events{/privacy}", "received_events_url": "https://api.github.com/users/sasha-gitg/received_events", "type": "User", "site_admin": false } ]
null
[ "The package from `pip install google_cloud_pipeline_components` does not include YAML files. But there should be YAML file in the SDK. E.g., https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/experimental/gcp_launcher/component.yaml\r\n", "/assign @SinaChavoshi ", "I found a way, need to include lines like\r\n package_data={'google_cloud_pipeline_components': ['google/automl/tabular/*.yaml']},\r\nin setup.py, reference: https://screenshot.googleplex.com/BFyR2qutgQs9GdH\r\n\r\nI think we need to add the intended yaml.", "Thank you for raising this issue. fixed by https://github.com/kubeflow/pipelines/pull/6066" ]
2021-07-16T00:57:01
2021-08-03T17:47:46
2021-08-03T17:47:46
NONE
null
### Steps to reproduce Follow the instructions at https://github.com/kubeflow/pipelines/tree/master/components/google-cloud e.g., ``` source_root=$(pwd) git clone https://github.com/kubeflow/pipelines.git cd pipelines/components/google-cloud python setup.py bdist_wheel clean WHEEL_FILE=$(find "$source_root/pipelines/components/google-cloud/dist/" -name "google_cloud_pipeline_components*.whl") pip3 install --upgrade $WHEEL_FILE ``` The resulting wheel file does not include the yaml files.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6059/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6059/timeline
null
completed
null
null
false
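A minimal `setup.py` sketch of the `package_data` fix referenced in the comments of issue 6059 above; the glob path is illustrative rather than the exact pattern merged in the linked PR:

```python
# Declare component YAML files as package data so that
# `python setup.py bdist_wheel` bundles them into the wheel.
from setuptools import setup, find_packages

setup(
    name="google_cloud_pipeline_components",
    version="0.0.1",
    packages=find_packages(),
    # Without package_data (or include_package_data plus a MANIFEST.in),
    # only .py files land in the wheel and component.yaml files are dropped.
    package_data={
        "google_cloud_pipeline_components": [
            "experimental/gcp_launcher/*.yaml",
        ]
    },
)
```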
https://api.github.com/repos/kubeflow/pipelines/issues/6056
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6056/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6056/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6056/events
https://github.com/kubeflow/pipelines/issues/6056
945,708,725
MDU6SXNzdWU5NDU3MDg3MjU=
6,056
[backend] Mounting volume using mount_pvc will mount volume in any condition and will not mount only if I delete the code
{ "login": "dutingda", "id": 38673967, "node_id": "MDQ6VXNlcjM4NjczOTY3", "avatar_url": "https://avatars.githubusercontent.com/u/38673967?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dutingda", "html_url": "https://github.com/dutingda", "followers_url": "https://api.github.com/users/dutingda/followers", "following_url": "https://api.github.com/users/dutingda/following{/other_user}", "gists_url": "https://api.github.com/users/dutingda/gists{/gist_id}", "starred_url": "https://api.github.com/users/dutingda/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dutingda/subscriptions", "organizations_url": "https://api.github.com/users/dutingda/orgs", "repos_url": "https://api.github.com/users/dutingda/repos", "events_url": "https://api.github.com/users/dutingda/events{/privacy}", "received_events_url": "https://api.github.com/users/dutingda/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1145992201, "node_id": "MDU6TGFiZWwxMTQ1OTkyMjAx", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk/dsl", "name": "area/sdk/dsl", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @chensun ", "For this problem, I think I know the reason. But it comes to the other question, how do I retrieve the pipelineParam value?", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-15T20:00:13
2022-03-03T02:05:24
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? * KFP version: 1.0.0 * KFP SDK version: 1.0.0 ### Steps to reproduce I used the Kubeflow Python SDK call `train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))`, guarded by an if statement: `if something: train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))`. No matter what the value of `something` is, the volume is attached in the run. However, when I deleted the whole line, it turns out that the volume is not attached. I am not sure why. Is it because the implementation does some kind of pre-scan of the APIs used and creates the component beforehand? ### Expected result The volume is mounted or not mounted based on the condition. ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6056/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6056/timeline
null
null
null
null
false
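The behavior reported in issue 6056 above follows from compile-time evaluation: the pipeline function body runs exactly once, when the pipeline is compiled, so a Python `if` over a `PipelineParam` tests a truthy placeholder object rather than the runtime value. A sketch, assuming the KFP v1 SDK (`kfp.dsl` and `kfp.onprem.mount_pvc`):

```python
import kfp
from kfp import dsl
from kfp.onprem import mount_pvc


@dsl.pipeline(name="mount-demo")
def demo_pipeline(use_volume: str = "true"):
    train = dsl.ContainerOp(
        name="train", image="busybox", command=["sh", "-c", "ls /mnt"]
    )

    # `use_volume` is a PipelineParam here, not "true"/"false", so this
    # branch is always taken and the volume is always mounted.
    if use_volume:
        train.apply(mount_pvc("claim-name", "pipeline", "/mnt/pipeline"))

    # Runtime branching exists only at task granularity: the step below
    # runs conditionally, but pod specs (volumes, mounts) are fixed once
    # the pipeline is compiled.
    with dsl.Condition(use_volume == "true"):
        dsl.ContainerOp(name="maybe-run", image="busybox", command=["echo", "ran"])


if __name__ == "__main__":
    kfp.compiler.Compiler().compile(demo_pipeline, "mount_demo.yaml")
```

This also answers the follow-up question in the comments: the runtime value of a `PipelineParam` cannot be retrieved inside the pipeline function, only compared inside a `dsl.Condition` block.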
https://api.github.com/repos/kubeflow/pipelines/issues/6053
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6053/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6053/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6053/events
https://github.com/kubeflow/pipelines/issues/6053
945,284,719
MDU6SXNzdWU5NDUyODQ3MTk=
6,053
[backend] visualization server image size increased from 1.3G to 6.4G
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "@zijianjoy Why did you downgrade to TFX 0.26?", "@vaskozl Sorry for missing to include the context in PR description, I tried to upgrade TFX to the latest. However, the dependency graph doesn't resolve because of the requirement conflict on `grpcio`. We will have to wait for https://github.com/tensorflow/tensorflow/issues/50045 so I can upgrade tensorflow image as well as TFX related libraries. " ]
2021-07-15T11:31:41
2021-07-20T18:03:22
2021-07-16T03:16:38
CONTRIBUTOR
null
Look at existing visualization server image sizes: gcr.io/ml-pipeline/visualization-server Compare that with newly built size: gcr.io/ml-pipeline-test/visualization-server@sha256:fb3768db11f2a0bc6f3a94e7191d32eaa2bdaf53a6ca38d2d03de0f6f47c5ede I think the change is caused by using tfx image as base: https://github.com/kubeflow/pipelines/commit/290d201cdf0baae27f3e79ad6add7d81af2249c8. Maybe we should not do that. ## Side effect Release cloud build task is now timing out on pulling the visualization server image, just because it's too large https://console.cloud.google.com/cloud-build/builds;region=global/f92a0259-dd8c-4cd4-a7d9-43c6569f06b5;step=30?project=ml-pipeline-test <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6053/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6053/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6049
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6049/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6049/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6049/events
https://github.com/kubeflow/pipelines/issues/6049
945,076,924
MDU6SXNzdWU5NDUwNzY5MjQ=
6,049
[frontend] Metrics not getting displayed in kubeflow UI after changing the storage to s3
{ "login": "swamat", "id": 53515128, "node_id": "MDQ6VXNlcjUzNTE1MTI4", "avatar_url": "https://avatars.githubusercontent.com/u/53515128?v=4", "gravatar_id": "", "url": "https://api.github.com/users/swamat", "html_url": "https://github.com/swamat", "followers_url": "https://api.github.com/users/swamat/followers", "following_url": "https://api.github.com/users/swamat/following{/other_user}", "gists_url": "https://api.github.com/users/swamat/gists{/gist_id}", "starred_url": "https://api.github.com/users/swamat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/swamat/subscriptions", "organizations_url": "https://api.github.com/users/swamat/orgs", "repos_url": "https://api.github.com/users/swamat/repos", "events_url": "https://api.github.com/users/swamat/events{/privacy}", "received_events_url": "https://api.github.com/users/swamat/received_events", "type": "User", "site_admin": false }
[ { "id": 930476737, "node_id": "MDU6TGFiZWw5MzA0NzY3Mzc=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/help%20wanted", "name": "help wanted", "color": "db1203", "default": true, "description": "The community is welcome to contribute." }, { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "This is a current limitation. Welcome contributions!", "Related code:\r\n* persistence agent calls read artifact API in backend to read metrics artifacts: https://github.com/kubeflow/pipelines/blob/fff62e1521e89dd31c87eec03073b8f8627d73a3/backend/src/agent/persistence/worker/metrics_reporter.go#L149\r\n* read artifact API gets object using the object store: https://github.com/kubeflow/pipelines/blob/fff62e1521e89dd31c87eec03073b8f8627d73a3/backend/src/apiserver/resource/resource_manager.go#L1163\r\n* we only support the minio object store: https://github.com/kubeflow/pipelines/blob/fff62e1521e89dd31c87eec03073b8f8627d73a3/backend/src/apiserver/storage/object_store.go#L81", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-15T07:15:55
2022-03-03T02:05:26
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? https://www.kubeflow.org/docs/distributions/aws/deploy/install-kubeflow/ kubeflow 1.2 on aws <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.0.1 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> The artifacts were initially stored in MinIO. After changing the storage to S3, I am unable to see the metrics in the run output. ### Expected result Metrics are displayed in the run output. ### Materials and Reference https://www.kubeflow.org/docs/distributions/aws/ <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6049/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6049/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6048
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6048/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6048/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6048/events
https://github.com/kubeflow/pipelines/issues/6048
944,941,746
MDU6SXNzdWU5NDQ5NDE3NDY=
6,048
[feature] Return a list of runs' metadata including run parameters sorted by run details
{ "login": "dutingda", "id": 38673967, "node_id": "MDQ6VXNlcjM4NjczOTY3", "avatar_url": "https://avatars.githubusercontent.com/u/38673967?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dutingda", "html_url": "https://github.com/dutingda", "followers_url": "https://api.github.com/users/dutingda/followers", "following_url": "https://api.github.com/users/dutingda/following{/other_user}", "gists_url": "https://api.github.com/users/dutingda/gists{/gist_id}", "starred_url": "https://api.github.com/users/dutingda/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dutingda/subscriptions", "organizations_url": "https://api.github.com/users/dutingda/orgs", "repos_url": "https://api.github.com/users/dutingda/repos", "events_url": "https://api.github.com/users/dutingda/events{/privacy}", "received_events_url": "https://api.github.com/users/dutingda/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @dutingda, you can follow this example for filtering (same feature exists for runs too): https://www.kubeflow.org/docs/components/pipelines/tutorials/sdk-examples/#example-2-listing-pipelines-with-a-filter.\r\n\r\nPlease let us know what's missing from documentation and feature", "> Hi @dutingda, you can follow this example for filtering (same feature exists for runs too): https://www.kubeflow.org/docs/components/pipelines/tutorials/sdk-examples/#example-2-listing-pipelines-with-a-filter.\r\n> \r\n> Please let us know what's missing from documentation and feature\r\n\r\n@Bobgy So what should be the key if I want to filter by start time and what should be the return value of this filter. And what is the version for kfp client sdk, I am using kfp 1.0.0 version now.", "@Bobgy How do I read the response object type and what is the type?", "/assign @zijianjoy \nCan you help answer this question?\nThanks", "Hello @dutingda , \r\n\r\n- API definition for `list_runs`: https://kubeflow-pipelines.readthedocs.io/en/stable/source/kfp.server_api.html#kfp_server_api.api.run_service_api.RunServiceApi.list_runs\r\n- Fields for time related filter: `created_at`, `finished_at`, `scheduled_at`. Format of time value is `2021-08-26T23:00:34Z`. Example: `{\"predicates\":[{\"key\":\"created_at\",\"op\":\"GREATER_THAN\",\"string_value\":\"2021-08-26T23:00:34Z\"}]}`\r\n- kfp 1.0.0 seems to be too old. You can find the new versions in https://pypi.org/project/kfp/#history. But makes sure you use the corresponded version of KFP backend too.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-15T02:34:24
2022-03-02T21:05:17
null
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> <!-- /area backend --> /area sdk <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? <!-- Provide a description of this feature and the user experience. --> I need help with the query described in the title; is there a way to do that? I need to query the list of runs filtered by start time and get the run-parameter metadata for all of them. But I am not sure which SDK I need to use and how it should be used, since the documentation does not provide enough details about one possible API I could use (`list_runs`). ### What is the use case or pain point? I need to clean up the least recently used cached data stored in the PVC. ### Is there a workaround currently? Not sure <!-- Without this feature, how do you accomplish your task today? --> --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6048/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6048/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6033
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6033/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6033/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6033/events
https://github.com/kubeflow/pipelines/issues/6033
943,980,123
MDU6SXNzdWU5NDM5ODAxMjM=
6,033
[bug] Pipeline does not retain the logs as an artifact for failed/crash component: Failed to retrieve pod logs
{ "login": "jalola", "id": 1896808, "node_id": "MDQ6VXNlcjE4OTY4MDg=", "avatar_url": "https://avatars.githubusercontent.com/u/1896808?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jalola", "html_url": "https://github.com/jalola", "followers_url": "https://api.github.com/users/jalola/followers", "following_url": "https://api.github.com/users/jalola/following{/other_user}", "gists_url": "https://api.github.com/users/jalola/gists{/gist_id}", "starred_url": "https://api.github.com/users/jalola/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jalola/subscriptions", "organizations_url": "https://api.github.com/users/jalola/orgs", "repos_url": "https://api.github.com/users/jalola/repos", "events_url": "https://api.github.com/users/jalola/events{/privacy}", "received_events_url": "https://api.github.com/users/jalola/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "Hello @jalola , this is related to Argo workflow issue, would you like to create an issue there to resolve this problem? https://github.com/argoproj/argo-workflows", "Thanks @zijianjoy \r\nThere is an issue related: https://github.com/argoproj/argo-workflows/issues/6201\r\n", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-14T02:41:17
2022-03-03T02:05:21
null
NONE
null
### What steps did you take 1. Create a pipeline 2. Make a component with a bug so it will crash later in the pipeline 3. Run the pipeline until the component fails 4. Wait for the pod to be deleted ### What happened: 5. The Logs tab of that pod shows "Failed to retrieve pod logs." 6. The Input/Output tab of that pod shows that no main-logs artifact was saved ### What did you expect to happen: I think 5 is expected, because the UI gets the logs directly from the Pod, so when the Pod is deleted the logs cannot be retrieved. But for 6, I think it should have the main-logs (main.log) so we can see what went wrong in the code. ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? Kubeflow manifests v1.3 by kustomize https://github.com/kubeflow/manifests/tree/v1.3-branch <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: build version dev_local <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears at the bottom of the KFP UI left sidenav. --> * KFP SDK version: 1.6.3 <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> I notice that if the pipeline fails because of a bug or is manually terminated (error message: "This step is in Failed state with this message: failed with exit code 1" or "This step is in Failed state with this message: failed with exit code 137"), I cannot see the main-logs. If the component fails with the error message "This step is in Failed state with this message: Step exceeded its deadline", the main-logs artifact is there after the pod is deleted. ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
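In the cases where Argo does archive `main.log`, it can be fetched straight from the artifact bucket even after the pod is gone. A minimal sketch using the minio Python client against the bundled minio deployment; the object key layout ("artifacts/<workflow>/<pod>/main.log") is an assumption about the default KFP artifact repository configuration, so adjust it to what you actually see in the `mlpipeline` bucket:

```python
from minio import Minio  # pip install minio

# KFP's default artifact store is minio inside the cluster; port-forward it first:
#   kubectl -n kubeflow port-forward svc/minio-service 9000:9000
# The credentials below are the well-known defaults of the bundled minio deployment.
client = Minio(
    "localhost:9000",
    access_key="minio",
    secret_key="minio123",
    secure=False,
)

# Placeholders: substitute the actual workflow and pod names.
obj = client.get_object("mlpipeline", "artifacts/<workflow-name>/<pod-name>/main.log")
print(obj.read().decode("utf-8"))
```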
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6033/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6033/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6026
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6026/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6026/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6026/events
https://github.com/kubeflow/pipelines/issues/6026
942,882,193
MDU6SXNzdWU5NDI4ODIxOTM=
6,026
[bug] v2 Tutorial pipeline failed at PutParentContexts
{ "login": "rebsp", "id": 6863864, "node_id": "MDQ6VXNlcjY4NjM4NjQ=", "avatar_url": "https://avatars.githubusercontent.com/u/6863864?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rebsp", "html_url": "https://github.com/rebsp", "followers_url": "https://api.github.com/users/rebsp/followers", "following_url": "https://api.github.com/users/rebsp/following{/other_user}", "gists_url": "https://api.github.com/users/rebsp/gists{/gist_id}", "starred_url": "https://api.github.com/users/rebsp/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rebsp/subscriptions", "organizations_url": "https://api.github.com/users/rebsp/orgs", "repos_url": "https://api.github.com/users/rebsp/repos", "events_url": "https://api.github.com/users/rebsp/events{/privacy}", "received_events_url": "https://api.github.com/users/rebsp/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Nevermind, found it:\r\nhttps://github.com/kubeflow/pipelines/blob/ec9021f5b04521d394fb3a95903b86b478bf6550/v2/test/README.md" ]
2021-07-13T07:13:02
2021-07-13T08:59:55
2021-07-13T08:59:55
NONE
null
### What steps did you take 1. Deploy kubeflow pipelines (using kind) on local cluster according to: https://www.kubeflow.org/docs/components/pipelines/installation/localcluster-deployment/ 2. Build a sample pipeline with v2 SDK according to: https://www.kubeflow.org/docs/components/pipelines/sdk/v2/build-pipeline/ 3. Upload generated pipeline.yaml file to kubeflow pipelines frontend, use default values (pre-filled) 4. Create run from newly built pipeline, paste `https://storage.googleapis.com/ml-pipeline-playground/iris-csv-files.tar.gz` as URL parameter ### What happened: The pipeline fails at the first step with the following log information: ```bash 33 main.go:55] Failed to execute component: unable to get pipeline with PipelineName "my-pipeline" PipelineRunID "my-pipeline-xbv4z": Failed PutParentContexts(parent_contexts:{child_id:11 parent_id:9}): rpc error: code = Unimplemented desc = ``` ![image](https://user-images.githubusercontent.com/6863864/125406310-fc38ca80-e3b8-11eb-8bf5-8265d917d8c1.png) ### What did you expect to happen: The pipeline runs through 🤷‍♂️ ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? Local cluster: https://www.kubeflow.org/docs/components/pipelines/installation/localcluster-deployment/ * KFP version: 1.6.0 * KFP SDK version: kfp 1.6.4 kfp-pipeline-spec 0.1.8 kfp-server-api 1.6.0 ### Anything else you would like to add: I followed the tutorials strictly, no own code whatsoever. It also fails with the same error when I manually change the pipeline name to 'my-pipeline' like it is named in the code. ### Labels <!-- /area frontend --> /area backend /area sdk <!-- /area testing --> <!-- /area samples --> /area components --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6026/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6026/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6020
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6020/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6020/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6020/events
https://github.com/kubeflow/pipelines/issues/6020
942,204,503
MDU6SXNzdWU5NDIyMDQ1MDM=
6,020
[sdk] Some keys in json used in ParallelFor break compilation
{ "login": "Udiknedormin", "id": 20307949, "node_id": "MDQ6VXNlcjIwMzA3OTQ5", "avatar_url": "https://avatars.githubusercontent.com/u/20307949?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Udiknedormin", "html_url": "https://github.com/Udiknedormin", "followers_url": "https://api.github.com/users/Udiknedormin/followers", "following_url": "https://api.github.com/users/Udiknedormin/following{/other_user}", "gists_url": "https://api.github.com/users/Udiknedormin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Udiknedormin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Udiknedormin/subscriptions", "organizations_url": "https://api.github.com/users/Udiknedormin/orgs", "repos_url": "https://api.github.com/users/Udiknedormin/repos", "events_url": "https://api.github.com/users/Udiknedormin/events{/privacy}", "received_events_url": "https://api.github.com/users/Udiknedormin/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @chensun", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-12T15:54:54
2022-03-03T02:05:29
null
CONTRIBUTOR
null
### Environment * KFP SDK version: 1.6.3 * All dependencies version: * kfp 1.6.3 * kfp-pipeline-spec 0.1.8 * kfp-server-api 1.4.1 ### Steps to reproduce Create a pipeline where `dsl.ParallelFor` either: * accepts a list literal with the first item being a dictionary with a `"name"` key * accesses its item's `.name` field * both (behaves like the first one) In the first and third cases, the compilation fails with bizarre errors. In the second case, it simply produces wrong YAML. Example of the first case: ```python from kfp import dsl from kfp.components import load_component_from_text from kfp_tekton.tekton import CEL_ConditionOp PrintOp = load_component_from_text(""" name: print inputs: - name: msg outputs: - name: stdout implementation: container: image: alpine:3.6 command: - concat: - "echo " - { inputValue: msg } """) @dsl.pipeline("decomposition test: non-dict") def decomposition_test_00(): string_list_literal = ["foo", "bar"] with dsl.ParallelFor(string_list_literal) as item: PrintOp(f"{item}") @dsl.pipeline("decomposition test: dict without 'name' key") def decomposition_test_01(): dict_ok_list_literal = [ { "_name": "foo", "_value": 42, }, { "_name": "bar", "_value": 3141592 } ] with dsl.ParallelFor(dict_ok_list_literal) as item: name = item._name value = item._value PrintOp(f"{name}={value}") @dsl.pipeline("decomposition test: dict with 'name' key") def decomposition_test_02(): dict_err_list_literal = [ { "name": "foo", "value": 42, }, { "name": "bar", "value": 3141592 } ] with dsl.ParallelFor(dict_err_list_literal) as item: name = item.name value = item.value PrintOp(f"{name}={value}") decomposition_tests = [ decomposition_test_00, decomposition_test_01, decomposition_test_02, ] if __name__ == '__main__': from kfp.compiler import Compiler for i, decomposition_test in enumerate(decomposition_tests): print(decomposition_test._component_human_name) Compiler().compile(decomposition_test, __file__.replace('.py', f'-{i}.argo.yaml')) ``` The compilation breaks in two distinct ways; I'm not sure what determines which one is triggered, as they seem to interleave when I call the script multiple times. 
First one: ``` $ python3 decompose.py decomposition test: non-dict decomposition test: dict with 'name' key Traceback (most recent call last): File "(...)/decompose.py", line 71, in <module> Compiler().compile(decomposition_test, __file__.replace('.py', f'-{i}.argo.yaml')) File "(...)/site-packages/kfp/compiler/compiler.py", line 1053, in compile self._create_and_write_workflow( File "(...)/site-packages/kfp/compiler/compiler.py", line 1106, in _create_and_write_workflow workflow = self._create_workflow( File "(...)/site-packages/kfp/compiler/compiler.py", line 946, in _create_workflow workflow = self._create_pipeline_workflow( File "(...)/site-packages/kfp/compiler/compiler.py", line 713, in _create_pipeline_workflow templates = self._create_dag_templates(pipeline, op_transformers) File "(...)/site-packages/kfp/compiler/compiler.py", line 653, in _create_dag_templates inputs, outputs = self._get_inputs_outputs( File "(...)/site-packages/kfp/compiler/compiler.py", line 303, in _get_inputs_outputs if loop_group.loop_args.name in param.name: TypeError: 'in <string>' requires string as left operand, not LoopArgumentVariable ``` Second one: ``` $ python3 decompose.py decomposition test: non-dict decomposition test: dict with 'name' key Traceback (most recent call last): File "(...)/decompose.py", line 71, in <module> Compiler().compile(decomposition_test, __file__.replace('.py', f'-{i}.argo.yaml')) File "(...)/site-packages/kfp/compiler/compiler.py", line 1053, in compile self._create_and_write_workflow( File "(...)/site-packages/kfp/compiler/compiler.py", line 1106, in _create_and_write_workflow workflow = self._create_workflow( File "(...)/site-packages/kfp/compiler/compiler.py", line 896, in _create_workflow pipeline_func(*args_list, **kwargs_dict) File "(...)/decompose.py", line 56, in decomposition_test_02 with dsl.ParallelFor(dict_err_list_literal) as item: File "(...)/site-packages/kfp/dsl/_ops_group.py", line 226, in __init__ loop_args = _for_loop.LoopArguments( File "(...)/site-packages/kfp/dsl/_for_loop.py", line 76, in __init__ LoopArgumentVariable( File "(...)/site-packages/kfp/dsl/_for_loop.py", line 159, in __init__ super().__init__( File "(...)/site-packages/kfp/dsl/_pipeline_param.py", line 171, in __init__ raise ValueError( ValueError: Only letters, numbers, spaces, "_", and "-" are allowed in name. Must begin with a letter. 
Got name: {{pipelineparam:op=;name=loop-item-param-2-subvar-name}}-subvar-value ``` If the json is passed as a parameter instead of a literal: ```python from kfp import dsl from kfp.components import load_component_from_text from kfp_tekton.tekton import CEL_ConditionOp PrintOp = load_component_from_text(""" name: print inputs: - name: msg outputs: - name: stdout implementation: container: image: alpine:3.6 command: - concat: - "echo " - { inputValue: msg } """) @dsl.pipeline("decomposition test: non-dict") def decomposition_test_00( string_list_literal = ["foo", "bar"] ): with dsl.ParallelFor(string_list_literal) as item: PrintOp(f"{item}") @dsl.pipeline("decomposition test: dict without 'name' key") def decomposition_test_01( dict_ok_list_literal = [ { "_name": "foo", "_value": 42, }, { "_name": "bar", "_value": 3141592 } ] ): with dsl.ParallelFor(dict_ok_list_literal) as item: name = item._name value = item._value PrintOp(f"{name}={value}") @dsl.pipeline("decomposition test: dict with 'name' key") def decomposition_test_02( dict_err_list_literal = [ { "name": "foo", "value": 42, }, { "name": "bar", "value": 3141592 } ] ): with dsl.ParallelFor(dict_err_list_literal) as item: name = item.name value = item.value PrintOp(f"{name}={value}") decomposition_tests = [ decomposition_test_00, decomposition_test_01, decomposition_test_02, ] if __name__ == '__main__': from kfp.compiler import Compiler for i, decomposition_test in enumerate(decomposition_tests): print(decomposition_test._component_human_name) Compiler().compile(decomposition_test, __file__.replace('.py', f'-{i}.argo.yaml')) ``` then all three compile, but there is a vital difference between the second and third pipeline: Second pipeline (no `"name"` key used): ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: generateName: decomposition-test-dict-without-name-key- annotations: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3, pipelines.kubeflow.org/pipeline_compilation_time: '2021-07-12T15:38:57.369128', pipelines.kubeflow.org/pipeline_spec: '{"inputs": [{"default": "[{\"_name\": \"foo\", \"_value\": 42}, {\"_name\": \"bar\", \"_value\": 3141592}]", "name": "dict_ok_list_literal", "optional": true}], "name": "decomposition test: dict without ''name'' key"}'} labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3} spec: entrypoint: decomposition-test-dict-without-name-key templates: - name: decomposition-test-dict-without-name-key dag: tasks: - name: for-loop-1 template: for-loop-1 arguments: parameters: - {name: dict_ok_list_literal-loop-item-subvar-_name, value: '{{item._name}}'} - {name: dict_ok_list_literal-loop-item-subvar-_value, value: '{{item._value}}'} withParam: '{{workflow.parameters.dict_ok_list_literal}}' - name: for-loop-1 inputs: parameters: - {name: dict_ok_list_literal-loop-item-subvar-_name} - {name: dict_ok_list_literal-loop-item-subvar-_value} dag: tasks: - name: print template: print arguments: parameters: - {name: dict_ok_list_literal-loop-item-subvar-_name, value: '{{inputs.parameters.dict_ok_list_literal-loop-item-subvar-_name}}'} - {name: dict_ok_list_literal-loop-item-subvar-_value, value: '{{inputs.parameters.dict_ok_list_literal-loop-item-subvar-_value}}'} - name: print container: args: [] command: ['echo {{inputs.parameters.dict_ok_list_literal-loop-item-subvar-_name}}={{inputs.parameters.dict_ok_list_literal-loop-item-subvar-_value}}'] image: alpine:3.6 inputs: parameters: - {name: dict_ok_list_literal-loop-item-subvar-_name} - {name: dict_ok_list_literal-loop-item-subvar-_value} metadata: 
labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3, pipelines.kubeflow.org/pipeline-sdk-type: kfp} annotations: {pipelines.kubeflow.org/component_spec: '{"implementation": {"container": {"command": [{"concat": ["echo ", {"inputValue": "msg"}]}], "image": "alpine:3.6"}}, "inputs": [{"name": "msg"}], "name": "print", "outputs": [{"name": "stdout"}]}', pipelines.kubeflow.org/component_ref: '{"digest": "759d33433106734a4f8d748310b0ec7fc618e428176f270b6e33b782526870c8"}', pipelines.kubeflow.org/arguments.parameters: '{"msg": "{{inputs.parameters.dict_ok_list_literal-loop-item-subvar-_name}}={{inputs.parameters.dict_ok_list_literal-loop-item-subvar-_value}}"}'} arguments: parameters: - {name: dict_ok_list_literal, value: '[{"_name": "foo", "_value": 42}, {"_name": "bar", "_value": 3141592}]'} serviceAccountName: pipeline-runner ``` Third pipeline (`"name"` used): ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: generateName: decomposition-test-dict-with-name-key- annotations: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3, pipelines.kubeflow.org/pipeline_compilation_time: '2021-07-12T15:38:57.381484', pipelines.kubeflow.org/pipeline_spec: '{"inputs": [{"default": "[{\"name\": \"foo\", \"value\": 42}, {\"name\": \"bar\", \"value\": 3141592}]", "name": "dict_err_list_literal", "optional": true}], "name": "decomposition test: dict with ''name'' key"}'} labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3} spec: entrypoint: decomposition-test-dict-with-name-key templates: - name: decomposition-test-dict-with-name-key dag: tasks: - {name: for-loop-1, template: for-loop-1, withParam: '{{workflow.parameters.dict_err_list_literal}}'} - name: for-loop-1 dag: tasks: - {name: print, template: print} - name: print container: args: [] command: [echo dict_err_list_literal-loop-item=None] image: alpine:3.6 metadata: labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3, pipelines.kubeflow.org/pipeline-sdk-type: kfp} annotations: {pipelines.kubeflow.org/component_spec: '{"implementation": {"container": {"command": [{"concat": ["echo ", {"inputValue": "msg"}]}], "image": "alpine:3.6"}}, "inputs": [{"name": "msg"}], "name": "print", "outputs": [{"name": "stdout"}]}', pipelines.kubeflow.org/component_ref: '{"digest": "759d33433106734a4f8d748310b0ec7fc618e428176f270b6e33b782526870c8"}', pipelines.kubeflow.org/arguments.parameters: '{"msg": "dict_err_list_literal-loop-item=None"}'} arguments: parameters: - {name: dict_err_list_literal, value: '[{"name": "foo", "value": 42}, {"name": "bar", "value": 3141592}]'} serviceAccountName: pipeline-runner ``` The key difference being: ``` < command: ['echo {{inputs.parameters.dict_ok_list_literal-loop-item-subvar-_name}}={{inputs.parameters.dict_ok_list_literal-loop-item-subvar-_value}}'] --- > command: [echo dict_err_list_literal-loop-item=None] ``` `dict_err_list_literal-loop-item=None` is nothing like the `"name"` field of the item. ### Expected result The compilation either working just fine and producing results which are analogous. ### Materials and Reference While the second case was very puzzling for me, the second error message of the first case certainly looks like stringified PipelineParam of a sub-var, i.e. `LoopArguments`. 
So I took a look at its code and [here is what I found](https://github.com/kubeflow/pipelines/blob/a0fd14b276bc05fb1ddcd3681621e9f44a23bb2a/sdk/python/kfp/dsl/_for_loop.py#L59-L76): ```python if isinstance(items, list) and isinstance(items[0], dict): subvar_names = set(items[0].keys()) for item in items: if not set(item.keys()) == subvar_names: raise ValueError( 'If you input a list of dicts then all dicts should have the same keys. ' 'Got: {}.'.format(items)) # then this block creates loop_args.variable_a and loop_args.variable_b for subvar_name in subvar_names: if not self._subvar_name_is_legal(subvar_name): raise ValueError( "Tried to create subvariable named {} but that's not a legal Python variable " 'name.'.format(subvar_name)) setattr( self, subvar_name, LoopArgumentVariable( self.name, subvar_name, loop_args_op_name=self.op_name)) ``` i.e. if a literal is used, an attribute gets assigned to the class. But later on in the class, a custom attribute accessor [is defined anyway](https://github.com/kubeflow/pipelines/blob/a0fd14b276bc05fb1ddcd3681621e9f44a23bb2a/sdk/python/kfp/dsl/_for_loop.py#L91-L96): ```python def __getattr__(self, item): # this is being overridden so that we can access subvariables of the # LoopArguments (i.e.: item.a) without knowing the subvariable names ahead # of time self.referenced_subvar_names.append(item) return LoopArgumentVariable(self.name, item, loop_args_op_name=self.op_name) ``` So when I removed the first fragment entirely, I immediately got rid of the error, and the first case produced YAML similar to the second case, i.e. containing `command: [echo loop-item-param-2=None]`. That explains the difference between the literal and non-literal versions. But why do both contain an error at all? Because `LoopArguments` is a subclass of `PipelineParam`, I checked [its code](https://github.com/kubeflow/pipelines/blob/a0fd14b276bc05fb1ddcd3681621e9f44a23bb2a/sdk/python/kfp/dsl/_pipeline_param.py#L140-L183): ```python class PipelineParam(object): """(...) Args: name: name of the pipeline parameter. (...) value: The actual value of the PipelineParam. If provided, the PipelineParam is "resolved" immediately. For now, we support string only. (...) """ def __init__(self, name: str, op_name: Optional[str] = None, value: Optional[str] = None, param_type: Optional[Union[str, Dict]] = None, pattern: Optional[str] = None): valid_name_regex = r'^[A-Za-z][A-Za-z0-9\s_-]*$' if not re.match(valid_name_regex, name): raise ValueError( 'Only letters, numbers, spaces, "_", and "-" are allowed in name. ' 'Must begin with a letter. Got name: {}'.format(name)) if op_name and value: raise ValueError('op_name and value cannot be both set.') self.name = name # ensure value is None even if empty string or empty list # so that serialization and unserialization remain consistent # (i.e. None => '' => None) self.op_name = op_name if op_name else None self.value = value ``` Which explains `dict_err_list_literal-loop-item=None` --- it's the `LoopArgument`'s `name` field, `=`, and the default value of the `value` field --- `None`. Now, that's inconsistent with how the user can access any other field in the list of json objects/dicts. What's worse: adding a new field in PipelineParam can potentially break user pipelines that currently work fine. 
I expect few users used `name` and `value` of `LoopArguments` expecting the ones from `PipelineParam` --- in fact, I don't think its fields are useful to users in general, as they describe internal compiler info, like the name (most of the time it's similar to the name of the variable, so why would the user need that?), its default value (it's written directly in the signature, so why would the user need that?) or the task that produced it (PipelineParams are produced either in pipeline-or-graph signatures, `dsl.ParallelFor(...) as item` or `.output`/`.outputs[...]` --- why would the user need that?). Besides, it seems to me that it's much easier to explain to the user that ALL of the fields accessed on `LoopArguments` are simply its keys. Therefore, I assume it's ok to simply change it so that during pipeline building, all fields of `LoopArguments` are treated as json fields, and it only turns back to the behaviour of the regular `PipelineParam` inside of the `Compiler`. ## Proposed solution (early POC) For that, I created a POC with the following changes: * remove the above-mentioned setting of attributes in `LoopArguments` --- as already shown, it's not really needed for the syntax to work fine (alternatively, one may add a check which only sets the attribute if it's not there yet) * change attribute access in PipelineParam's magical methods to `object.__getattribute__` so that e.g. `__str__` (which is used a lot) works just like before no matter where it's used * introduce a global variable `loop_arguments_decomposing`, defaulting to `True` * temporarily set that variable to `False` in `LoopArgument`'s constructor (otherwise, `object.__getattribute__` would need to be used again, which is more effort), placing it inside a try/finally or a context manager so that exceptions won't affect setting it back * change the `__getattr__` method into a `__getattribute__` method with the following implementation (very similar to the previous one, except for the `loop_arguments_decomposing` check and the use of `object.__getattribute__`; `super()` could be used too): ```python def __getattribute__(self, item): global loop_arguments_decomposing if not loop_arguments_decomposing: return object.__getattribute__(self, item) referenced_subvar_names = object.__getattribute__(self, 'referenced_subvar_names') name = object.__getattribute__(self, 'name') op_name = object.__getattribute__(self, 'op_name') referenced_subvar_names.append(item) return LoopArgumentVariable(name, item, loop_args_op_name=op_name) ``` * temporarily set `loop_arguments_decomposing` to `False` in `_create_workflow` in compiler.py, right after the `pipeline_func` call, placing it inside a try/finally or a context manager so that exceptions won't affect setting it back * (optional) add `__getitem__` to `LoopArguments` to be able to access keys that aren't valid Python identifiers, e.g. 
containing `-` --- it should work fine with the `loop_arguments_decomposing`, as it's a magical method When I did all of that, I got both cases (with literals and without literals) working just fine: With literals: ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: generateName: decomposition-test-dict-with-name-key- annotations: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3, pipelines.kubeflow.org/pipeline_compilation_time: '2021-07-12T17:27:43.321192', pipelines.kubeflow.org/pipeline_spec: '{"name": "decomposition test: dict with ''name'' key"}'} labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3} spec: entrypoint: decomposition-test-dict-with-name-key templates: - name: decomposition-test-dict-with-name-key dag: tasks: - name: for-loop-3 template: for-loop-3 arguments: parameters: - {name: loop-item-param-2-subvar-name, value: '{{item.name}}'} - {name: loop-item-param-2-subvar-value, value: '{{item.value}}'} withItems: - {name: foo, value: 42} - {name: bar, value: 3141592} - name: for-loop-3 inputs: parameters: - {name: loop-item-param-2-subvar-name} - {name: loop-item-param-2-subvar-value} dag: tasks: - name: print template: print arguments: parameters: - {name: loop-item-param-2-subvar-name, value: '{{inputs.parameters.loop-item-param-2-subvar-name}}'} - {name: loop-item-param-2-subvar-value, value: '{{inputs.parameters.loop-item-param-2-subvar-value}}'} - name: print container: args: [] command: ['echo {{inputs.parameters.loop-item-param-2-subvar-name}}={{inputs.parameters.loop-item-param-2-subvar-value}}'] image: alpine:3.6 inputs: parameters: - {name: loop-item-param-2-subvar-name} - {name: loop-item-param-2-subvar-value} metadata: labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3, pipelines.kubeflow.org/pipeline-sdk-type: kfp} annotations: {pipelines.kubeflow.org/component_spec: '{"implementation": {"container": {"command": [{"concat": ["echo ", {"inputValue": "msg"}]}], "image": "alpine:3.6"}}, "inputs": [{"name": "msg"}], "name": "print", "outputs": [{"name": "stdout"}]}', pipelines.kubeflow.org/component_ref: '{"digest": "759d33433106734a4f8d748310b0ec7fc618e428176f270b6e33b782526870c8"}', pipelines.kubeflow.org/arguments.parameters: '{"msg": "{{inputs.parameters.loop-item-param-2-subvar-name}}={{inputs.parameters.loop-item-param-2-subvar-value}}"}'} arguments: parameters: [] serviceAccountName: pipeline-runner ``` With pipeline parameter: ```yaml apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: generateName: decomposition-test-dict-with-name-key- annotations: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3, pipelines.kubeflow.org/pipeline_compilation_time: '2021-07-12T17:27:52.110929', pipelines.kubeflow.org/pipeline_spec: '{"inputs": [{"default": "[{\"name\": \"foo\", \"value\": 42}, {\"name\": \"bar\", \"value\": 3141592}]", "name": "dict_err_list_literal", "optional": true}], "name": "decomposition test: dict with ''name'' key"}'} labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3} spec: entrypoint: decomposition-test-dict-with-name-key templates: - name: decomposition-test-dict-with-name-key dag: tasks: - name: for-loop-1 template: for-loop-1 arguments: parameters: - {name: dict_err_list_literal-loop-item-subvar-name, value: '{{item.name}}'} - {name: dict_err_list_literal-loop-item-subvar-value, value: '{{item.value}}'} withParam: '{{workflow.parameters.dict_err_list_literal}}' - name: for-loop-1 inputs: parameters: - {name: dict_err_list_literal-loop-item-subvar-name} - {name: dict_err_list_literal-loop-item-subvar-value} dag: tasks: - 
name: print template: print arguments: parameters: - {name: dict_err_list_literal-loop-item-subvar-name, value: '{{inputs.parameters.dict_err_list_literal-loop-item-subvar-name}}'} - {name: dict_err_list_literal-loop-item-subvar-value, value: '{{inputs.parameters.dict_err_list_literal-loop-item-subvar-value}}'} - name: print container: args: [] command: ['echo {{inputs.parameters.dict_err_list_literal-loop-item-subvar-name}}={{inputs.parameters.dict_err_list_literal-loop-item-subvar-value}}'] image: alpine:3.6 inputs: parameters: - {name: dict_err_list_literal-loop-item-subvar-name} - {name: dict_err_list_literal-loop-item-subvar-value} metadata: labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.6.3, pipelines.kubeflow.org/pipeline-sdk-type: kfp} annotations: {pipelines.kubeflow.org/component_spec: '{"implementation": {"container": {"command": [{"concat": ["echo ", {"inputValue": "msg"}]}], "image": "alpine:3.6"}}, "inputs": [{"name": "msg"}], "name": "print", "outputs": [{"name": "stdout"}]}', pipelines.kubeflow.org/component_ref: '{"digest": "759d33433106734a4f8d748310b0ec7fc618e428176f270b6e33b782526870c8"}', pipelines.kubeflow.org/arguments.parameters: '{"msg": "{{inputs.parameters.dict_err_list_literal-loop-item-subvar-name}}={{inputs.parameters.dict_err_list_literal-loop-item-subvar-value}}"}'} arguments: parameters: - {name: dict_err_list_literal, value: '[{"name": "foo", "value": 42}, {"name": "bar", "value": 3141592}]'} serviceAccountName: pipeline-runner ``` It seems to me that this solution is quite simple and very general: one can add as many fields to `PipelineParam` as they like, since no non-magical methods are expected to be available to the user (the only one I can think of is `ignore_type`, but I don't think it's really used with pipeline items? we could add it as an exception in `__getattribute__`, but I'd rather avoid that, as it has the same problems as `"name"` and `"value"`, just a little less likely to be used, maybe --- it seems easier to explain to the user that they need to use a function `ignore_type` instead of a method `.ignore_type()`, rather than explain why they can't have a field called `"ignore_type"` in their json...) and all magical methods are implemented using `object.__getattribute__` instead of the usual dot access --- which isn't much work, considering it's just two classes with a small number of methods, most of which are only used in the compiler, where the dot access is fine. What do you think? Does that solution sound ok? I will post a PR later on, after I polish the code a little bit. --- Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
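For illustration, the "try/finally or a context manager" mentioned in the POC could be a small context manager like the sketch below. The names follow the proposal; this is illustrative, not the actual patch:

```python
from contextlib import contextmanager

# Module-level flag from the POC: when True, attribute access on
# LoopArguments is interpreted as JSON-subfield access.
loop_arguments_decomposing = True

@contextmanager
def no_decomposing():
    """Temporarily disable subvariable decomposition, e.g. inside the
    LoopArguments constructor or right after pipeline_func() returns in
    Compiler._create_workflow; the flag is restored even on exceptions."""
    global loop_arguments_decomposing
    previous = loop_arguments_decomposing
    loop_arguments_decomposing = False
    try:
        yield
    finally:
        loop_arguments_decomposing = previous

# Usage sketch:
# with no_decomposing():
#     ...  # code that needs real attribute access on LoopArguments
```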
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6020/reactions", "total_count": 6, "+1": 6, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6020/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6019
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6019/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6019/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6019/events
https://github.com/kubeflow/pipelines/issues/6019
942,154,790
MDU6SXNzdWU5NDIxNTQ3OTA=
6,019
[feature] Ability to Specify Kaniko Job Name
{ "login": "jhamet93", "id": 7623671, "node_id": "MDQ6VXNlcjc2MjM2NzE=", "avatar_url": "https://avatars.githubusercontent.com/u/7623671?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jhamet93", "html_url": "https://github.com/jhamet93", "followers_url": "https://api.github.com/users/jhamet93/followers", "following_url": "https://api.github.com/users/jhamet93/following{/other_user}", "gists_url": "https://api.github.com/users/jhamet93/gists{/gist_id}", "starred_url": "https://api.github.com/users/jhamet93/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jhamet93/subscriptions", "organizations_url": "https://api.github.com/users/jhamet93/orgs", "repos_url": "https://api.github.com/users/jhamet93/repos", "events_url": "https://api.github.com/users/jhamet93/events{/privacy}", "received_events_url": "https://api.github.com/users/jhamet93/received_events", "type": "User", "site_admin": false }
[ { "id": 930476737, "node_id": "MDU6TGFiZWw5MzA0NzY3Mzc=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/help%20wanted", "name": "help wanted", "color": "db1203", "default": true, "description": "The community is welcome to contribute." }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "/cc @chensun \r\nI think we welcome contributions if you would like to.", "@Bobgy If this feature is still needed and no one has worked on it yet, can I pick it up as a first time issue?", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-12T15:01:07
2022-03-02T10:06:27
null
NONE
null
### Feature Area /area sdk ### What feature would you like to see? The ability to specify the name of the `kaniko` Job generated by kfp.containers.ContainerBuilder ### What is the use case or pain point? For internal users at Twitter, we want to track the state of their image-building jobs to display in different mediums such as a web dashboard or a CLI command. Since the name is currently randomly generated, the best way of fetching it is to use the `kubectl` CLI to grab the name of the most recent job. This has a small chance of error if multiple users are running jobs under the same namespace, so it would be ideal to specify the name or store it as an accessible property. Happy to work on a pull request if this seems reasonable. ### Is there a workaround currently? Subclass kfp.containers.ContainerBuilder and override a private API. --- Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
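The workaround mentioned above could look roughly like the following sketch. The `_generate_kaniko_spec` helper and the spec layout are assumptions about KFP v1 SDK internals and may differ between SDK versions:

```python
from kfp.containers import ContainerBuilder

class NamedContainerBuilder(ContainerBuilder):
    """Workaround sketch: remember a job name and patch it into the
    generated kaniko spec. `_generate_kaniko_spec` is a private helper in
    the KFP v1 SDK; its name and the spec layout used here are assumptions."""

    def __init__(self, *args, job_name: str, **kwargs):
        super().__init__(*args, **kwargs)
        self._job_name = job_name

    def _generate_kaniko_spec(self, *args, **kwargs):
        spec = super()._generate_kaniko_spec(*args, **kwargs)
        # Replace the randomly generated name with a deterministic one.
        spec["metadata"].pop("generateName", None)
        spec["metadata"]["name"] = self._job_name
        return spec
```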
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6019/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6019/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6018
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6018/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6018/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6018/events
https://github.com/kubeflow/pipelines/issues/6018
942,068,765
MDU6SXNzdWU5NDIwNjg3NjU=
6,018
[feature] Passthrough any supported Kaniko Argument
{ "login": "jhamet93", "id": 7623671, "node_id": "MDQ6VXNlcjc2MjM2NzE=", "avatar_url": "https://avatars.githubusercontent.com/u/7623671?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jhamet93", "html_url": "https://github.com/jhamet93", "followers_url": "https://api.github.com/users/jhamet93/followers", "following_url": "https://api.github.com/users/jhamet93/following{/other_user}", "gists_url": "https://api.github.com/users/jhamet93/gists{/gist_id}", "starred_url": "https://api.github.com/users/jhamet93/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jhamet93/subscriptions", "organizations_url": "https://api.github.com/users/jhamet93/orgs", "repos_url": "https://api.github.com/users/jhamet93/repos", "events_url": "https://api.github.com/users/jhamet93/events{/privacy}", "received_events_url": "https://api.github.com/users/jhamet93/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "/cc @chensun ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-12T13:31:56
2022-03-03T02:05:28
null
NONE
null
### Feature Area /area sdk ### What feature would you like to see? The ability to pass through any Kaniko-supported argument to kfp.containers.ContainerBuilder.build, to be used when building the Pod spec. ### What is the use case or pain point? For internal users at Twitter, we would like the ability for them to specify any supported Kaniko argument to kfp.containers.ContainerBuilder.build to build their images. Happy to work on a pull request if this seems like a reasonable feature improvement. ### Is there a workaround currently? Not really, besides modifying the class in our repo and requiring users to use that instead. --- Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
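A hedged sketch of the current workaround, along the same lines as the previous issue; it again relies on the private `_generate_kaniko_spec` helper and on the generated pod spec holding the kaniko container args under `spec["spec"]["containers"][0]["args"]`, both assumptions about the KFP v1 SDK:

```python
from kfp.containers import ContainerBuilder

class KanikoArgsContainerBuilder(ContainerBuilder):
    """Workaround sketch for passing extra kaniko flags through to the
    generated build job; internals here are assumptions, not stable API."""

    def __init__(self, *args, extra_kaniko_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._extra_kaniko_args = list(extra_kaniko_args or [])

    def _generate_kaniko_spec(self, *args, **kwargs):
        spec = super()._generate_kaniko_spec(*args, **kwargs)
        # Append user-supplied flags, e.g. ["--cache=true", "--snapshotMode=redo"].
        spec["spec"]["containers"][0]["args"].extend(self._extra_kaniko_args)
        return spec
```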
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6018/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6018/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6015
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6015/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6015/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6015/events
https://github.com/kubeflow/pipelines/issues/6015
941,634,479
MDU6SXNzdWU5NDE2MzQ0Nzk=
6,015
[release] 1.8.0 tracker
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "cc @chensun ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "We have completed the last item: switch to Emissary executor by default: https://www.kubeflow.org/docs/components/pipelines/installation/choose-executor/#docker-executor" ]
2021-07-12T03:34:20
2022-06-04T06:59:59
2022-06-04T06:57:00
CONTRIBUTOR
null
TODOs: * [x] https://github.com/kubeflow/pipelines/issues/7148 * [x] #6014 * [x] #6987 * [x] change emissary executor to default * [x] MLMD upgrade to v1.5.0 * [x] Argo upgrade to v3.2.3
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6015/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6015/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6014
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6014/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6014/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6014/events
https://github.com/kubeflow/pipelines/issues/6014
941,633,676
MDU6SXNzdWU5NDE2MzM2NzY=
6,014
[pH] prepare KFP for K8s 1.22
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "I think we need to plan this for the next release.", "Hi KFP team, my team uses GKE and we got an email from GCP today: \"[Action Required] Prepare for GKE 1.22 Open source upstream beta API Removal\". The email includes:\r\n\r\n> As of Kubernetes 1.22, API clients will no longer be able to use previously deprecated Beta API versions. Manifests using those versions will no longer be able to be applied.\r\n\r\nBased on the [GKE release schedule](https://cloud.google.com/kubernetes-engine/docs/release-schedule), seems like k8s 1.22 is already available for the \"rapid\" release channel, and will affect the \"regular\" release channel in November 2021 (my team is on \"regular\", which I believe is the default and what most GKE users are likely using).\r\n\r\nIf I understand correctly, this means when our GKE cluster is updated to 1.22, then we won't be able to deploy *any* current version of KFP to it. And, even if the next KFP release (1.8) is updated, we won't be able to roll back to pre-1.8 KFP versions if we encounter any issues with the new release. Does that match your understanding?\r\n\r\nIf so, I think it's important to have a new KFP release soon so users can test it before GKE starts updating users' clusters to 1.22.", "Research: Application CRD currently only has v1beta1 version: https://github.com/kubernetes-sigs/application/blob/master/config/crd/bases/app.k8s.io_applications.yaml#L46", "All manifests are upgraded to v1. I have personally deployed to a k8s 1.22 cluster and have validated the deployment." ]
2021-07-12T03:31:51
2022-01-08T21:55:21
2022-01-08T21:55:21
CONTRIBUTOR
null
Deprecated APIs in k8s 1.22: https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-22 TODOs: * [x] move CRDs to v1 version: https://github.com/kubeflow/pipelines/search?q=apiextensions.k8s.io%2Fv1beta1&type= #6679 * [x] move cluster role and cluster role binding to v1 version, got warning message: ``` Warning: rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole ``` * [x] Upgrade third-party dependencies to a version with v1 crd, including application controller and metacontroller
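The checklist above can be verified against a live cluster. Below is a minimal sketch, assuming the official `kubernetes` Python client and a working kubeconfig, that reports which of the API versions removed in 1.22 the cluster still serves; the group/versions listed are the ones from the deprecation guide that KFP manifests relied on.

```python
# Minimal sketch: report which API versions removed in Kubernetes 1.22
# are still served by the current cluster. Assumes `pip install kubernetes`
# and a kubeconfig with access to the cluster.
from kubernetes import client, config

# Group/versions from the v1.22 deprecation guide that KFP manifests used.
REMOVED_IN_1_22 = {
    "apiextensions.k8s.io/v1beta1",         # CRDs
    "rbac.authorization.k8s.io/v1beta1",    # ClusterRole / ClusterRoleBinding
    "admissionregistration.k8s.io/v1beta1", # webhook configurations
}

config.load_kube_config()
served = {
    version.group_version
    for group in client.ApisApi().get_api_versions().groups
    for version in group.versions
}
for group_version in sorted(REMOVED_IN_1_22 & served):
    print(f"still serving deprecated API: {group_version}")
```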
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6014/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6014/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6010
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6010/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6010/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6010/events
https://github.com/kubeflow/pipelines/issues/6010
941,366,124
MDU6SXNzdWU5NDEzNjYxMjQ=
6,010
[pH] adopt skaffold?
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-11T03:44:06
2022-03-03T02:05:34
null
CONTRIBUTOR
null
## Problem When developing KFP, we have ad hoc scripts for building images and deploying them to the cluster. It's painful to maintain all the scripts, document them, and educate people to use them. Besides that, we lack a way to deploy from HEAD with one single command, which is often useful for testing. ## Proposal Adopt https://skaffold.dev/docs/; whenever we want to develop and test, just run: ``` skaffold dev ```
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6010/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6010/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6002
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6002/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6002/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6002/events
https://github.com/kubeflow/pipelines/issues/6002
940,545,376
MDU6SXNzdWU5NDA1NDUzNzY=
6,002
[bug] No cache in recurring runs
{ "login": "Itega", "id": 8017414, "node_id": "MDQ6VXNlcjgwMTc0MTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/8017414?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Itega", "html_url": "https://github.com/Itega", "followers_url": "https://api.github.com/users/Itega/followers", "following_url": "https://api.github.com/users/Itega/following{/other_user}", "gists_url": "https://api.github.com/users/Itega/gists{/gist_id}", "starred_url": "https://api.github.com/users/Itega/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Itega/subscriptions", "organizations_url": "https://api.github.com/users/Itega/orgs", "repos_url": "https://api.github.com/users/Itega/repos", "events_url": "https://api.github.com/users/Itega/events{/privacy}", "received_events_url": "https://api.github.com/users/Itega/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "Does your cache work other than cases of recurring run ?\r\n\r\nI have seen this issue occurring in the past. Where in cache did not work at all. Try restarting the deployment called `cache-server`.\r\n\r\n```\r\nkubectl rollout restart deploy/cache-server\r\n```", "> Does your cache work other than cases of recurring run ?\r\n> \r\n> I have seen this issue occurring in the past. Where in cache did not work at all. Try restarting the deployment called `cache-server`.\r\n> \r\n> ```\r\n> kubectl rollout restart deploy/cache-server\r\n> ```\r\n\r\nThanks for your answer !\r\nUnfortunately, this did not change anything and the cache is still not working in recurring runs.", "Hello @Itega ,\r\n\r\nCache will be enabled only when workflow template is the same between first pipeline **run** and second pipeline **run**. Would you like to share the first and second pipeline run pods information?", "Hello @zijianjoy ,\r\nI will happily provide you any needed information, here are the first and second runs pods information :\r\n\r\nRecurring run :\r\n- First run, First step : https://pastebin.com/PE0EfiLK\r\n- First run, Second step : https://pastebin.com/HgSgSUyr\r\n- Second run, First step : https://pastebin.com/X2zKM26y\r\n- Second run, Second step : https://pastebin.com/gQuMzKXM\r\n\r\nManual run (working as expected) :\r\n\r\n- First run, First step : https://pastebin.com/SpNXKuDT\r\n- First run, Second step : https://pastebin.com/qP3VGZGq\r\n- Second run, First step : https://pastebin.com/Ut09aNYw\r\n- Second run, Second step : https://pastebin.com/BjGELhHF", "Hello @Itega ,\r\n\r\nIn your manual run, you have caching enabled for your pipeline, see annotation `pipelines.kubeflow.org/cache_enabled: 'true'`. However, this annotation doesn't exist in Recurring run, so KFP didn't generate the cache key for you `pipelines.kubeflow.org/execution_cache_key`.\r\n\r\nWould you like to enable caching for recurring run and see if this fixes your issue? \r\n\r\nReference: https://github.com/kubeflow/pipelines/blob/master/backend/src/cache/server/mutation.go#L87-L100", "Hello @zijianjoy ,\r\n\r\nThanks for the explanations !\r\nI found that I can enable caching only in V2 compatible mode so I updated KFP to 1.7.0-rc.1 (master branch), KFP SDK to 1.6.5 and KFP server api to 1.7.0a2.\r\n\r\nI did some tests using the sample from https://www.kubeflow.org/docs/components/pipelines/sdk/v2/python-function-components/#getting-started-with-python-function-based-components\r\n\r\nThe first run is fine but the second run tries to get result from cache and throw an error : `Failed to execute component: failed to store output parameter value from cache: failed to parse parameter name=\"Output\" value =11 to double: %!w(<nil>)`\r\n\r\nWhen replacing `float` with `str`, this is working well when using manual runs but in recurring runs I get : `Failed to execute component: failed to create cache entry: failed to create task: rpc error: code = InvalidArgument desc = Validate create task request failed.: Invalid input error: Invalid task: must specify RunId`.\r\n\r\nShould I create some new issues and close this one ?", "I am not sure what is the root cause for experiencing this `invalid input error` failure, it sounds good for creating a new issue to debug this further.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-09T08:31:22
2022-03-03T00:05:30
null
NONE
null
### What steps did you take - Create a pipeline with two steps and data passing - Compile it - Create a recurring run via the KFP UI and schedule a periodic run every minute/hour/day (the interval doesn't change anything) ### What happened: - Second run does not use the cache from the first run ### What did you expect to happen: - Second run should use the cache from the first run ### Environment: * How do you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines standalone on GCP * KFP version: KFP 1.6.0 * KFP SDK version: KFP SDK 1.6.4 ### Anything else you would like to add: Caching works fine when I run the pipeline manually. Pipeline: https://pastebin.com/bUguyB7X Pipeline YAML: https://pastebin.com/RrktipPC Pod YAML (first step): https://pastebin.com/t18AWtvX Pod YAML (second step): https://pastebin.com/S7pCCXzY --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
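For readers landing on this record looking for the caching knobs themselves, here is a minimal sketch, assuming the KFP v1 SDK (~1.6) and a reachable KFP API server; the component and all names are made up for illustration, and this shows the SDK surface discussed in the comments rather than a fix for the recurring-run bug itself.

```python
# Minimal sketch: set per-task cache staleness and schedule a recurring run.
# Assumes KFP v1 SDK and an in-cluster or port-forwarded KFP API server.
import kfp
from kfp.components import create_component_from_func

def say_hello() -> str:
    return "hello"

hello_op = create_component_from_func(say_hello)

@kfp.dsl.pipeline(name="cache-demo")
def cache_demo_pipeline():
    task = hello_op()
    # Reuse cached results up to 30 days old; "P0D" disables caching entirely.
    task.execution_options.caching_strategy.max_cache_staleness = "P30D"

client = kfp.Client()
experiment = client.create_experiment("cache-demo")
kfp.compiler.Compiler().compile(cache_demo_pipeline, "pipeline.yaml")
client.create_recurring_run(
    experiment_id=experiment.id,
    job_name="cache-demo-hourly",
    cron_expression="0 0 * * * *",  # KFP cron expressions include a seconds field
    pipeline_package_path="pipeline.yaml",
)
```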
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6002/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6002/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/6001
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/6001/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/6001/comments
https://api.github.com/repos/kubeflow/pipelines/issues/6001/events
https://github.com/kubeflow/pipelines/issues/6001
940,427,321
MDU6SXNzdWU5NDA0MjczMjE=
6,001
[feature] CRD for Recurring Run
{ "login": "munagekar", "id": 10258799, "node_id": "MDQ6VXNlcjEwMjU4Nzk5", "avatar_url": "https://avatars.githubusercontent.com/u/10258799?v=4", "gravatar_id": "", "url": "https://api.github.com/users/munagekar", "html_url": "https://github.com/munagekar", "followers_url": "https://api.github.com/users/munagekar/followers", "following_url": "https://api.github.com/users/munagekar/following{/other_user}", "gists_url": "https://api.github.com/users/munagekar/gists{/gist_id}", "starred_url": "https://api.github.com/users/munagekar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/munagekar/subscriptions", "organizations_url": "https://api.github.com/users/munagekar/orgs", "repos_url": "https://api.github.com/users/munagekar/repos", "events_url": "https://api.github.com/users/munagekar/events{/privacy}", "received_events_url": "https://api.github.com/users/munagekar/received_events", "type": "User", "site_admin": false }
[ { "id": 930476737, "node_id": "MDU6TGFiZWw5MzA0NzY3Mzc=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/help%20wanted", "name": "help wanted", "color": "db1203", "default": true, "description": "The community is welcome to contribute." }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
{ "login": "NikeNano", "id": 22057410, "node_id": "MDQ6VXNlcjIyMDU3NDEw", "avatar_url": "https://avatars.githubusercontent.com/u/22057410?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NikeNano", "html_url": "https://github.com/NikeNano", "followers_url": "https://api.github.com/users/NikeNano/followers", "following_url": "https://api.github.com/users/NikeNano/following{/other_user}", "gists_url": "https://api.github.com/users/NikeNano/gists{/gist_id}", "starred_url": "https://api.github.com/users/NikeNano/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NikeNano/subscriptions", "organizations_url": "https://api.github.com/users/NikeNano/orgs", "repos_url": "https://api.github.com/users/NikeNano/repos", "events_url": "https://api.github.com/users/NikeNano/events{/privacy}", "received_events_url": "https://api.github.com/users/NikeNano/received_events", "type": "User", "site_admin": false }
[ { "login": "NikeNano", "id": 22057410, "node_id": "MDQ6VXNlcjIyMDU3NDEw", "avatar_url": "https://avatars.githubusercontent.com/u/22057410?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NikeNano", "html_url": "https://github.com/NikeNano", "followers_url": "https://api.github.com/users/NikeNano/followers", "following_url": "https://api.github.com/users/NikeNano/following{/other_user}", "gists_url": "https://api.github.com/users/NikeNano/gists{/gist_id}", "starred_url": "https://api.github.com/users/NikeNano/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NikeNano/subscriptions", "organizations_url": "https://api.github.com/users/NikeNano/orgs", "repos_url": "https://api.github.com/users/NikeNano/repos", "events_url": "https://api.github.com/users/NikeNano/events{/privacy}", "received_events_url": "https://api.github.com/users/NikeNano/received_events", "type": "User", "site_admin": false } ]
null
[ "@munagekar Welcome contributing to create a RecurringRun CRD for this feature!", "/assign\r\nI will take a look at this. ", "To get some more background, what is the difference that you expect from this `CRD` compare to the [scheduledworkflow](https://github.com/kubeflow/pipelines/tree/master/backend/src/crd#running-scheduledworkflow-controller-from-the-command-line) `CRD` that Kubeflow pipelines uses to today for recurring runs? @munagekar ", "@NikeNano Thank you for looking into this.\r\n\r\nI was not aware of the `scheduled workflow` crd, when I created the issue. This CRD largely solves the issue.\r\n\r\nHowever, kubeflow pipelines stores scheduled workflows in a db instead of relying on etcd. This can lead to inconsistency when creating & deleting recurring runs from the kubectl. The scheduled workflows shown in the UI do not match those on the cluster.\r\n\r\nREF: https://github.com/kubeflow/pipelines/issues/4862", " ~~Hmm do you mean that `etcd` should replace the `Mysql` database? I think in that case it will be completely seperated from kubeflow pipelines and all the tooling around it~~\r\n\r\n ~~As I understand the `CRD`:s will be stored in `etcd` since they are custom resources? But I think I might miss your point, could you elaborate @munagekar~~\r\n\r\n\r\nUPDATE: missed to check your ref @munagekar, will do it. \r\n", "After reading up and checking the code I definitely see your point @munagekar. I will continue to dig in to what is required to actually get ride of using `Mysql` for recurring runs. \r\n\r\nI think we can continue the discussion from https://github.com/kubeflow/pipelines/issues/4862#issuecomment-906323221 here. What was the original reason for using `Mysql`, it seems to have the same information as on the CRD when I check now @Bobgy ?", "I will write a design doc during the week. ", "To explain a bit of history, KFP decided to use mysql as source of truth instead of Kubernetes API, because many KFP features rely heavily on\r\n* filter by field, order by field, pagination functionalities that is not very well supported by Kubernetes API\r\n* number of pipeline runs can be large enough that the Kubernetes API cannot handle efficiently\r\n\r\nHowever, it seems reasonable to assume the number of enabled recurring run configs should be at a size that can be handled by Kubernetes API. The tooling for gitops + kubernetes API seems like a natural fit for managing recurring runs. That's why I think https://github.com/kubeflow/pipelines/issues/4862#issuecomment-906090718 is a brilliant idea.\r\n\r\nIt's still worth investigating whether Kubernetes API's capability for filter, order and pagination is enough for recurring run API or not. That's what I'd like to see in the design doc. Even if there's sth still missing, we may now consider a solution that takes Kubernetes API as source of truth, so that new recurring run custom resources can be auto-synced to KFP DB.", "cc @NikeNano, what I mentioned in https://github.com/kubeflow/pipelines/pull/6207#issuecomment-906807494 is exactly meant for this use-case. 
If users can manually author recurring run spec and apply them to cluster using gitops, then it's more user-friendly to let them specify pipeline names + default pipeline version, or pipeline name + version name, compared to IDs.", "I have started to work on the design doc, will update it during the next days but leave it here if you like to add something already now: https://docs.google.com/document/d/1En7UCME3PabqPwaJZSk0GSx61B8BPdkhW_kW9t7Xdkg/edit?usp=sharing", "Will update it during the weekend, have a better understanding of the persistance agent now. \r\n", "Would be great to get some feeback from you guys @Bobgy and @munagekar ", "@NikeNano I went through the design doc. The proposed changes sound good.\r\n\r\nLike you mentioned in your design doc, if advanced filtering for scheduled jobs is a requirement, it might make sense to replicate changes from kubernetes to mysql, otherwise relying on kubernetes api makes sense and should be faster to implement.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "Hello, you are always very helpful. Thank you for your efforts.\r\nHas the ability to control scheduledworkflows with kubectl been developed? It's hard to keep track of the exact progress.\r\nAs shown below, when scheduledworkflows are deleted with kubectl, they are not deleted in kubeflow web, but the resources are deleted and the pipeline does not run according to the schedule.\r\nI want to check the synced deletion status on the web, what should I do?\r\n`kubectl delete scheduledworkflows --all`\r\n<img width=\"676\" alt=\"image\" src=\"https://user-images.githubusercontent.com/56992106/210510699-def87822-e958-43dd-97b6-783bd42f9291.png\">\r\n<img width=\"1127\" alt=\"image\" src=\"https://user-images.githubusercontent.com/56992106/210510743-1472b520-4eb6-4bb5-9355-e40946e06a2d.png\">\r\n", "Hi guys, @Bobgy @NikeNano \r\n\r\nThank you for the thoughtful discussion. \r\nIs there any progress on this issue? ", "@chensun @connor-mccarthy I think this is a good idea for KFP: symmetrically syncing between the `ScheduledRun` CRD and the MySQL backend, because right now, if you PATCH the CRD (or CREATE one, not using the KFP API), you will end up in an out-of-sync state.\r\n\r\nThis feature will also allow people to use git-ops tools like ArgoCD to manage which pipelines are scheduled in each namespace.", "Following up on this feature request: I think we should consider the scope of Kubernetes Resource Management (KRM) support in KFP in general. I suspect that if we implement RecurringRun CRD today, we will receive similar requests for Run/Pipeline and other objects in the future.\r\n\r\nAgain, welcome contributions on this CRD for KFP topic!\r\n\r\nReference: We are currently using a mock API group and CRD to represent KFP objects: https://github.com/kubeflow/pipelines/blob/f626629f79c833b159336fe9963d44b77071c14f/backend/src/apiserver/common/const.go#L18-L20. ", "Thank you guys, @thesuperzapper and @zijianjoy \r\nRegarding contribution, I see that the design proposal already was provided - https://docs.google.com/document/d/1En7UCME3PabqPwaJZSk0GSx61B8BPdkhW_kW9t7Xdkg/edit?pli=1#heading=h.rebcfzcla50x\r\n\r\nDoes it still require an agreement? ", "> Hi guys, @Bobgy @NikeNano\r\n> \r\n> Thank you for the thoughtful discussion. Is there any progress on this issue?\r\n\r\nHey, I sadly don't have time to work on this so feel free to pick this up. 
Would be great so see this be used. ", "@zijianjoy @NikeNano this issue is related to supporting updates on Scheduled Runs (which is not currently supported), see my comment in that issue for reference:\r\n\r\n- https://github.com/kubeflow/pipelines/issues/3789#issuecomment-1581396796", "Hey everyone, I have released a reference repository that demonstrates using GitOps to manage the schedules and definitions of Kubeflow Pipelines.\r\n\r\n### You can find it at [`deployKF/kubeflow-pipelines-gitops`](https://github.com/deployKF/kubeflow-pipelines-gitops)\r\n\r\nIt has limitations caused by Kubeflow Pipeline not allowing updates to \"recurring runs\" (only deleting and recreating), but I think the solution it uses will work for most production use-cases." ]
2021-07-09T05:19:47
2023-08-10T01:43:29
null
CONTRIBUTOR
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> /area backend ### What feature would you like to see? A CRD for recurring runs, to make it possible to delete recurring runs with kubectl. Currently it is possible to create and delete recurring runs only with the SDK or the Kubeflow GUI; there is no state for recurring runs on the cluster. Kubernetes has CronJobs, and pipeline runs are similarly tracked with [workflow](https://argoproj.github.io/argo-workflows/fields/#workflow); I would like a similar CRD for recurring runs. Argo has [cron-workflow](https://argoproj.github.io/argo-workflows/fields/#cronworkflow) and [cron-workflow status](https://argoproj.github.io/argo-workflows/fields/#cronworkflowstatus); could these be used instead? ### What is the use case or pain point? It is easier for dev-ops to track recurring runs with kubectl and see their status. ### Is there a workaround currently? <!-- Without this feature, how do you accomplish your task today? --> Use the SDK or GUI. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
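Since the ScheduledWorkflow CRD mentioned in the comments already exists, its objects can be inspected directly today. A minimal sketch, assuming the CRD is served as `kubeflow.org/v1beta1` with plural `scheduledworkflows` (verify with `kubectl api-resources`) and a KFP standalone deployment in the `kubeflow` namespace:

```python
# Minimal sketch: list the recurring-run state that already lives on the
# cluster as ScheduledWorkflow custom resources. Group/version/plural are
# assumptions; check them against your cluster first.
from kubernetes import client, config

config.load_kube_config()
api = client.CustomObjectsApi()
swfs = api.list_namespaced_custom_object(
    group="kubeflow.org",
    version="v1beta1",
    namespace="kubeflow",
    plural="scheduledworkflows",
)
for item in swfs["items"]:
    # Print each scheduled workflow's name and whether it is enabled.
    print(item["metadata"]["name"], item.get("spec", {}).get("enabled"))
```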
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/6001/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/6001/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5995
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5995/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5995/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5995/events
https://github.com/kubeflow/pipelines/issues/5995
940,074,430
MDU6SXNzdWU5NDAwNzQ0MzA=
5,995
[feature] Avoiding busy waiting for long running external tasks
{ "login": "harshsaini", "id": 5178654, "node_id": "MDQ6VXNlcjUxNzg2NTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/5178654?v=4", "gravatar_id": "", "url": "https://api.github.com/users/harshsaini", "html_url": "https://github.com/harshsaini", "followers_url": "https://api.github.com/users/harshsaini/followers", "following_url": "https://api.github.com/users/harshsaini/following{/other_user}", "gists_url": "https://api.github.com/users/harshsaini/gists{/gist_id}", "starred_url": "https://api.github.com/users/harshsaini/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/harshsaini/subscriptions", "organizations_url": "https://api.github.com/users/harshsaini/orgs", "repos_url": "https://api.github.com/users/harshsaini/repos", "events_url": "https://api.github.com/users/harshsaini/events{/privacy}", "received_events_url": "https://api.github.com/users/harshsaini/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "Hello @harshsaini , for now you can create two components, one for launching the Katib jobs, another for waiting for this external job to finish. \r\n\r\nFor the performance issue, we need to implement a control plane concept to introduce a watcher for external job completion. This is currently blocked for the introduction of control plane. We will prioritize it accordingly.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-08T17:28:11
2022-03-03T02:05:35
null
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> /area backend /area sdk <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? Kubeflow Pipelines does not support a way to intermittently poll for completion of longer-running tasks on an external system. Request: create an op that allows for intermittent polling or execution to determine whether an external task has finished. ### What is the use case or pain point? The use case is around model training. KFP provides an HPO execution option that launches a job via [Katib](https://github.com/kubeflow/pipelines/blob/master/components/kubeflow/katib-launcher/component.yaml), however, it [busy-waits](https://github.com/kubeflow/pipelines/blob/b57bbeeb982744556ab79a132de5d5fd0c7c0c82/components/kubeflow/katib-launcher/src/launch_experiment.py#L70) for completion of the job. This is problematic for a few reasons: - it wastes compute resources and cost, since such jobs can take days or weeks to complete depending on the type of model and task. - KFP hosted on transient/preemptible environments can lose track of the experiment if the pod is evicted or killed. - it is hard for end users to determine the maximum busy-wait time via the [current launcher](https://github.com/kubeflow/pipelines/blob/b57bbeeb982744556ab79a132de5d5fd0c7c0c82/components/kubeflow/katib-launcher/component.yaml#L7) ### Is there a workaround currently? Currently, we do not use pipelines to launch Katib jobs but have our own wrappers to manage this execution. Another option is to use the provided launcher, but we have noticed that we lose track of the experiment if the node hosting the launcher gets preempted. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
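Pending the control-plane watcher mentioned in the comments, the suggested interim shape is a launch/wait split, so that the job handle survives pod loss. A minimal sketch; `my_hpo_client` and its two functions are hypothetical stand-ins for whatever client wraps the external system (e.g. Katib), not real Katib or KFP APIs:

```python
# Minimal sketch of the launch/wait split suggested in the first comment.
# `my_hpo_client` is a hypothetical module, not a real library.
from kfp.components import create_component_from_func

def launch(spec: str) -> str:
    """Submit the external job and return its ID immediately."""
    from my_hpo_client import submit_experiment  # hypothetical
    return submit_experiment(spec)

def wait(job_id: str, poll_seconds: int = 60) -> str:
    """Poll until the job reaches a terminal state. Because the job ID is a
    component input rather than in-memory state, a restarted pod can resume
    waiting for the same experiment."""
    import time
    from my_hpo_client import get_experiment_status  # hypothetical
    while True:
        status = get_experiment_status(job_id)
        if status in ("Succeeded", "Failed"):
            return status
        time.sleep(poll_seconds)

launch_op = create_component_from_func(launch)
wait_op = create_component_from_func(wait)
```

This still polls, but from a small dedicated pod whose restarts can be configured with `set_retry`, so preemption no longer loses the experiment; removing the polling entirely is what the proposed control-plane watcher would do.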
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5995/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5995/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5987
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5987/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5987/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5987/events
https://github.com/kubeflow/pipelines/issues/5987
938,981,067
MDU6SXNzdWU5Mzg5ODEwNjc=
5,987
[v2compat] key samples to show off new features
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
closed
false
null
[]
null
[ "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "Closing as v2 compatible mode is no longer supported." ]
2021-07-07T14:55:52
2023-01-18T08:21:49
2023-01-18T08:21:48
CONTRIBUTOR
null
Samples: * [ ] Consume artifact by URI sample (use tfjob etc) * [ ] Support for components that can produce/consume MLMD metadata * [ ] Visualize v2 metrics -- components can output metrics artifacts that are rendered in UI. [Sample pipeline](https://github.com/kubeflow/pipelines/blob/307e91aaae5e9c71dde1fddaffa10ffd751a40e8/samples/test/metrics_visualization_v2.py#L103) (UI not working) * [ ] Pipeline root documentation
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5987/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5987/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5986
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5986/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5986/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5986/events
https://github.com/kubeflow/pipelines/issues/5986
938,414,054
MDU6SXNzdWU5Mzg0MTQwNTQ=
5,986
[v2compat] re-evaluate execution custom properties schema
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 952830767, "node_id": "MDU6TGFiZWw5NTI4MzA3Njc=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/size/M", "name": "size/M", "color": "ededed", "default": false, "description": null } ]
closed
false
null
[]
null
[]
2021-07-07T02:20:56
2021-07-20T05:26:15
2021-07-20T05:26:15
CONTRIBUTOR
null
* [ ] rename task execution `task_name` to `display_name`?
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5986/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5986/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5985
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5985/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5985/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5985/events
https://github.com/kubeflow/pipelines/issues/5985
938,413,984
MDU6SXNzdWU5Mzg0MTM5ODQ=
5,985
[v2compat] move types from `kfp.XXX` namespace to `system.XXX` namespace
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 942985146, "node_id": "MDU6TGFiZWw5NDI5ODUxNDY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/size/XS", "name": "size/XS", "color": "ededed", "default": false, "description": null } ]
closed
false
null
[]
null
[]
2021-07-07T02:20:46
2021-07-20T05:26:15
2021-07-20T05:26:15
CONTRIBUTOR
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5985/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5985/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5984
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5984/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5984/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5984/events
https://github.com/kubeflow/pipelines/issues/5984
938,413,064
MDU6SXNzdWU5Mzg0MTMwNjQ=
5,984
[v2compat] remove redundant fields “pipeline_name”, “pipeline_run_id” from execution custom properties
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 942985146, "node_id": "MDU6TGFiZWw5NDI5ODUxNDY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/size/XS", "name": "size/XS", "color": "ededed", "default": false, "description": null } ]
closed
false
null
[]
null
[ "the issue is too small to track" ]
2021-07-07T02:18:41
2021-07-15T14:16:24
2021-07-15T14:16:24
CONTRIBUTOR
null
These fields are already present in pipeline/pipeline run contexts (size/XS)
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5984/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5984/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5982
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5982/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5982/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5982/events
https://github.com/kubeflow/pipelines/issues/5982
938,356,581
MDU6SXNzdWU5MzgzNTY1ODE=
5,982
[backend] How to specify python files to include using create_component_from_func
{ "login": "aronchick", "id": 51317, "node_id": "MDQ6VXNlcjUxMzE3", "avatar_url": "https://avatars.githubusercontent.com/u/51317?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aronchick", "html_url": "https://github.com/aronchick", "followers_url": "https://api.github.com/users/aronchick/followers", "following_url": "https://api.github.com/users/aronchick/following{/other_user}", "gists_url": "https://api.github.com/users/aronchick/gists{/gist_id}", "starred_url": "https://api.github.com/users/aronchick/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aronchick/subscriptions", "organizations_url": "https://api.github.com/users/aronchick/orgs", "repos_url": "https://api.github.com/users/aronchick/repos", "events_url": "https://api.github.com/users/aronchick/events{/privacy}", "received_events_url": "https://api.github.com/users/aronchick/received_events", "type": "User", "site_admin": false }
[ { "id": 1122445895, "node_id": "MDU6TGFiZWwxMTIyNDQ1ODk1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk/components", "name": "area/sdk/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
open
false
{ "login": "Ark-kun", "id": 1829149, "node_id": "MDQ6VXNlcjE4MjkxNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/1829149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ark-kun", "html_url": "https://github.com/Ark-kun", "followers_url": "https://api.github.com/users/Ark-kun/followers", "following_url": "https://api.github.com/users/Ark-kun/following{/other_user}", "gists_url": "https://api.github.com/users/Ark-kun/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ark-kun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ark-kun/subscriptions", "organizations_url": "https://api.github.com/users/Ark-kun/orgs", "repos_url": "https://api.github.com/users/Ark-kun/repos", "events_url": "https://api.github.com/users/Ark-kun/events{/privacy}", "received_events_url": "https://api.github.com/users/Ark-kun/received_events", "type": "User", "site_admin": false }
[ { "login": "Ark-kun", "id": 1829149, "node_id": "MDQ6VXNlcjE4MjkxNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/1829149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ark-kun", "html_url": "https://github.com/Ark-kun", "followers_url": "https://api.github.com/users/Ark-kun/followers", "following_url": "https://api.github.com/users/Ark-kun/following{/other_user}", "gists_url": "https://api.github.com/users/Ark-kun/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ark-kun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ark-kun/subscriptions", "organizations_url": "https://api.github.com/users/Ark-kun/orgs", "repos_url": "https://api.github.com/users/Ark-kun/repos", "events_url": "https://api.github.com/users/Ark-kun/events{/privacy}", "received_events_url": "https://api.github.com/users/Ark-kun/received_events", "type": "User", "site_admin": false } ]
null
[ "There are several possible solutions to this.\r\n\r\nFirst thing to understand is how the `create_component_from_func` function works. This function realizes the \"Lightweight python components\" feature.\r\nAll KFP components are `ComponentSpec` files (`component.yaml`). Many people write them manually and that's fine. But for python functions I've added a way to *generate* the `component.yaml` from the function signature and code. The resulting components are \"Lightweight\" is sense that there is no need to build and push a new container. The code is included in the command-line. This makes development much easier, but also limits the amount of code that can be included.\r\n\r\nSo, what can be done:\r\n\r\n1) You can put the `utils` in the container and use it as base image. The `kfp.container.build_image_from_working_dir` can help you with that. See the [sample](https://github.com/kubeflow/pipelines/blob/master/samples/core/container_build/container_build.ipynb)\r\n2) Put `utils` into its own package that can be installed via `packages_to_install`. BTW, python can install from GIT.\r\n3) Use code pickling to include extra dependencies in the lightweight component. This option (`use_code_pickling`) is not exposed in `create_component_from_func`, but the older `func_to_container_op` still has it. You need to be very careful to use the same python version in your environment and in the container.\r\n\r\n4) There is a passive idea to add a new feature - `additional_files` to the `create_component_from_func` function, but I think it stretches the idea of \" code inlining\" too far.\r\n\r\n>same_step_0.generated_main\r\n\r\nIf you're generating the main function yourself, maybe you want to generate the whole `kfp.components.structures.ComponentSpec`/`component.yaml`?\r\n", "This is really interesting! I think what I'm going to try first is just to pickle all files in the sub directories, and inline them into the generated root file. I think this will work though is obviously pretty fragile (assumes all sub files are of a certain layout/etc). \r\n\r\nThe challenge here, of course, is we do WANT data scientists breaking out their functions into submodules without having to build packages or containers - would this be the idea about the \"additional_files\"?", "/cc @chensun ", "> 4. There is a passive idea to add a new feature - `additional_files` to the `create_component_from_func` function, but I think it stretches the idea of \" code inlining\" too far.\r\n\r\n@Ark-kun I landed on this issue because I was looking for a way to add other functions to the component file -- do you think that's a viable option at all?\r\n\r\nMy team and I are finding it useful to use functions defined for components in other contexts, but it would be incredibly useful if it were possible to inline both a kfp \"wrapper\" function and a main function that did the work. 
For example, something like:\r\n\r\n def process_data(\r\n params: typing.Dict[str,str], input: pd.DataFrame\r\n ) -> pd.DataFrame:\r\n ...\r\n return processed_data\r\n\r\n def process_csv_data(\r\n params: typing.Dict[str,str], input_path: InputPath('CSV'),\r\n output_path: OutputPath('CSV')\r\n ):\r\n input = pd.read_csv(input_path)\r\n processed_data = process_data(params, input)\r\n processed_data.to_csv(output_path)\r\n\r\nIf we could pass `process_data` as an extra function to inline to `create_component_from_func(process_csv_data, ...)` then this would allow us to have a kfp component but also be able to export `process_data` as a function that can be packaged and offered in a module for use by other applications. An `extra_functions` parameter would be very useful, but the `additional_files` parameter would probably also solve the same need.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-07T00:16:22
2022-03-02T15:05:16
null
NONE
null
I have a deployment like the following: ``` root_dir ├── root.py ├── same.yaml ├── same_step_0.py ├── same_step_1.py ├── same_step_2.py └── utils ├── __init__.py ├── file_one.py └── file_two.py ``` The `root.py` file is the standard kubeflow root file, and then all the steps have functions in them that are handled with `create_component_from_func` - like this: ``` same_step_0_op = create_component_from_func( func=same_step_0.generated_main, base_image="library/python:3.9-slim-buster", packages_to_install=[ "dill", "requests", "numpy==1.19.5", "requests", ], ) ``` The file same_step_0.py references a function in utils.file_one - but when building the package and uploading, it's not being included. It does appear (AFAICT) that the functionality to copy all dependent files is there - https://github.com/kubeflow/pipelines/blob/cc83e1089b573256e781ed2e4ac90f604129e769/sdk/python/kfp/containers/_component_builder.py#L222 - is there a special flag I need to set? Thanks! --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
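One workaround from the comments above is option 2: make `utils` pip-installable and pull it in via `packages_to_install`, since pip can install from git. A minimal sketch; the git URL is a placeholder, and `utils` is assumed to have been given packaging metadata (a `setup.py` or `pyproject.toml`):

```python
# Minimal sketch of option 2: install `utils` into the component's container
# at runtime instead of relying on file copying. The git URL is a placeholder.
from kfp.components import create_component_from_func
import same_step_0  # the module containing generated_main, as in the layout above

same_step_0_op = create_component_from_func(
    func=same_step_0.generated_main,
    base_image="library/python:3.9-slim-buster",
    packages_to_install=[
        "dill",
        "requests",
        "numpy==1.19.5",
        # pip can install straight from git, so `utils` can stay in the repo
        # once it carries packaging metadata:
        "git+https://github.com/<org>/<repo>.git#subdirectory=root_dir/utils",
    ],
)
```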
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5982/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5982/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5980
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5980/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5980/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5980/events
https://github.com/kubeflow/pipelines/issues/5980
937,727,745
MDU6SXNzdWU5Mzc3Mjc3NDU=
5,980
[bug(sdk)] serializer does not work for complex type hint.
{ "login": "TrsNium", "id": 11388424, "node_id": "MDQ6VXNlcjExMzg4NDI0", "avatar_url": "https://avatars.githubusercontent.com/u/11388424?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TrsNium", "html_url": "https://github.com/TrsNium", "followers_url": "https://api.github.com/users/TrsNium/followers", "following_url": "https://api.github.com/users/TrsNium/following{/other_user}", "gists_url": "https://api.github.com/users/TrsNium/gists{/gist_id}", "starred_url": "https://api.github.com/users/TrsNium/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TrsNium/subscriptions", "organizations_url": "https://api.github.com/users/TrsNium/orgs", "repos_url": "https://api.github.com/users/TrsNium/repos", "events_url": "https://api.github.com/users/TrsNium/events{/privacy}", "received_events_url": "https://api.github.com/users/TrsNium/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "The typing hints using `typing` did not work, which is fixed in the PR here.\r\nhttps://github.com/kubeflow/pipelines/pull/5979", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-07-06T10:02:21
2022-03-03T03:04:41
null
CONTRIBUTOR
null
### What steps did you take ```python from typing import Dict, List @component def get_parallel_offsets( rows_count: int, parallel_num: int, ) -> List[Dict[str, int]]: """ Get the offset of each parallel (from the number of rows and the number of parallels in the table.) Parameters ---------- rows_count: int number of bigquery table's rows parallel_num: int number of parallels """ from collections import namedtuple import math import json if rows_count % parallel_num == 0: offset_step = limit = int(rows_count / parallel_num) else: offset_step = limit = math.ceil(rows_count / parallel_num) # NOTE: When using `json.dump`, if a number with a large number of digits is included, the number will be converted to Scientific Notation format, so convert it to a string type once. offsets = [ {"index": index, "offset": offset, "upper_bounds": offset+limit} for index, offset in enumerate(range(0, rows_count, offset_step)) ] return offsets ``` When a type is specified for a dict like the one above, or a type hint is added to a list element, the return value is serialized as a string. This behavior may result in incorrect deserialization. This happens because we are only looking at one type name. Ref: https://github.com/kubeflow/pipelines/blob/df1ab4db5e72e2ddb6f098343a3faf51599087d1/sdk/python/kfp/components/_data_passing.py#L109-L118 I want it to be serialized by pattern matching using a regex (`List\[.+\]`) etc. ### What happened: A function returns a value annotated like `List[Dict[A,B]]`, and deserialization fails when other functions try to read it. ### What did you expect to happen: I want even complex type hints to be serialized correctly. ### Environment: KFP master branch version ### Labels <!-- /area sdk --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
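The regex-based matching the reporter asks for can be sketched directly. A minimal illustration only, not the actual `kfp.components._data_passing` implementation:

```python
# Minimal sketch: recognize parameterized generics by their outer type name
# so list/dict values are JSON-serialized even with type parameters present.
import json
import re
from typing import Any

PARAMETERIZED_JSON_TYPES = re.compile(r"^(typing\.)?(List|Dict)\[.+\]$")

def serialize(value: Any, type_name: str) -> str:
    """JSON-serialize list/dict values even when the annotation carries type
    parameters, e.g. 'List[Dict[str, int]]' rather than just 'List'."""
    if type_name in ("list", "List", "dict", "Dict") or PARAMETERIZED_JSON_TYPES.match(type_name):
        return json.dumps(value)
    return str(value)

assert serialize([{"index": 0, "offset": 7}], "List[Dict[str, int]]") == '[{"index": 0, "offset": 7}]'
```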
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5980/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5980/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5978
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5978/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5978/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5978/events
https://github.com/kubeflow/pipelines/issues/5978
937,481,999
MDU6SXNzdWU5Mzc0ODE5OTk=
5,978
[v2compat] Unify pipeline name and pipeline context name
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[]
2021-07-06T03:18:04
2021-07-15T03:53:51
2021-07-15T03:53:51
CONTRIBUTOR
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5978/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5978/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5977
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5977/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5977/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5977/events
https://github.com/kubeflow/pipelines/issues/5977
937,477,161
MDU6SXNzdWU5Mzc0NzcxNjE=
5,977
[v2compat] UI features for cached tasks (size/S)
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1116354964, "node_id": "MDU6TGFiZWwxMTE2MzU0OTY0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/size/S", "name": "size/S", "color": "ededed", "default": false, "description": null } ]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "Discussed with @zijianjoy offline,\r\nCurrently with v2 compatible mode, \r\n\r\n- cached status icon\r\n- a link to cached execution\r\nThe above tasks can be achieved now.\r\n\r\nFor the following tasks, currently we can't fetch original execution with current logic in launcher.\r\nWe may need to store the original mlmd execution id in the cache mlmd execution first(Maybe via a custom property called `cached_execution_id`). I'm not sure whether we need to support these in v2 compatible mode. I think these features will be useful to users once we support cache staleness. \r\nFor my perspective, we can defer these two tasks until we support cache staless. @Bobgy What do you think of it?\r\n\r\n- (p1) a link to original execution this cache is taken from\r\n- (p2) the original execution should have a link to the original run (deleting the original run will invalidate cache)", "I think it will be great to describe the user scenario for referencing original execution from cached execution. Based on the discussion, looks like there is no cache management exposed to user in v2 compatible mode, so the linking from cached execution to original execution is informational at this point, and help user to understand more about the caching mechanism (debugging).\r\n\r\nThe solution with custom property `cached_execution_id` should work, but I suggest to get confirmation from the team about defining this field, so we can avoid changing/removing this property in the future, where FE needs to support property field compatibility across versions." ]
2021-07-06T03:05:46
2021-09-17T06:22:36
2021-09-14T16:49:32
CONTRIBUTOR
null
For cached tasks, the following are helpful on UI: - [x] * cached status icon - [x] * a link to cached execution - [x] * (p1) a link to original execution this cache is taken from - [x] * (p2) the original execution should have a link to the original run (deleting the original run will invalidate cache) - [x] * (p2) the run details page should ideally support using url hash to select a node, so that we can generate a URL that directly selects a node in a run details page
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5977/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5977/timeline
null
completed
null
null
false
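The `cached_execution_id` idea proposed in the comments on issue 5977 can be sketched with the ml-metadata (MLMD) Python client. This is a minimal sketch, not KFP's actual implementation: the property name follows the proposal in the thread, while the in-memory connection config and the execution type name `kfp.ContainerExecution` are illustrative assumptions.

```python
# Sketch: link a cache-hit execution back to the original MLMD execution
# via a custom property, as proposed in issue 5977's comments.
from ml_metadata import metadata_store
from ml_metadata.proto import metadata_store_pb2

config = metadata_store_pb2.ConnectionConfig()
config.fake_database.SetInParent()  # in-memory store, for the sketch only
store = metadata_store.MetadataStore(config)

exec_type = metadata_store_pb2.ExecutionType(name="kfp.ContainerExecution")
type_id = store.put_execution_type(exec_type)

# The original execution whose outputs were cached.
original = metadata_store_pb2.Execution(type_id=type_id)
[original_id] = store.put_executions([original])

# The cache-hit execution records where its outputs came from.
cached = metadata_store_pb2.Execution(type_id=type_id)
cached.custom_properties["cached_execution_id"].int_value = original_id
[cached_id] = store.put_executions([cached])

# A frontend could then follow the link back to the original execution.
cached_back = store.get_executions_by_id([cached_id])[0]
original_back = store.get_executions_by_id(
    [cached_back.custom_properties["cached_execution_id"].int_value])[0]
assert original_back.id == original_id
```

As the second comment notes, the main design risk is committing to the property name: once the frontend reads `cached_execution_id`, renaming it later requires cross-version compatibility handling.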
https://api.github.com/repos/kubeflow/pipelines/issues/5967
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5967/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5967/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5967/events
https://github.com/kubeflow/pipelines/issues/5967
936,111,870
MDU6SXNzdWU5MzYxMTE4NzA=
5,967
[sdk] Block using v2 `@component` decorator in v1 mode.
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "This issue has been automatically closed because it has not had recent activity. Please comment \"/reopen\" to reopen it.\n" ]
2021-07-02T23:13:19
2022-03-03T04:05:13
2022-03-03T04:05:13
COLLABORATOR
null
The new decorator is meant for v2 and v2_compatible modes only. We should raise an explicit error when a pipeline that uses this decorator is compiled in v1 mode (a sketch of such a guard follows this record).
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5967/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5967/timeline
null
completed
null
null
false
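A minimal sketch of the guard issue 5967 asks for. All names here (`v2_component`, `compile_pipeline`, the `_is_v2_component` tag) are illustrative assumptions rather than real KFP SDK internals; the point is simply that the decorator can tag functions at decoration time so a v1-mode compiler can fail fast with an explicit message.

```python
# Hypothetical sketch of the requested guard; real KFP internals differ.
import functools

_IS_V2_COMPONENT = "_is_v2_component"

def v2_component(func):
    """Stand-in for the v2 @component decorator: tags the function."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    setattr(wrapper, _IS_V2_COMPONENT, True)
    return wrapper

def compile_pipeline(components, mode="v1"):
    """Stand-in compiler that rejects v2 components in v1 mode."""
    if mode == "v1":
        for comp in components:
            if getattr(comp, _IS_V2_COMPONENT, False):
                raise ValueError(
                    f"Component '{comp.__name__}' uses the v2 @component "
                    "decorator, which is only supported in v2 and "
                    "v2_compatible modes; refusing to compile in v1 mode.")
    # ... actual compilation would happen here ...

@v2_component
def add(a: float, b: float) -> float:
    return a + b

compile_pipeline([add], mode="v1")  # raises ValueError with a clear message
```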
https://api.github.com/repos/kubeflow/pipelines/issues/5965
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5965/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5965/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5965/events
https://github.com/kubeflow/pipelines/issues/5965
936,066,531
MDU6SXNzdWU5MzYwNjY1MzE=
5,965
[bug] Post submit integration tests failing - argoproj/argo repo not found
{ "login": "joeliedtke", "id": 11788735, "node_id": "MDQ6VXNlcjExNzg4NzM1", "avatar_url": "https://avatars.githubusercontent.com/u/11788735?v=4", "gravatar_id": "", "url": "https://api.github.com/users/joeliedtke", "html_url": "https://github.com/joeliedtke", "followers_url": "https://api.github.com/users/joeliedtke/followers", "following_url": "https://api.github.com/users/joeliedtke/following{/other_user}", "gists_url": "https://api.github.com/users/joeliedtke/gists{/gist_id}", "starred_url": "https://api.github.com/users/joeliedtke/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/joeliedtke/subscriptions", "organizations_url": "https://api.github.com/users/joeliedtke/orgs", "repos_url": "https://api.github.com/users/joeliedtke/repos", "events_url": "https://api.github.com/users/joeliedtke/events{/privacy}", "received_events_url": "https://api.github.com/users/joeliedtke/received_events", "type": "User", "site_admin": false }
[ { "id": 930619528, "node_id": "MDU6TGFiZWw5MzA2MTk1Mjg=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/testing", "name": "area/testing", "color": "00daff", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "This should be fixed by [kubeflow/pipelines #5966](https://github.com/kubeflow/pipelines/pull/5966). I added [kubeflow/pipelines #5969](https://github.com/kubeflow/pipelines/pull/5969) to clean up other instances of **argoproj/argo**\r\n\r\n" ]
2021-07-02T21:06:02
2021-07-09T00:48:18
2021-07-09T00:48:18
MEMBER
null
### What steps did you take <!-- A clear and concise description of what the bug is.--> ### What happened: The following error occurs while building the Dockerfile at `test/sample-test/Dockerfile`: ``` gzip: argo-linux-amd64.gz: not in gzip format The command '/bin/sh -c curl -sLO https://github.com/argoproj/argo/releases/download/${ARGO_VERSION}/argo-linux-amd64.gz && gunzip argo-linux-amd64.gz && chmod +x argo-linux-amd64 && mv ./argo-linux-amd64 /usr/local/bin/argo' returned a non-zero code: 1 ``` ### What did you expect to happen: `argo-linux-amd64.gz` should have been downloaded and the container should have been built. ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears at the bottom of the KFP UI's left-side navigation. --> * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> Argo appears to have removed the argoproj/argo repository, or renamed it to argoproj/argo-workflows. Requests for argoproj/argo URLs now redirect to argoproj/argocon21 (a sketch of the likely fix follows this record). ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> /area testing <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5965/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5965/timeline
null
completed
null
null
false
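The failing step above can likely be repaired by pointing the download at the renamed repository (the actual fix landed in #5966). This shell sketch assumes the release assets keep the same file name under argoproj/argo-workflows:

```sh
# Likely fix for the Dockerfile step: argoproj/argo was renamed to
# argoproj/argo-workflows, so the old URL no longer serves the gzip asset.
# ARGO_VERSION is whatever the Dockerfile already pins.
curl -sLO "https://github.com/argoproj/argo-workflows/releases/download/${ARGO_VERSION}/argo-linux-amd64.gz" \
  && gunzip argo-linux-amd64.gz \
  && chmod +x argo-linux-amd64 \
  && mv ./argo-linux-amd64 /usr/local/bin/argo
```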
https://api.github.com/repos/kubeflow/pipelines/issues/5962
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5962/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5962/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5962/events
https://github.com/kubeflow/pipelines/issues/5962
935,199,277
MDU6SXNzdWU5MzUxOTkyNzc=
5,962
[backend] Failed to retry run: Workflow cannot be retried with node in Omitted phase
{ "login": "caleb-artifact", "id": 68619622, "node_id": "MDQ6VXNlcjY4NjE5NjIy", "avatar_url": "https://avatars.githubusercontent.com/u/68619622?v=4", "gravatar_id": "", "url": "https://api.github.com/users/caleb-artifact", "html_url": "https://github.com/caleb-artifact", "followers_url": "https://api.github.com/users/caleb-artifact/followers", "following_url": "https://api.github.com/users/caleb-artifact/following{/other_user}", "gists_url": "https://api.github.com/users/caleb-artifact/gists{/gist_id}", "starred_url": "https://api.github.com/users/caleb-artifact/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/caleb-artifact/subscriptions", "organizations_url": "https://api.github.com/users/caleb-artifact/orgs", "repos_url": "https://api.github.com/users/caleb-artifact/repos", "events_url": "https://api.github.com/users/caleb-artifact/events{/privacy}", "received_events_url": "https://api.github.com/users/caleb-artifact/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "As https://github.com/kubeflow/pipelines/issues/5878#issuecomment-863651456 mentioned, you need to wait for next KFP release 1.7.0. ", "I think we can close now", "> As [#5878 (comment)](https://github.com/kubeflow/pipelines/issues/5878#issuecomment-863651456) mentioned, you need to wait for next KFP release 1.7.0.\r\n\r\nAh got ya. I think what confused me is that v1.5.1 included the fix and I had linked that comment to the 1.5.1 release because the author was using 1.5.0 and thus 1.5.1 would be the \"next release\" for their KFP version." ]
2021-07-01T20:42:08
2021-07-06T14:57:34
2021-07-05T06:32:27
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipeline Standalone via kustomize manifests on GKE * KFP version: 1.6.0 * KFP SDK version: ``` kfp 1.6.3 kfp-pipeline-spec 0.1.8 kfp-server-api 1.6.0 ``` ### Steps to reproduce 1. Run a pipeline with a component depending upon another failing component 2. Retry the run ```jsonc // Failed to retry run: 5b79857a-be4f-4876-98ae-105845e892ee with error: { "error": "Retry run failed.: InternalServerError: Workflow cannot be retried with node nlp-pipeline-mcclt-4094140295 in Omitted phase: workflow cannot be retried", "code": 13, "message": "Retry run failed.: InternalServerError: Workflow cannot be retried with node nlp-pipeline-mcclt-4094140295 in Omitted phase: workflow cannot be retried", "details": [{ "@type": "type.googleapis.com/api.Error", "error_message": "Internal Server Error", "error_details": "Retry run failed.: InternalServerError: Workflow cannot be retried with node nlp-pipeline-mcclt-4094140295 in Omitted phase: workflow cannot be retried" }] } ``` ### Expected result The pipeline is retried. ### Materials and Reference I noticed this bug was fixed in 1.5.1 already but hasn't been fixed in 1.6.x. I tried to get some info on when a fix would land in a 1.6 release but have not seen any activity there since the issue was closed. https://github.com/kubeflow/pipelines/issues/5878 Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5962/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5962/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5955
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5955/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5955/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5955/events
https://github.com/kubeflow/pipelines/issues/5955
933,914,995
MDU6SXNzdWU5MzM5MTQ5OTU=
5,955
Wait Container is Stuck and so is the Argo Workflow Controller for Large datasets only.
{ "login": "sharmahemlata", "id": 29674709, "node_id": "MDQ6VXNlcjI5Njc0NzA5", "avatar_url": "https://avatars.githubusercontent.com/u/29674709?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sharmahemlata", "html_url": "https://github.com/sharmahemlata", "followers_url": "https://api.github.com/users/sharmahemlata/followers", "following_url": "https://api.github.com/users/sharmahemlata/following{/other_user}", "gists_url": "https://api.github.com/users/sharmahemlata/gists{/gist_id}", "starred_url": "https://api.github.com/users/sharmahemlata/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sharmahemlata/subscriptions", "organizations_url": "https://api.github.com/users/sharmahemlata/orgs", "repos_url": "https://api.github.com/users/sharmahemlata/repos", "events_url": "https://api.github.com/users/sharmahemlata/events{/privacy}", "received_events_url": "https://api.github.com/users/sharmahemlata/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "We are using argo version 2.x. The latest argo version is 3.x\r\nCan you also create a issue in argo side so that argo side can confirm whether this is a argo issue that could also happen in version 3.x or it is a argo issue that only happens before version 3.x?", "@capri-xiyue. Thanks for the quick reply. I changed the image of the workflow controller to the newest and also tried some older versions, both 2.x and 3.x have the same problem.\r\nPs. Just raised the same issue for argo workflow.", "@sharmahemlata While I am missing context. This seems like an issue of data-passing between containers. I suspect you are incorrectly using inputValue instead of inputPath for a large file. causing this issue. This is probably causing OOM and throttling. \r\n\r\nYou can read up further on: https://www.kubeflow.org/docs/components/pipelines/reference/component-spec/ on how to use inputPath.", "@munagekar sorry for the delayed response. All data is mounted into the container from volumes. Here is the pod definition, values are never directly used. \r\n[pod-def.txt](https://github.com/kubeflow/pipelines/files/6806659/pod-def.txt)\r\n", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-06-30T17:00:50
2022-03-03T02:05:31
null
NONE
null
### What steps did you take I have a machine learning pipeline that runs several machine learning algorithms in parallel as components (pods in kubernetes) When I run the pipeline on relatively small datasets (200,000 rows), the pipeline takes some time but finishes executing (3+ hours) with all components finished. When I run the pipeline on a large dataset ([UCI poker dataset](https://archive.ics.uci.edu/ml/datasets/Poker%2BHand) : 1,000,000 rows), some components do not finish executing. Logs of wait container in the pod end with: ``` time="2021-06-30T14:12:58Z" level=info msg="Waiting on main container" time="2021-06-30T14:13:16Z" level=info msg="main container started with container ID: ba40e83f3a08789e921b340f3f8d9f1b04f1ff299b5777e9c4551840b1a13f82" time="2021-06-30T14:13:16Z" level=info msg="Starting annotations monitor" time="2021-06-30T14:13:16Z" level=info msg="docker wait ba40e83f3a08789e921b340f3f8d9f1b04f1ff299b5777e9c4551840b1a13f82" time="2021-06-30T14:13:16Z" level=info msg="Starting deadline monitor" time="2021-06-30T14:17:58Z" level=info msg="Alloc=3679 TotalAlloc=11473 Sys=70080 NumGC=6 Goroutines=12" time="2021-06-30T14:22:58Z" level=info msg="Alloc=3681 TotalAlloc=11484 Sys=70080 NumGC=8 Goroutines=12" time="2021-06-30T14:27:58Z" level=info msg="Alloc=3679 TotalAlloc=11494 Sys=70080 NumGC=11 Goroutines=12" time="2021-06-30T14:32:58Z" level=info msg="Alloc=3681 TotalAlloc=11504 Sys=70080 NumGC=13 Goroutines=12" time="2021-06-30T14:37:58Z" level=info msg="Alloc=3679 TotalAlloc=11514 Sys=70080 NumGC=16 Goroutines=12" time="2021-06-30T14:42:59Z" level=info msg="Alloc=3681 TotalAlloc=11524 Sys=70080 NumGC=18 Goroutines=12" time="2021-06-30T14:47:58Z" level=info msg="Alloc=3679 TotalAlloc=11535 Sys=70080 NumGC=21 Goroutines=12" ``` Logs of workflow controller end with: ``` time="2021-06-30T16:29:19Z" level=info msg="Alloc=4714 TotalAlloc=92880 Sys=70080 NumGC=154 Goroutines=101" W0630 16:34:14.567224 1 reflector.go:302] pkg/mod/k8s.io/client-go@v0.0.0-20191225075139-73fd2ddc9180/tools/cache/reflector.go:98: watch of *v1.ConfigMap ended with: too old resource version: 126291 (129732) time="2021-06-30T16:34:19Z" level=info msg="Alloc=4801 TotalAlloc=93109 Sys=70080 NumGC=157 Goroutines=101" time="2021-06-30T16:39:19Z" level=info msg="Alloc=4770 TotalAlloc=93249 Sys=70080 NumGC=159 Goroutines=101" ``` According to [this post](https://github.com/argoproj/argo-workflows/issues/6152), I should update the argo workflow image. The image is: `gcr.io/cloud-marketplace/google-cloud-ai-platform/kubeflow-pipelines/argoexecutor:1.4.1` So, I update the workflow-controller to use the image: `argoproj/argoexec:latest` After this also the workflow container ends with the following logs: ``` time="2021-03-11T20:06:43.616Z" level=info msg="Get leases 200" time="2021-03-11T20:06:43.621Z" level=info msg="Update leases 200" time="2021-03-11T20:06:48.630Z" level=info msg="Get leases 200" time="2021-03-11T20:06:48.650Z" level=info msg="Update leases 200" ``` According to [this ](https://github.com/argoproj/argo-workflows/issues/5371)issue, there should be `serviceAccountName: argo` in the workflow-controller deployment file. My deployment file already has that. But it still does not work. The looks like an argo issue and I am reporting a bug here because I think it maybe due to kubeflow not using the right argo workflow version. ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? 
Python SDK * KFP version: 1.4.1 * KFP SDK version: kfp 1.6.3 kfp-pipeline-spec 0.1.8 kfp-server-api 1.6.0 ### Anything else you would like to add: Since the dataset is big, I have specified cpu and memory requests and limits. The usage is always less than the limits. ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> /area backend /area sdk /area components --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5955/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5955/timeline
null
null
null
null
false
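The inputValue-vs-inputPath distinction raised in the comments above is easiest to see in code. This is a minimal v1-SDK sketch with a hypothetical `train` component; `InputPath`/`OutputPath` and `create_component_from_func` are real `kfp.components` helpers, and the key point is that file-based inputs are never inlined into the container command line:

```python
# Sketch of file-based data passing in the KFP v1 SDK; `train` is a
# hypothetical component used only for illustration.
from kfp.components import InputPath, OutputPath, create_component_from_func

def train(dataset_path: InputPath("CSV"), model_path: OutputPath("Model")):
    # The dataset arrives as a local file path inside the container; its
    # contents are never substituted into the command line, so large
    # inputs do not bloat the pod spec.
    with open(dataset_path) as f:
        n_rows = sum(1 for _ in f)
    with open(model_path, "w") as f:
        f.write(f"trained on {n_rows} rows")

train_op = create_component_from_func(train, base_image="python:3.7")
```

Note that the reporter later clarified their data is mounted from volumes, so this sketch illustrates the suggestion in the comments rather than the confirmed root cause.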
https://api.github.com/repos/kubeflow/pipelines/issues/5954
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5954/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5954/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5954/events
https://github.com/kubeflow/pipelines/issues/5954
933,796,487
MDU6SXNzdWU5MzM3OTY0ODc=
5,954
[backend] missing 'v' prefix in repo tags
{ "login": "smthpickboy", "id": 18735716, "node_id": "MDQ6VXNlcjE4NzM1NzE2", "avatar_url": "https://avatars.githubusercontent.com/u/18735716?v=4", "gravatar_id": "", "url": "https://api.github.com/users/smthpickboy", "html_url": "https://github.com/smthpickboy", "followers_url": "https://api.github.com/users/smthpickboy/followers", "following_url": "https://api.github.com/users/smthpickboy/following{/other_user}", "gists_url": "https://api.github.com/users/smthpickboy/gists{/gist_id}", "starred_url": "https://api.github.com/users/smthpickboy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/smthpickboy/subscriptions", "organizations_url": "https://api.github.com/users/smthpickboy/orgs", "repos_url": "https://api.github.com/users/smthpickboy/repos", "events_url": "https://api.github.com/users/smthpickboy/events{/privacy}", "received_events_url": "https://api.github.com/users/smthpickboy/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "@Bobgy is there any reason for not having \"v\" as a prefix atm? ", "It was the convention from the start when we didn't consider this use-case, but I think you raised a good point for adding \"v\" prefix.\r\n\r\nWelcome some discussion on what's the best way forward, do we create two tags for each release in a transition period or do we migrate to v1.7.0 like tags right away?", "Also you can help by finding out the places we need to change to fit the new format", "> Welcome some discussion on what's the best way forward, do we create two tags for each release in a transition period or do we migrate to v1.7.0 like tags right away?\r\n\r\nLooking at https://github.com/kubeflow/kubeflow and https://github.com/kubeflow/tf-operator other parts of Kubeflow seems to use the \"v\" prefix. I dont see any reason for keeping both, and suggest we switch right away in next release. \r\n", "> Also you can help by finding out the places we need to change to fit the new format\r\n\r\nAs I understand we dont need to change anything, looking [here](https://github.com/kubeflow/pipelines/tree/master/test/release)\r\n\r\nwe can just set the tag: \r\n\r\n```\r\nTAG=v1.4.0 BRANCH=release-1.4 make release\r\n```\r\n\r\nwe could however force a prefix to avoid error during the release. ", "Yep, probably also\r\n\r\nTODOs:\r\n* [x] the release docs `TAG=v1.4.0 BRANCH=release-1.4 make release`\r\n* [x] In some build scripts, we parse tag version\r\n* [x] update cloud build trigger pattern (only admins can udpate)", "Added to release tracker", "I will take a look at it further and make a PR. ", "@capri-xiyue is facing release problems related to this change", "I encountered errorStep #49 - \"tagCacheServerForMarketplace\": Error parsing reference: \"gcr.io/ml-pipeline/google/pipelines/cacheserver:\" is not a valid repository/tag: invalid reference format when I tried to make a release.", "We resolved the issue, it's not related to this change.\r\n\r\nHowever, I do find one issue that's related to this change.\r\nSent out PR https://github.com/kubeflow/pipelines/pull/6025", "https://oss-prow.knative.dev/view/gs/oss-prow/logs/kubeflow-pipeline-postsubmit-mkp-e2e-test/1414831787711926272#1:build-log.txt%3A1234\r\n\r\n> Step #1 - \"verify\": config_helper.InvalidSchema: Invalid schema publishedVersion \"v1.7.0-alpha.3\"; must be semver including patch version\r\n\r\nit seems marketplace also do not like tags with v prefix for full semver version.", "Also found that python package version should not have the 'v' prefix either: https://www.python.org/dev/peps/pep-0440/.\r\n\r\nI feel that we should only add the 'v' prefix on git tags, but avoid it in other cases. It might take quite some trial and error to figure out which versions require the v prefix and which don't.\r\n\r\n@NikeNano I'm leaning towards reverting the PR and revisit it in the next release to delay resolving the problems we've seen. What do you think?", "I will take a stab at this again @Bobgy, as I understand all the issues we only want to add the `v` tag to the golang packages then which is then the same as the name of the `github tags`?\r\n\r\nThis will make the tags deviate between different components of the release, which is not ideal but. I dont see any alternatives if we like to support it as a golang package. ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-06-30T14:53:57
2022-03-11T21:28:18
null
NONE
null
When trying to use the go client in github.com/kubeflow/pipelines/backend/api/go_client, I find that `go mod` can't fetch the corresponding tags in this github repo, because all tags defined are missing the 'v' prefix and are considered non-canonical tag formats. E.g., tag '1.5.1' should be 'v1.5.1'.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5954/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5954/timeline
null
null
null
null
false
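A shell sketch of why the prefix matters for issue 5954: the Go toolchain only treats tags of the canonical form `vX.Y.Z` as module versions. A bare `1.5.1` tag can still be resolved, but only as a raw git revision that gets converted into a pseudo-version. The tag value below is illustrative:

```sh
# Without the 'v' prefix, go resolves '1.5.1' only as a raw git revision
# and records a pseudo-version (v0.0.0-<date>-<commit>) in go.mod:
go get github.com/kubeflow/pipelines@1.5.1

# With a canonical semver tag, go records the release version directly:
git tag v1.5.1 && git push origin v1.5.1
go get github.com/kubeflow/pipelines@v1.5.1
```

This also matches the thread's conclusion: the 'v' prefix is required on git tags for Go consumers, while Python package versions (PEP 440) and the marketplace's `publishedVersion` must stay prefix-free.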
https://api.github.com/repos/kubeflow/pipelines/issues/5951
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5951/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5951/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5951/events
https://github.com/kubeflow/pipelines/issues/5951
933,360,961
MDU6SXNzdWU5MzMzNjA5NjE=
5,951
[bug] Using set_gpu_limit with bad resource management
{ "login": "rylynchen", "id": 1971286, "node_id": "MDQ6VXNlcjE5NzEyODY=", "avatar_url": "https://avatars.githubusercontent.com/u/1971286?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rylynchen", "html_url": "https://github.com/rylynchen", "followers_url": "https://api.github.com/users/rylynchen/followers", "following_url": "https://api.github.com/users/rylynchen/following{/other_user}", "gists_url": "https://api.github.com/users/rylynchen/gists{/gist_id}", "starred_url": "https://api.github.com/users/rylynchen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rylynchen/subscriptions", "organizations_url": "https://api.github.com/users/rylynchen/orgs", "repos_url": "https://api.github.com/users/rylynchen/repos", "events_url": "https://api.github.com/users/rylynchen/events{/privacy}", "received_events_url": "https://api.github.com/users/rylynchen/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Can you help provide your definition of pipeline? ", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "This issue has been automatically closed because it has not had recent activity. Please comment \"/reopen\" to reopen it.\n" ]
2021-06-30T06:44:21
2022-03-03T04:05:10
2022-03-03T04:05:10
NONE
null
### What steps did you take I added set_gpu_limit(1) to my pipeline, then created runs. But I can only create 2 runs. In fact, my k8s cluster has 4 nodes, and each node has 4 GPUs, like this: ![gpu_1](https://user-images.githubusercontent.com/1971286/123913722-1df58300-d9b1-11eb-9c3d-c6713d81fdcd.png) ### What happened: When I create a third run, I get an error like this: ![gpu2](https://user-images.githubusercontent.com/1971286/123913901-5ac17a00-d9b1-11eb-99db-fb674159a9db.png) When I remove set_gpu_limit(1) from my pipeline, I can create many runs, like this: ![gpu3](https://user-images.githubusercontent.com/1971286/123914460-ffdc5280-d9b1-11eb-8e6e-5a85666cc82b.png) ### Environment: * pipeline version: 1.0.0 So, does anyone have a good idea? --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5951/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5951/timeline
null
completed
null
null
false
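For reference, a sketch of how `set_gpu_limit` maps onto pod resources in the v1 SDK; `train` here is a hypothetical component. Each task pod gets a whole-GPU limit, so the number of concurrently running tasks is bounded by the cluster's allocatable GPUs, and any further pods stay Pending with an `Insufficient nvidia.com/gpu` event:

```python
# Sketch of GPU limits in the KFP v1 SDK; `train` is hypothetical.
import kfp.dsl as dsl
from kfp.components import create_component_from_func

def train():
    print("training on one GPU")

train_op = create_component_from_func(train, base_image="python:3.7")

@dsl.pipeline(name="gpu-demo")
def gpu_pipeline():
    task = train_op()
    # Sets resources.limits["nvidia.com/gpu"] = 1 on the task pod; the pod
    # stays Pending until a node has a free, allocatable GPU.
    task.set_gpu_limit(1)
```

If runs block well below the cluster's 16 installed GPUs, running `kubectl describe pod <pending-pod>` on a stuck task usually shows whether the scheduler sees fewer allocatable GPUs than expected, for example when the device plugin is not running on every node.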
https://api.github.com/repos/kubeflow/pipelines/issues/5948
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5948/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5948/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5948/events
https://github.com/kubeflow/pipelines/issues/5948
933,251,885
MDU6SXNzdWU5MzMyNTE4ODU=
5,948
[bug] Running the data-passing tutorial fails
{ "login": "hellobiek", "id": 2854520, "node_id": "MDQ6VXNlcjI4NTQ1MjA=", "avatar_url": "https://avatars.githubusercontent.com/u/2854520?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hellobiek", "html_url": "https://github.com/hellobiek", "followers_url": "https://api.github.com/users/hellobiek/followers", "following_url": "https://api.github.com/users/hellobiek/following{/other_user}", "gists_url": "https://api.github.com/users/hellobiek/gists{/gist_id}", "starred_url": "https://api.github.com/users/hellobiek/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hellobiek/subscriptions", "organizations_url": "https://api.github.com/users/hellobiek/orgs", "repos_url": "https://api.github.com/users/hellobiek/repos", "events_url": "https://api.github.com/users/hellobiek/events{/privacy}", "received_events_url": "https://api.github.com/users/hellobiek/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "new info: could not chroot into main for artifact collection", "1. Can you help uploading the screenshot of error msg again?(You didn't upload it successfully)\r\n2. Which KFP version are you using?\r\n3. Which storage(minio, aws or gcp XXX)provider do you choose for artifact storage?", "Hello @hellobiek , I am able to run `[Tutorial] Data passing in python components` successfully. Would you like to confirm the cluster provider you use and Kubernetes version as well?", "using minio", "fixed by myself", "> fixed by myself\r\n\r\nDo you mind posting your fix? I'm wondering if I'm running into a similar issue" ]
2021-06-30T02:39:34
2021-11-10T03:34:00
2021-07-03T18:14:25
NONE
null
When I run this tutorial: Run of [Tutorial] Data passing in python components, I get the following error message: This step is in Error state with this message: failed to save outputs: key unsupported: cannot get key for artifact location, because it is invalid
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5948/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5948/timeline
null
completed
null
null
false
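Since the reporter didn't post their fix, the following is only a sketch of where this error usually points. With a minio-backed install, `failed to save outputs: key unsupported: cannot get key for artifact location` typically means the workflow-controller's artifact repository has no usable bucket/key configuration. The section below mirrors the shape of the standard KFP standalone manifests; the names, endpoint, and `keyFormat` pattern are assumptions to verify against your own deployment:

```yaml
# Sketch (not the reporter's actual fix): the artifactRepository section of
# the workflow-controller-configmap for a minio-backed KFP install.
artifactRepository:
  archiveLogs: true
  s3:
    endpoint: "minio-service.kubeflow:9000"
    bucket: "mlpipeline"
    # Without a valid key/keyFormat the executor cannot compute an artifact
    # key, which surfaces as the "cannot get key for artifact location" error.
    keyFormat: "artifacts/{{workflow.name}}/{{pod.name}}"
    insecure: true
    accessKeySecret:
      name: mlpipeline-minio-artifact
      key: accesskey
    secretKeySecret:
      name: mlpipeline-minio-artifact
      key: secretkey
```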
https://api.github.com/repos/kubeflow/pipelines/issues/5944
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5944/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5944/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5944/events
https://github.com/kubeflow/pipelines/issues/5944
932,759,134
MDU6SXNzdWU5MzI3NTkxMzQ=
5,944
[frontend] incorrect DAG with argo v3.1.0
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619511, "node_id": "MDU6TGFiZWw5MzA2MTk1MTE=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/priority/p0", "name": "priority/p0", "color": "db1203", "default": false, "description": "" }, { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @zijianjoy ", "A quick walkthrough of the workflow changes in https://github.com/argoproj/argo-workflows/blame/v3.1.0/ui/src/models/workflows.ts:\r\n\r\nThere are 3 areas which have changed since 2.12.9:\r\n\r\n1. `ContainerNode` interface: https://github.com/argoproj/argo-workflows/blame/v3.1.0/ui/src/models/workflows.ts#L296\r\n2. interface `Template` added fields \r\n``` \r\ncontainerSet?: {\r\n containers: ContainerNode[];\r\n };\r\n```\r\n3. `NodeType` enum add new type `Container`.\r\n\r\nReference PRs:\r\nhttps://github.com/argoproj/argo-workflows/pull/5099\r\nhttps://github.com/argoproj/argo-workflows/pull/5348", "See a sample runtime node info about `build_kfp_launcher`:\r\n\r\n```\r\n\"v2-sample-test-lmx4x-2942687590\": {\r\n \"id\": \"v2-sample-test-lmx4x-2942687590\",\r\n \"name\": \"v2-sample-test-lmx4x.kaniko\",\r\n \"displayName\": \"kaniko\",\r\n \"type\": \"Pod\",\r\n \"templateName\": \"kaniko\",\r\n \"templateScope\": \"local/v2-sample-test-lmx4x\",\r\n \"phase\": \"Running\",\r\n \"boundaryID\": \"v2-sample-test-lmx4x\",\r\n \"startedAt\": \"2021-06-29T18:41:14Z\",\r\n \"finishedAt\": null,\r\n \"progress\": \"0/1\",\r\n \"inputs\": {\r\n \"parameters\": [\r\n {\r\n \"name\": \"launcher_destination\",\r\n \"value\": \"gcr.io/jamxl-kfp-dev/kfp-launcher/kfp-launcher\"\r\n }\r\n ],\r\n \"artifacts\": [\r\n {\r\n \"name\": \"download-folder-from-tarball-on-gcs-Folder\",\r\n \"path\": \"/tmp/inputs/context_artifact/data\",\r\n \"s3\": {\r\n \"key\": \"artifacts/v2-sample-test-lmx4x/2021/06/29/v2-sample-test-lmx4x-2365399356/download-folder-from-tarball-on-gcs-Folder.tgz\"\r\n }\r\n }\r\n ]\r\n },\r\n \"children\": [\r\n \"v2-sample-test-lmx4x-1625946649\"\r\n ],\r\n \"hostNodeName\": \"gke-kfp-v2-compatible-default-pool-1d50a9c4-3t50\"\r\n }\r\n }\r\n```\r\n\r\nNote that `children` has `v2-sample-test-lmx4x-1625946649`, which is `build_samples_image`. This caused the frontend thinks that there are dependencies between two steps. ", "I think containerSet and the first change you mentioned are related to argo's new introduction of container DAG in one single Pod. KFP does not plan to use them, so we can safely ignore the changes.", "> Note that children has v2-sample-test-lmx4x-1625946649, which is build_samples_image. This caused the frontend thinks that there are dependencies between two steps.\r\n\r\nthis is strange, can it be a bug in argo?\r\nor did our workflow spec need any changes upgrading to argo v3.1?", "Validated offline that there is a misrepresentation of diamond workflow in argo v3.1. Created https://github.com/argoproj/argo-workflows/issues/6267 for reporting and tracking this bug." ]
2021-06-29T14:32:05
2021-07-03T12:52:49
2021-07-03T12:52:49
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> kfp standalone * KFP version: 1.7.0-alpha.1 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears at the bottom of the KFP UI's left-side navigation. --> ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> 1. run v2 sample test 2. note that two steps are running at the same time, but there's a dependency edge between them: ![image](https://user-images.githubusercontent.com/4957653/123816141-a6304580-d929-11eb-92e2-892bf9501feb.png) ### Expected result <!-- What should the correct behavior be? --> the two build-image steps should have no dependency on each other ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5944/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5944/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5943
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5943/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5943/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5943/events
https://github.com/kubeflow/pipelines/issues/5943
932,530,011
MDU6SXNzdWU5MzI1MzAwMTE=
5,943
[bug] pipelines flaky on GKE after upgrading to argo v3.1+ docker executor
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619531, "node_id": "MDU6TGFiZWw5MzA2MTk1MzE=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/platform/gcp", "name": "platform/gcp", "color": "2515fc", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Downgraded to KFP 1.6.0 and verified the same problems, they seem to be related to my GKE cluster than argo/kfp.", "I'm seeing this again on test infra:\r\nhttps://4e18c21c9d33d20f-dot-datalab-vm-staging.googleusercontent.com/#/runs/details/d249c419-a9a7-4653-8212-856f87ad1e91\r\nhttps://4e18c21c9d33d20f-dot-datalab-vm-staging.googleusercontent.com/#/runs/details/cb35184f-60a5-44e1-832d-6ec1d7866c98\r\n\r\nagain on\r\n> This step is in Error state with this message: Error (exit code 1): path /tmp/outputs/Output/data does not exist in archive /tmp/argo/outputs/artifacts/output-artifact-Output.tgz\r\n\r\nhttps://4e18c21c9d33d20f-dot-datalab-vm-staging.googleusercontent.com/#/runs/details/dfb5fe83-dd21-4a26-b494-c9382e700de5", "Restarted nodepools and the issue is temporarily resolved", "Just saw a different type of error:\r\nhttps://github.com/kubeflow/pipelines/pull/5970#issuecomment-876941351\r\n\r\n> F0709 05:29:17.078200 1 main.go:56] Failed to execute component: failed to download input artifact \"input_dir\" from remote storage URI \"gs://kfp-ci/4ca1dd2d702e106386af42120c73df46ed8c0cc5/v2-sample-test/data/samples_config-loop-item/dir-pipeline-v2/dir-pipeline-v2-hd2tw/produce-dir-with-files-v2-python-op/output_dir\": failed to list objects in remote storage \"dir-pipeline-v2/dir-pipeline-v2-hd2tw/produce-dir-with-files-v2-python-op/output_dir\": blob (code=Unknown): Get \"", "Decided to stop working on this issue, because the problems proves to be issues in argo docker executor itself.\r\nOur plan has been upgrading to emissary executor, so there's not much value stabilizing argo docker executor if it isn't stable already." ]
2021-06-29T11:33:27
2021-08-04T13:55:42
2021-08-04T13:55:42
CONTRIBUTOR
null
### What steps did you take <!-- A clear and concise description of what the bug is.--> pipelines flaky after upgrading to argo v3.1.0, when using argo docker executor 1. install kfp 1.7.0-alpha.1 2. run v2 sample tests in the local cluster ### What happened: Some pipelines fail randomly with several types of error: `This step is in Error state with this message: Error (exit code 1): path /tmp/outputs/metrics/data does not exist in archive /tmp/argo/outputs/artifacts/output-named-tuple-metrics.tgz` `This step is in Error state with this message: Error (exit code 1): Error: No such container:path: 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5:/tmp/outputs/sum/data tar: This does not look like a tar archive tar: Exiting with failure status due to previous errors` https://1bbe723ceaf1ede1-dot-asia-east1.pipelines.googleusercontent.com/#/runs/details/da866240-98f4-42d1-86e1-2c6b17a7b944 `This step is in Error state with this message: Error (exit code 1): failed to wait for main container to complete: timed out waiting for the condition: Error response from daemon: No such container: 2723a1dcd93b23f100a96bd82480b8917b8f805827d41e1f70f515655cb1d9e1` https://1bbe723ceaf1ede1-dot-asia-east1.pipelines.googleusercontent.com/#/runs/details/1f5a60a1-885a-4de5-8222-d906e92130bc Both type of errors have very similar logs in argo wait container: ``` time="2021-06-29T11:14:29.706Z" level=info msg="Starting annotations monitor" time="2021-06-29T11:14:29.706Z" level=info msg="Starting deadline monitor" time="2021-06-29T11:14:29.707Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:29.790Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Created {0 63760562057 <nil>}}]" time="2021-06-29T11:14:29.790Z" level=info msg="mapped container name \"wait\" to container ID \"1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431\" (created at 2021-06-29 11:14:17 +0000 UTC, status Created)" time="2021-06-29T11:14:29.790Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:30.791Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:30.862Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:30.862Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:31.862Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:31.990Z" level=info 
msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:31.990Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:32.990Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:33.067Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:33.067Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:34.067Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:34.158Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:34.158Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:35.159Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:35.286Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:35.286Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:36.286Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:36.359Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:36.359Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:37.360Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} 
--filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:37.535Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:37.535Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:38.535Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:38.598Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:38.598Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:39.598Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:39.661Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:39.661Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:40.661Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:40.782Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:40.782Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:41.782Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:41.877Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Created {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:41.877Z" level=info msg="ignoring container \"kfp-launcher\" created at 
2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:41.877Z" level=info msg="mapped container name \"main\" to container ID \"32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5\" (created at 2021-06-29 11:14:32 +0000 UTC, status Created)" time="2021-06-29T11:14:42.710Z" level=info msg="docker wait 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5" time="2021-06-29T11:14:42.768Z" level=error msg="`docker wait 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5` failed: Error response from daemon: No such container: 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5\n" time="2021-06-29T11:14:42.877Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:42.935Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Created {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:42.935Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:43.935Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:44.012Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Created {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:44.012Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:44.071Z" level=info msg="docker wait 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5" time="2021-06-29T11:14:44.071Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:44.265Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Created {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:44.265Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:44.302Z" level=info msg="Main container completed" time="2021-06-29T11:14:44.302Z" level=info msg="No Script output reference in workflow. 
Capturing script output ignored" time="2021-06-29T11:14:44.302Z" level=info msg="Saving logs" time="2021-06-29T11:14:44.303Z" level=info msg="[docker logs 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5]" time="2021-06-29T11:14:45.013Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:45.112Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Created {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:45.112Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:45.268Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:45.423Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Created {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:45.423Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:46.113Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:46.167Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Created {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:46.168Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:46.423Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:46.589Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Created {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:46.589Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before 
process started" time="2021-06-29T11:14:46.878Z" level=info msg="S3 Save path: /tmp/argo/outputs/logs/main.log, key: artifacts/add-pipeline-fz5cn/2021/06/29/add-pipeline-fz5cn-2565795640/main.log" time="2021-06-29T11:14:46.878Z" level=info msg="Creating minio client minio-service.kubeflow:9000 using static credentials" time="2021-06-29T11:14:46.879Z" level=info msg="Saving from /tmp/argo/outputs/logs/main.log to s3 (endpoint: minio-service.kubeflow:9000, bucket: mlpipeline, key: artifacts/add-pipeline-fz5cn/2021/06/29/add-pipeline-fz5cn-2565795640/main.log)" time="2021-06-29T11:14:47.168Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:47.259Z" level=info msg="not deleting local artifact" localArtPath=/tmp/argo/outputs/logs/main.log time="2021-06-29T11:14:47.259Z" level=info msg="Successfully saved file: /tmp/argo/outputs/logs/main.log" time="2021-06-29T11:14:47.259Z" level=info msg="Saving output parameters" time="2021-06-29T11:14:47.259Z" level=info msg="Saving path output parameter: add-2-sum" time="2021-06-29T11:14:47.259Z" level=info msg="Copying /tmp/outputs/sum/data from base image layer" time="2021-06-29T11:14:47.259Z" level=info msg="sh -c docker cp -a 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5:/tmp/outputs/sum/data - | tar -ax -O" time="2021-06-29T11:14:47.284Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Up {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:47.284Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:47.413Z" level=error msg="`sh -c docker cp -a 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5:/tmp/outputs/sum/data - | tar -ax -O` failed: Error: No such container:path: 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5:/tmp/outputs/sum/data\ntar: This does not look like a tar archive\ntar: Exiting with failure status due to previous errors\n" time="2021-06-29T11:14:47.413Z" level=error msg="executor error: Error: No such container:path: 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5:/tmp/outputs/sum/data\ntar: This does not look like a tar archive\ntar: Exiting with failure status due to previous 
errors\ngithub.com/argoproj/argo-workflows/v3/errors.New\n\t/go/src/github.com/argoproj/argo-workflows/errors/errors.go:49\ngithub.com/argoproj/argo-workflows/v3/errors.InternalError\n\t/go/src/github.com/argoproj/argo-workflows/errors/errors.go:60\ngithub.com/argoproj/argo-workflows/v3/workflow/common.RunCommand\n\t/go/src/github.com/argoproj/argo-workflows/workflow/common/util.go:238\ngithub.com/argoproj/argo-workflows/v3/workflow/common.RunShellCommand\n\t/go/src/github.com/argoproj/argo-workflows/workflow/common/util.go:255\ngithub.com/argoproj/argo-workflows/v3/workflow/executor/docker.(*DockerExecutor).GetFileContents\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/docker/docker.go:58\ngithub.com/argoproj/argo-workflows/v3/workflow/executor.(*WorkflowExecutor).SaveParameters\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/executor.go:492\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.waitContainer\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:57\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.NewWaitCommand.func1\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:18\ngithub.com/spf13/cobra.(*Command).execute\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846\ngithub.com/spf13/cobra.(*Command).ExecuteC\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950\ngithub.com/spf13/cobra.(*Command).Execute\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887\nmain.main\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/main.go:15\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:204\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1374" time="2021-06-29T11:14:47.413Z" level=info msg="Saving output artifacts" time="2021-06-29T11:14:47.414Z" level=info msg="Staging artifact: add-2-sum" time="2021-06-29T11:14:47.414Z" level=info msg="Copying /tmp/outputs/sum/data from container base image layer to /tmp/argo/outputs/artifacts/add-2-sum.tgz" time="2021-06-29T11:14:47.414Z" level=info msg="Archiving main:/tmp/outputs/sum/data to /tmp/argo/outputs/artifacts/add-2-sum.tgz" time="2021-06-29T11:14:47.414Z" level=info msg="sh -c docker cp -a 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5:/tmp/outputs/sum/data - | gzip > /tmp/argo/outputs/artifacts/add-2-sum.tgz" time="2021-06-29T11:14:47.589Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:47.680Z" level=warning msg="path /tmp/outputs/sum/data does not exist in archive /tmp/argo/outputs/artifacts/add-2-sum.tgz" time="2021-06-29T11:14:47.680Z" level=error msg="executor error: path /tmp/outputs/sum/data does not exist in archive 
/tmp/argo/outputs/artifacts/add-2-sum.tgz\ngithub.com/argoproj/argo-workflows/v3/errors.New\n\t/go/src/github.com/argoproj/argo-workflows/errors/errors.go:49\ngithub.com/argoproj/argo-workflows/v3/errors.Errorf\n\t/go/src/github.com/argoproj/argo-workflows/errors/errors.go:55\ngithub.com/argoproj/argo-workflows/v3/workflow/executor/docker.(*DockerExecutor).CopyFile\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/docker/docker.go:88\ngithub.com/argoproj/argo-workflows/v3/workflow/executor.(*WorkflowExecutor).stageArchiveFile\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/executor.go:402\ngithub.com/argoproj/argo-workflows/v3/workflow/executor.(*WorkflowExecutor).saveArtifact\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/executor.go:290\ngithub.com/argoproj/argo-workflows/v3/workflow/executor.(*WorkflowExecutor).SaveArtifacts\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/executor.go:276\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.waitContainer\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:62\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.NewWaitCommand.func1\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:18\ngithub.com/spf13/cobra.(*Command).execute\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846\ngithub.com/spf13/cobra.(*Command).ExecuteC\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950\ngithub.com/spf13/cobra.(*Command).Execute\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887\nmain.main\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/main.go:15\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:204\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1374" time="2021-06-29T11:14:47.681Z" level=info msg="Annotating pod with output" time="2021-06-29T11:14:47.694Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Up {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:47.694Z" level=info msg="ignoring container \"kfp-launcher\" created at 2021-06-29 11:13:20 +0000 UTC, too long before process started" time="2021-06-29T11:14:47.739Z" level=info msg="Patch pods 200" time="2021-06-29T11:14:47.898Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow --filter=label=io.kubernetes.pod.name=add-pipeline-fz5cn-2565795640" time="2021-06-29T11:14:48.025Z" level=info msg="listed containers" containers="map[kfp-launcher:{7a39cf1866758e4c53dd138c38f90b2a683ae0059a3ecc2ea0b439a28dbb38c1 Exited {0 63760562000 <nil>}} main:{32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5 Up {0 63760562072 <nil>}} wait:{1531d46bf944fcdb3ac8ea356e0536bb2a01ba9791e9007699f6d195401fc431 Up {0 63760562057 <nil>}}]" time="2021-06-29T11:14:48.025Z" level=info msg="Killing sidecars [\"kfp-launcher\"]" time="2021-06-29T11:14:48.032Z" level=info msg="Get pods 200" time="2021-06-29T11:14:48.034Z" level=info msg="zero container IDs, assuming all containers have exited successfully" time="2021-06-29T11:14:48.034Z" level=info msg="Alloc=10350 TotalAlloc=17307 Sys=73297 NumGC=5 Goroutines=13" 
time="2021-06-29T11:14:48.173Z" level=fatal msg="Error: No such container:path: 32b49d8ac659f4e77ec768bd22ca38cfa97abd2006a185a4cce5c7d4a4f418f5:/tmp/outputs/sum/data\ntar: This does not look like a tar archive\ntar: Exiting with failure status due to previous errors\ngithub.com/argoproj/argo-workflows/v3/errors.New\n\t/go/src/github.com/argoproj/argo-workflows/errors/errors.go:49\ngithub.com/argoproj/argo-workflows/v3/errors.InternalError\n\t/go/src/github.com/argoproj/argo-workflows/errors/errors.go:60\ngithub.com/argoproj/argo-workflows/v3/workflow/common.RunCommand\n\t/go/src/github.com/argoproj/argo-workflows/workflow/common/util.go:238\ngithub.com/argoproj/argo-workflows/v3/workflow/common.RunShellCommand\n\t/go/src/github.com/argoproj/argo-workflows/workflow/common/util.go:255\ngithub.com/argoproj/argo-workflows/v3/workflow/executor/docker.(*DockerExecutor).GetFileContents\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/docker/docker.go:58\ngithub.com/argoproj/argo-workflows/v3/workflow/executor.(*WorkflowExecutor).SaveParameters\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/executor.go:492\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.waitContainer\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:57\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.NewWaitCommand.func1\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:18\ngithub.com/spf13/cobra.(*Command).execute\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846\ngithub.com/spf13/cobra.(*Command).ExecuteC\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950\ngithub.com/spf13/cobra.(*Command).Execute\n\t/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887\nmain.main\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/main.go:15\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:204\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1374" ``` I did some investigation, `/tmp/outputs/metrics/data` is path in the container where output artifacts are expected to be emitted. The archive path is an implementation detail of docker executor, so we can ignore it here. To verify, we may need to confirm whether inside the container the artifact was correctly generated. ### What did you expect to happen: Pipelines run successfully ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5943/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5943/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5942
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5942/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5942/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5942/events
https://github.com/kubeflow/pipelines/issues/5942
932,478,646
MDU6SXNzdWU5MzI0Nzg2NDY=
5,942
[pH] add PR title conventional format check
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Some mistakes I see often:\r\n* `feat(scope) title` (miss `:`)\r\n* `feat(scope):title` (miss space after `:`)\r\n* `feat (scope): title` (extra space between feat and (scope))\r\n\r\nthey can be identified by the bot that checks PR title as semantic format." ]
2021-06-29T10:38:58
2021-06-30T07:02:36
2021-06-30T07:02:36
CONTRIBUTOR
null
I just learned that https://github.com/zeke/semantic-pull-requests can check PR title format. It looks convenient to set up. This issue is the last item of https://github.com/kubeflow/pipelines/issues/3920#issuecomment-645280671
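For illustration, a minimal sketch (my own approximation, not the actual semantic-pull-requests implementation) of the kind of title check such a bot performs, covering the common mistakes listed in the comments above:

```python
# Rough conventional-commit title check: type, optional (scope), then ": ".
import re

CONVENTIONAL = re.compile(
    r"^(feat|fix|docs|style|refactor|perf|test|chore|ci|build|revert)"
    r"(\([\w\-\., ]+\))?: \S.*$"
)

def check_title(title: str) -> bool:
    return bool(CONVENTIONAL.match(title))

assert check_title("feat(scope): title")
assert not check_title("feat(scope) title")    # missing ':'
assert not check_title("feat(scope):title")    # missing space after ':'
assert not check_title("feat (scope): title")  # extra space before '(scope)'
```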
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5942/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5942/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5941
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5941/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5941/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5941/events
https://github.com/kubeflow/pipelines/issues/5941
932,392,708
MDU6SXNzdWU5MzIzOTI3MDg=
5,941
[backend & dsl] use argo workflows v1 API
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "I made a mistake, the change was that argo upgraded to CRD version v1.\r\nBut argo CRDs are still of version v1alpha1." ]
2021-06-29T09:03:05
2021-06-29T11:08:01
2021-06-29T11:08:01
CONTRIBUTOR
null
After upgrading to Argo Workflows v3.1.0, the workflow CRD has been upgraded to v1. We should upgrade our backend API usages and SDK compilation targets to v1.
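A quick way to see the distinction drawn in the comment above (a sketch assuming `kubectl` access to a cluster with Argo installed): the CustomResourceDefinition object itself is served from apiextensions v1, while the Workflow resources it defines are still v1alpha1.

```python
# Inspect the Argo Workflow CRD: its own apiVersion vs. the versions it serves.
import json
import subprocess

crd = json.loads(subprocess.check_output(
    ["kubectl", "get", "crd", "workflows.argoproj.io", "-o", "json"]))
print("CRD apiVersion:", crd["apiVersion"])          # expect apiextensions.k8s.io/v1
print("served versions:",
      [v["name"] for v in crd["spec"]["versions"]])  # expect ['v1alpha1']
```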
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5941/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5941/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5940
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5940/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5940/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5940/events
https://github.com/kubeflow/pipelines/issues/5940
932,382,227
MDU6SXNzdWU5MzIzODIyMjc=
5,940
[test infra] api server has frequent "Failed to update run" error logs
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "There is no visible problems in test infra, but reducing noisy error message is still vastly better when trying to debug and find the actual error message.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "I got the same error when updating one of my pipelines.\r\n\r\nNow I am not able to update or delete the pipeline.\r\n", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-06-29T08:51:16
2022-03-02T15:05:32
null
CONTRIBUTOR
null
> /go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30 github.com/kubeflow/pipelines/backend/api/go_client._ReportService_ReportWorkflow_Handler InternalServerError: Failed to update run a7a0c80b-e863-45de-ad4b-a8320c64b425. Row not found

Not sure why this happens; we need to confirm whether there are any leaked workflows.
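One way to check for leaked workflows (a rough sketch, assuming the KFP SDK and `kubectl` access; not an existing tool in the repo) is to compare the `pipeline/runid` label on Workflow objects against the runs the API server knows about:

```python
# Flag Argo workflows whose run ID has no matching run in the KFP API, which
# would explain "Failed to update run ... Row not found" in the apiserver logs.
import json
import subprocess

import kfp

client = kfp.Client()  # assumes port-forwarded or in-cluster access
known = {r.id for r in (client.list_runs(page_size=1000).runs or [])}

wfs = json.loads(subprocess.check_output(
    ["kubectl", "-n", "kubeflow", "get", "workflows", "-o", "json"]))
for wf in wfs["items"]:
    run_id = wf["metadata"].get("labels", {}).get("pipeline/runid")
    if run_id and run_id not in known:
        print("leaked workflow:", wf["metadata"]["name"], "run:", run_id)
```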
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5940/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5940/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5937
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5937/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5937/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5937/events
https://github.com/kubeflow/pipelines/issues/5937
932,142,684
MDU6SXNzdWU5MzIxNDI2ODQ=
5,937
[bug] condition sample stuck in running state with argo v3.1.0
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619511, "node_id": "MDU6TGFiZWw5MzA2MTk1MTE=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/priority/p0", "name": "priority/p0", "color": "db1203", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false } ]
null
[ "I tried to delete cache db and run the condition pipeline again without cache, but it is stuck in running state again.\r\n![image](https://user-images.githubusercontent.com/4957653/123820962-931f7480-d92d-11eb-83a5-ab1e346485a5.png)\r\n\r\nConclusion: we can rule out caching from possible reasons for this.", "```\r\napiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n annotations:\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline_compilation_time: 2021-06-29T15:04:57.873529\r\n pipelines.kubeflow.org/pipeline_spec: '{\"inputs\": [{\"default\": \"condition test\",\r\n \"name\": \"text\", \"optional\": true, \"type\": \"String\"}, {\"default\": \"\", \"name\":\r\n \"force_flip_result\", \"optional\": true, \"type\": \"String\"}], \"name\": \"single-condition-pipeline\"}'\r\n pipelines.kubeflow.org/run_name: my_pipeline 2021-06-29 15-04-57\r\n creationTimestamp: \"2021-06-29T15:05:23Z\"\r\n generateName: single-condition-pipeline-\r\n generation: 5\r\n labels:\r\n pipeline/runid: 2f797483-ea0f-47d5-ba09-4eb19fa1e271\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n workflows.argoproj.io/phase: Running\r\n manager: workflow-controller\r\n operation: Update\r\n time: \"2021-06-29T15:05:53Z\"\r\n name: single-condition-pipeline-cmn6x\r\n namespace: kubeflow\r\n resourceVersion: \"30785245\"\r\n selfLink: /apis/argoproj.io/v1alpha1/namespaces/kubeflow/workflows/single-condition-pipeline-cmn6x\r\n uid: 44151d24-2ac8-4a05-b28f-66e556901f89\r\nspec:\r\n arguments:\r\n parameters:\r\n - name: text\r\n value: condition test\r\n - name: force_flip_result\r\n value: heads\r\n entrypoint: single-condition-pipeline\r\n serviceAccountName: pipeline-runner\r\n templates:\r\n - dag:\r\n tasks:\r\n - arguments: {}\r\n name: flip-coin-2\r\n template: flip-coin-2\r\n - arguments:\r\n parameters:\r\n - name: flip-coin-2-Output\r\n value: '{{tasks.flip-coin-2.outputs.parameters.flip-coin-2-Output}}'\r\n dependencies:\r\n - flip-coin-2\r\n name: print-msg-2\r\n template: print-msg-2\r\n - arguments:\r\n parameters:\r\n - name: text\r\n value: '{{inputs.parameters.text}}'\r\n name: print-msg-3\r\n template: print-msg-3\r\n inputs:\r\n parameters:\r\n - name: text\r\n metadata:\r\n annotations:\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n name: condition-1\r\n outputs: {}\r\n - container:\r\n args:\r\n - --force-flip-result\r\n - '{{inputs.parameters.force_flip_result}}'\r\n - '----output-paths'\r\n - /tmp/outputs/Output/data\r\n command:\r\n - sh\r\n - -ec\r\n - |\r\n program_path=$(mktemp)\r\n printf \"%s\" \"$0\" > \"$program_path\"\r\n python3 -u \"$program_path\" \"$@\"\r\n - |\r\n def flip_coin(force_flip_result = ''):\r\n \"\"\"Flip a coin and output heads or tails randomly.\"\"\"\r\n if force_flip_result:\r\n return force_flip_result\r\n import random\r\n result = 'heads' if random.randint(0, 1) == 0 else 'tails'\r\n return result\r\n\r\n def _serialize_str(str_value: str) -> str:\r\n if not isinstance(str_value, str):\r\n raise TypeError('Value \"{}\" has type \"{}\" instead of str.'.format(str(str_value), str(type(str_value))))\r\n return str_value\r\n\r\n import argparse\r\n _parser = argparse.ArgumentParser(prog='Flip coin', description='Flip a coin and output heads or tails randomly.')\r\n _parser.add_argument(\"--force-flip-result\", dest=\"force_flip_result\", type=str, required=False, default=argparse.SUPPRESS)\r\n _parser.add_argument(\"----output-paths\", dest=\"_output_paths\", 
type=str, nargs=1)\r\n _parsed_args = vars(_parser.parse_args())\r\n _output_files = _parsed_args.pop(\"_output_paths\", [])\r\n\r\n _outputs = flip_coin(**_parsed_args)\r\n\r\n _outputs = [_outputs]\r\n\r\n _output_serializers = [\r\n _serialize_str,\r\n\r\n ]\r\n\r\n import os\r\n for idx, output_file in enumerate(_output_files):\r\n try:\r\n os.makedirs(os.path.dirname(output_file))\r\n except OSError:\r\n pass\r\n with open(output_file, 'w') as f:\r\n f.write(_output_serializers[idx](_outputs[idx]))\r\n image: python:3.7\r\n name: \"\"\r\n resources: {}\r\n inputs:\r\n parameters:\r\n - name: force_flip_result\r\n metadata:\r\n annotations:\r\n pipelines.kubeflow.org/arguments.parameters: '{\"force_flip_result\": \"{{inputs.parameters.force_flip_result}}\"}'\r\n pipelines.kubeflow.org/component_ref: '{}'\r\n pipelines.kubeflow.org/component_spec: '{\"description\": \"Flip a coin and output\r\n heads or tails randomly.\", \"implementation\": {\"container\": {\"args\": [{\"if\":\r\n {\"cond\": {\"isPresent\": \"force_flip_result\"}, \"then\": [\"--force-flip-result\",\r\n {\"inputValue\": \"force_flip_result\"}]}}, \"----output-paths\", {\"outputPath\":\r\n \"Output\"}], \"command\": [\"sh\", \"-ec\", \"program_path=$(mktemp)\\nprintf \\\"%s\\\"\r\n \\\"$0\\\" > \\\"$program_path\\\"\\npython3 -u \\\"$program_path\\\" \\\"$@\\\"\\n\", \"def\r\n flip_coin(force_flip_result = ''''):\\n \\\"\\\"\\\"Flip a coin and output heads\r\n or tails randomly.\\\"\\\"\\\"\\n if force_flip_result:\\n return force_flip_result\\n import\r\n random\\n result = ''heads'' if random.randint(0, 1) == 0 else ''tails''\\n return\r\n result\\n\\ndef _serialize_str(str_value: str) -> str:\\n if not isinstance(str_value,\r\n str):\\n raise TypeError(''Value \\\"{}\\\" has type \\\"{}\\\" instead of\r\n str.''.format(str(str_value), str(type(str_value))))\\n return str_value\\n\\nimport\r\n argparse\\n_parser = argparse.ArgumentParser(prog=''Flip coin'', description=''Flip\r\n a coin and output heads or tails randomly.'')\\n_parser.add_argument(\\\"--force-flip-result\\\",\r\n dest=\\\"force_flip_result\\\", type=str, required=False, default=argparse.SUPPRESS)\\n_parser.add_argument(\\\"----output-paths\\\",\r\n dest=\\\"_output_paths\\\", type=str, nargs=1)\\n_parsed_args = vars(_parser.parse_args())\\n_output_files\r\n = _parsed_args.pop(\\\"_output_paths\\\", [])\\n\\n_outputs = flip_coin(**_parsed_args)\\n\\n_outputs\r\n = [_outputs]\\n\\n_output_serializers = [\\n _serialize_str,\\n\\n]\\n\\nimport\r\n os\\nfor idx, output_file in enumerate(_output_files):\\n try:\\n os.makedirs(os.path.dirname(output_file))\\n except\r\n OSError:\\n pass\\n with open(output_file, ''w'') as f:\\n f.write(_output_serializers[idx](_outputs[idx]))\\n\"],\r\n \"image\": \"python:3.7\"}}, \"inputs\": [{\"default\": \"\", \"name\": \"force_flip_result\",\r\n \"optional\": true, \"type\": \"String\"}], \"name\": \"Flip coin\", \"outputs\": [{\"name\":\r\n \"Output\", \"type\": \"String\"}]}'\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline-sdk-type: kfp\r\n name: flip-coin\r\n outputs:\r\n artifacts:\r\n - name: flip-coin-Output\r\n path: /tmp/outputs/Output/data\r\n parameters:\r\n - name: flip-coin-Output\r\n valueFrom:\r\n path: /tmp/outputs/Output/data\r\n - container:\r\n args:\r\n - --force-flip-result\r\n - \"\"\r\n - '----output-paths'\r\n - /tmp/outputs/Output/data\r\n command:\r\n - 
sh\r\n - -ec\r\n - |\r\n program_path=$(mktemp)\r\n printf \"%s\" \"$0\" > \"$program_path\"\r\n python3 -u \"$program_path\" \"$@\"\r\n - |\r\n def flip_coin(force_flip_result = ''):\r\n \"\"\"Flip a coin and output heads or tails randomly.\"\"\"\r\n if force_flip_result:\r\n return force_flip_result\r\n import random\r\n result = 'heads' if random.randint(0, 1) == 0 else 'tails'\r\n return result\r\n\r\n def _serialize_str(str_value: str) -> str:\r\n if not isinstance(str_value, str):\r\n raise TypeError('Value \"{}\" has type \"{}\" instead of str.'.format(str(str_value), str(type(str_value))))\r\n return str_value\r\n\r\n import argparse\r\n _parser = argparse.ArgumentParser(prog='Flip coin', description='Flip a coin and output heads or tails randomly.')\r\n _parser.add_argument(\"--force-flip-result\", dest=\"force_flip_result\", type=str, required=False, default=argparse.SUPPRESS)\r\n _parser.add_argument(\"----output-paths\", dest=\"_output_paths\", type=str, nargs=1)\r\n _parsed_args = vars(_parser.parse_args())\r\n _output_files = _parsed_args.pop(\"_output_paths\", [])\r\n\r\n _outputs = flip_coin(**_parsed_args)\r\n\r\n _outputs = [_outputs]\r\n\r\n _output_serializers = [\r\n _serialize_str,\r\n\r\n ]\r\n\r\n import os\r\n for idx, output_file in enumerate(_output_files):\r\n try:\r\n os.makedirs(os.path.dirname(output_file))\r\n except OSError:\r\n pass\r\n with open(output_file, 'w') as f:\r\n f.write(_output_serializers[idx](_outputs[idx]))\r\n image: python:3.7\r\n name: \"\"\r\n resources: {}\r\n inputs: {}\r\n metadata:\r\n annotations:\r\n pipelines.kubeflow.org/arguments.parameters: '{\"force_flip_result\": \"\"}'\r\n pipelines.kubeflow.org/component_ref: '{}'\r\n pipelines.kubeflow.org/component_spec: '{\"description\": \"Flip a coin and output\r\n heads or tails randomly.\", \"implementation\": {\"container\": {\"args\": [{\"if\":\r\n {\"cond\": {\"isPresent\": \"force_flip_result\"}, \"then\": [\"--force-flip-result\",\r\n {\"inputValue\": \"force_flip_result\"}]}}, \"----output-paths\", {\"outputPath\":\r\n \"Output\"}], \"command\": [\"sh\", \"-ec\", \"program_path=$(mktemp)\\nprintf \\\"%s\\\"\r\n \\\"$0\\\" > \\\"$program_path\\\"\\npython3 -u \\\"$program_path\\\" \\\"$@\\\"\\n\", \"def\r\n flip_coin(force_flip_result = ''''):\\n \\\"\\\"\\\"Flip a coin and output heads\r\n or tails randomly.\\\"\\\"\\\"\\n if force_flip_result:\\n return force_flip_result\\n import\r\n random\\n result = ''heads'' if random.randint(0, 1) == 0 else ''tails''\\n return\r\n result\\n\\ndef _serialize_str(str_value: str) -> str:\\n if not isinstance(str_value,\r\n str):\\n raise TypeError(''Value \\\"{}\\\" has type \\\"{}\\\" instead of\r\n str.''.format(str(str_value), str(type(str_value))))\\n return str_value\\n\\nimport\r\n argparse\\n_parser = argparse.ArgumentParser(prog=''Flip coin'', description=''Flip\r\n a coin and output heads or tails randomly.'')\\n_parser.add_argument(\\\"--force-flip-result\\\",\r\n dest=\\\"force_flip_result\\\", type=str, required=False, default=argparse.SUPPRESS)\\n_parser.add_argument(\\\"----output-paths\\\",\r\n dest=\\\"_output_paths\\\", type=str, nargs=1)\\n_parsed_args = vars(_parser.parse_args())\\n_output_files\r\n = _parsed_args.pop(\\\"_output_paths\\\", [])\\n\\n_outputs = flip_coin(**_parsed_args)\\n\\n_outputs\r\n = [_outputs]\\n\\n_output_serializers = [\\n _serialize_str,\\n\\n]\\n\\nimport\r\n os\\nfor idx, output_file in enumerate(_output_files):\\n try:\\n os.makedirs(os.path.dirname(output_file))\\n except\r\n OSError:\\n 
pass\\n with open(output_file, ''w'') as f:\\n f.write(_output_serializers[idx](_outputs[idx]))\\n\"],\r\n \"image\": \"python:3.7\"}}, \"inputs\": [{\"default\": \"\", \"name\": \"force_flip_result\",\r\n \"optional\": true, \"type\": \"String\"}], \"name\": \"Flip coin\", \"outputs\": [{\"name\":\r\n \"Output\", \"type\": \"String\"}]}'\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline-sdk-type: kfp\r\n name: flip-coin-2\r\n outputs:\r\n artifacts:\r\n - name: flip-coin-2-Output\r\n path: /tmp/outputs/Output/data\r\n parameters:\r\n - name: flip-coin-2-Output\r\n valueFrom:\r\n path: /tmp/outputs/Output/data\r\n - container:\r\n args:\r\n - --msg\r\n - '{{inputs.parameters.flip-coin-Output}}'\r\n command:\r\n - sh\r\n - -ec\r\n - |\r\n program_path=$(mktemp)\r\n printf \"%s\" \"$0\" > \"$program_path\"\r\n python3 -u \"$program_path\" \"$@\"\r\n - |\r\n def print_msg(msg):\r\n \"\"\"Print a message.\"\"\"\r\n print(msg)\r\n\r\n import argparse\r\n _parser = argparse.ArgumentParser(prog='Print msg', description='Print a message.')\r\n _parser.add_argument(\"--msg\", dest=\"msg\", type=str, required=True, default=argparse.SUPPRESS)\r\n _parsed_args = vars(_parser.parse_args())\r\n\r\n _outputs = print_msg(**_parsed_args)\r\n image: python:3.7\r\n name: \"\"\r\n resources: {}\r\n inputs:\r\n parameters:\r\n - name: flip-coin-Output\r\n metadata:\r\n annotations:\r\n pipelines.kubeflow.org/arguments.parameters: '{\"msg\": \"{{inputs.parameters.flip-coin-Output}}\"}'\r\n pipelines.kubeflow.org/component_ref: '{}'\r\n pipelines.kubeflow.org/component_spec: '{\"description\": \"Print a message.\",\r\n \"implementation\": {\"container\": {\"args\": [\"--msg\", {\"inputValue\": \"msg\"}],\r\n \"command\": [\"sh\", \"-ec\", \"program_path=$(mktemp)\\nprintf \\\"%s\\\" \\\"$0\\\" >\r\n \\\"$program_path\\\"\\npython3 -u \\\"$program_path\\\" \\\"$@\\\"\\n\", \"def print_msg(msg):\\n \\\"\\\"\\\"Print\r\n a message.\\\"\\\"\\\"\\n print(msg)\\n\\nimport argparse\\n_parser = argparse.ArgumentParser(prog=''Print\r\n msg'', description=''Print a message.'')\\n_parser.add_argument(\\\"--msg\\\",\r\n dest=\\\"msg\\\", type=str, required=True, default=argparse.SUPPRESS)\\n_parsed_args\r\n = vars(_parser.parse_args())\\n\\n_outputs = print_msg(**_parsed_args)\\n\"],\r\n \"image\": \"python:3.7\"}}, \"inputs\": [{\"name\": \"msg\", \"type\": \"String\"}],\r\n \"name\": \"Print msg\"}'\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline-sdk-type: kfp\r\n name: print-msg\r\n outputs: {}\r\n - container:\r\n args:\r\n - --msg\r\n - '{{inputs.parameters.flip-coin-2-Output}}'\r\n command:\r\n - sh\r\n - -ec\r\n - |\r\n program_path=$(mktemp)\r\n printf \"%s\" \"$0\" > \"$program_path\"\r\n python3 -u \"$program_path\" \"$@\"\r\n - |\r\n def print_msg(msg):\r\n \"\"\"Print a message.\"\"\"\r\n print(msg)\r\n\r\n import argparse\r\n _parser = argparse.ArgumentParser(prog='Print msg', description='Print a message.')\r\n _parser.add_argument(\"--msg\", dest=\"msg\", type=str, required=True, default=argparse.SUPPRESS)\r\n _parsed_args = vars(_parser.parse_args())\r\n\r\n _outputs = print_msg(**_parsed_args)\r\n image: python:3.7\r\n name: \"\"\r\n resources: {}\r\n inputs:\r\n parameters:\r\n - name: flip-coin-2-Output\r\n metadata:\r\n 
annotations:\r\n pipelines.kubeflow.org/arguments.parameters: '{\"msg\": \"{{inputs.parameters.flip-coin-2-Output}}\"}'\r\n pipelines.kubeflow.org/component_ref: '{}'\r\n pipelines.kubeflow.org/component_spec: '{\"description\": \"Print a message.\",\r\n \"implementation\": {\"container\": {\"args\": [\"--msg\", {\"inputValue\": \"msg\"}],\r\n \"command\": [\"sh\", \"-ec\", \"program_path=$(mktemp)\\nprintf \\\"%s\\\" \\\"$0\\\" >\r\n \\\"$program_path\\\"\\npython3 -u \\\"$program_path\\\" \\\"$@\\\"\\n\", \"def print_msg(msg):\\n \\\"\\\"\\\"Print\r\n a message.\\\"\\\"\\\"\\n print(msg)\\n\\nimport argparse\\n_parser = argparse.ArgumentParser(prog=''Print\r\n msg'', description=''Print a message.'')\\n_parser.add_argument(\\\"--msg\\\",\r\n dest=\\\"msg\\\", type=str, required=True, default=argparse.SUPPRESS)\\n_parsed_args\r\n = vars(_parser.parse_args())\\n\\n_outputs = print_msg(**_parsed_args)\\n\"],\r\n \"image\": \"python:3.7\"}}, \"inputs\": [{\"name\": \"msg\", \"type\": \"String\"}],\r\n \"name\": \"Print msg\"}'\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline-sdk-type: kfp\r\n name: print-msg-2\r\n outputs: {}\r\n - container:\r\n args:\r\n - --msg\r\n - '{{inputs.parameters.text}}'\r\n command:\r\n - sh\r\n - -ec\r\n - |\r\n program_path=$(mktemp)\r\n printf \"%s\" \"$0\" > \"$program_path\"\r\n python3 -u \"$program_path\" \"$@\"\r\n - |\r\n def print_msg(msg):\r\n \"\"\"Print a message.\"\"\"\r\n print(msg)\r\n\r\n import argparse\r\n _parser = argparse.ArgumentParser(prog='Print msg', description='Print a message.')\r\n _parser.add_argument(\"--msg\", dest=\"msg\", type=str, required=True, default=argparse.SUPPRESS)\r\n _parsed_args = vars(_parser.parse_args())\r\n\r\n _outputs = print_msg(**_parsed_args)\r\n image: python:3.7\r\n name: \"\"\r\n resources: {}\r\n inputs:\r\n parameters:\r\n - name: text\r\n metadata:\r\n annotations:\r\n pipelines.kubeflow.org/arguments.parameters: '{\"msg\": \"{{inputs.parameters.text}}\"}'\r\n pipelines.kubeflow.org/component_ref: '{}'\r\n pipelines.kubeflow.org/component_spec: '{\"description\": \"Print a message.\",\r\n \"implementation\": {\"container\": {\"args\": [\"--msg\", {\"inputValue\": \"msg\"}],\r\n \"command\": [\"sh\", \"-ec\", \"program_path=$(mktemp)\\nprintf \\\"%s\\\" \\\"$0\\\" >\r\n \\\"$program_path\\\"\\npython3 -u \\\"$program_path\\\" \\\"$@\\\"\\n\", \"def print_msg(msg):\\n \\\"\\\"\\\"Print\r\n a message.\\\"\\\"\\\"\\n print(msg)\\n\\nimport argparse\\n_parser = argparse.ArgumentParser(prog=''Print\r\n msg'', description=''Print a message.'')\\n_parser.add_argument(\\\"--msg\\\",\r\n dest=\\\"msg\\\", type=str, required=True, default=argparse.SUPPRESS)\\n_parsed_args\r\n = vars(_parser.parse_args())\\n\\n_outputs = print_msg(**_parsed_args)\\n\"],\r\n \"image\": \"python:3.7\"}}, \"inputs\": [{\"name\": \"msg\", \"type\": \"String\"}],\r\n \"name\": \"Print msg\"}'\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline-sdk-type: kfp\r\n name: print-msg-3\r\n outputs: {}\r\n - dag:\r\n tasks:\r\n - arguments:\r\n parameters:\r\n - name: text\r\n value: '{{inputs.parameters.text}}'\r\n dependencies:\r\n - flip-coin\r\n name: condition-1\r\n template: condition-1\r\n when: '\"{{tasks.flip-coin.outputs.parameters.flip-coin-Output}}\" == 
\"heads\"'\r\n - arguments:\r\n parameters:\r\n - name: force_flip_result\r\n value: '{{inputs.parameters.force_flip_result}}'\r\n name: flip-coin\r\n template: flip-coin\r\n - arguments:\r\n parameters:\r\n - name: flip-coin-Output\r\n value: '{{tasks.flip-coin.outputs.parameters.flip-coin-Output}}'\r\n dependencies:\r\n - flip-coin\r\n name: print-msg\r\n template: print-msg\r\n inputs:\r\n parameters:\r\n - name: force_flip_result\r\n - name: text\r\n metadata:\r\n annotations:\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n name: single-condition-pipeline\r\n outputs: {}\r\nstatus:\r\n artifactRepositoryRef:\r\n default: true\r\n conditions:\r\n - status: \"False\"\r\n type: PodRunning\r\n finishedAt: null\r\n nodes:\r\n single-condition-pipeline-cmn6x:\r\n children:\r\n - single-condition-pipeline-cmn6x-511824490\r\n displayName: single-condition-pipeline-cmn6x\r\n finishedAt: null\r\n id: single-condition-pipeline-cmn6x\r\n inputs:\r\n parameters:\r\n - name: force_flip_result\r\n value: heads\r\n - name: text\r\n value: condition test\r\n name: single-condition-pipeline-cmn6x\r\n phase: Running\r\n progress: 4/4\r\n startedAt: \"2021-06-29T15:05:23Z\"\r\n templateName: single-condition-pipeline\r\n templateScope: local/single-condition-pipeline-cmn6x\r\n type: DAG\r\n single-condition-pipeline-cmn6x-511824490:\r\n boundaryID: single-condition-pipeline-cmn6x\r\n children:\r\n - single-condition-pipeline-cmn6x-1818944638\r\n displayName: flip-coin\r\n finishedAt: \"2021-06-29T15:05:24Z\"\r\n id: single-condition-pipeline-cmn6x-511824490\r\n inputs:\r\n parameters:\r\n - name: force_flip_result\r\n value: heads\r\n name: single-condition-pipeline-cmn6x.flip-coin\r\n outputs:\r\n artifacts:\r\n - name: flip-coin-Output\r\n path: /tmp/outputs/Output/data\r\n s3:\r\n key: artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1478846994/flip-coin-Output.tgz\r\n - name: main-logs\r\n s3:\r\n key: artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1478846994/main.log\r\n exitCode: \"0\"\r\n parameters:\r\n - name: flip-coin-Output\r\n value: heads\r\n valueFrom:\r\n path: /tmp/outputs/Output/data\r\n phase: Succeeded\r\n progress: 1/1\r\n resourcesDuration:\r\n cpu: 0\r\n memory: 0\r\n startedAt: \"2021-06-29T15:05:23Z\"\r\n templateName: flip-coin\r\n templateScope: local/single-condition-pipeline-cmn6x\r\n type: Pod\r\n single-condition-pipeline-cmn6x-1818944638:\r\n boundaryID: single-condition-pipeline-cmn6x\r\n children:\r\n - single-condition-pipeline-cmn6x-2284007376\r\n - single-condition-pipeline-cmn6x-2992979395\r\n displayName: condition-1\r\n finishedAt: \"2021-06-29T15:05:53Z\"\r\n id: single-condition-pipeline-cmn6x-1818944638\r\n inputs:\r\n parameters:\r\n - name: text\r\n value: condition test\r\n name: single-condition-pipeline-cmn6x.condition-1\r\n outboundNodes:\r\n - single-condition-pipeline-cmn6x-2976201776\r\n - single-condition-pipeline-cmn6x-2992979395\r\n phase: Succeeded\r\n progress: 3/3\r\n resourcesDuration:\r\n cpu: 0\r\n memory: 0\r\n startedAt: \"2021-06-29T15:05:33Z\"\r\n templateName: condition-1\r\n templateScope: local/single-condition-pipeline-cmn6x\r\n type: DAG\r\n single-condition-pipeline-cmn6x-2284007376:\r\n boundaryID: single-condition-pipeline-cmn6x-1818944638\r\n children:\r\n - single-condition-pipeline-cmn6x-2976201776\r\n displayName: flip-coin-2\r\n finishedAt: \"2021-06-29T15:05:34Z\"\r\n id: 
single-condition-pipeline-cmn6x-2284007376\r\n name: single-condition-pipeline-cmn6x.condition-1.flip-coin-2\r\n outputs:\r\n artifacts:\r\n - name: flip-coin-2-Output\r\n path: /tmp/outputs/Output/data\r\n s3:\r\n key: artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1354333128/flip-coin-2-Output.tgz\r\n - name: main-logs\r\n s3:\r\n key: artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1354333128/main.log\r\n exitCode: \"0\"\r\n parameters:\r\n - name: flip-coin-2-Output\r\n value: tails\r\n valueFrom:\r\n path: /tmp/outputs/Output/data\r\n phase: Succeeded\r\n progress: 1/1\r\n resourcesDuration:\r\n cpu: 0\r\n memory: 0\r\n startedAt: \"2021-06-29T15:05:33Z\"\r\n templateName: flip-coin-2\r\n templateScope: local/single-condition-pipeline-cmn6x\r\n type: Pod\r\n single-condition-pipeline-cmn6x-2501038730:\r\n boundaryID: single-condition-pipeline-cmn6x\r\n displayName: print-msg\r\n finishedAt: \"2021-06-29T15:05:35Z\"\r\n id: single-condition-pipeline-cmn6x-2501038730\r\n inputs:\r\n parameters:\r\n - name: flip-coin-Output\r\n value: heads\r\n name: single-condition-pipeline-cmn6x.print-msg\r\n outputs:\r\n artifacts:\r\n - name: main-logs\r\n s3:\r\n key: artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3067881266/main.log\r\n exitCode: \"0\"\r\n phase: Succeeded\r\n progress: 1/1\r\n resourcesDuration:\r\n cpu: 0\r\n memory: 0\r\n startedAt: \"2021-06-29T15:05:33Z\"\r\n templateName: print-msg\r\n templateScope: local/single-condition-pipeline-cmn6x\r\n type: Pod\r\n single-condition-pipeline-cmn6x-2976201776:\r\n boundaryID: single-condition-pipeline-cmn6x-1818944638\r\n displayName: print-msg-2\r\n finishedAt: \"2021-06-29T15:05:44Z\"\r\n id: single-condition-pipeline-cmn6x-2976201776\r\n inputs:\r\n parameters:\r\n - name: flip-coin-2-Output\r\n value: tails\r\n name: single-condition-pipeline-cmn6x.condition-1.print-msg-2\r\n outputs:\r\n artifacts:\r\n - name: main-logs\r\n s3:\r\n key: artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3432968360/main.log\r\n exitCode: \"0\"\r\n phase: Succeeded\r\n progress: 1/1\r\n resourcesDuration:\r\n cpu: 0\r\n memory: 0\r\n startedAt: \"2021-06-29T15:05:43Z\"\r\n templateName: print-msg-2\r\n templateScope: local/single-condition-pipeline-cmn6x\r\n type: Pod\r\n single-condition-pipeline-cmn6x-2992979395:\r\n boundaryID: single-condition-pipeline-cmn6x-1818944638\r\n displayName: print-msg-3\r\n finishedAt: \"2021-06-29T15:05:35Z\"\r\n id: single-condition-pipeline-cmn6x-2992979395\r\n inputs:\r\n parameters:\r\n - name: text\r\n value: condition test\r\n name: single-condition-pipeline-cmn6x.condition-1.print-msg-3\r\n outputs:\r\n artifacts:\r\n - name: main-logs\r\n s3:\r\n key: artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3449745979/main.log\r\n exitCode: \"0\"\r\n phase: Succeeded\r\n progress: 1/1\r\n resourcesDuration:\r\n cpu: 0\r\n memory: 0\r\n startedAt: \"2021-06-29T15:05:33Z\"\r\n templateName: print-msg-3\r\n templateScope: local/single-condition-pipeline-cmn6x\r\n type: Pod\r\n phase: Running\r\n progress: 5/5\r\n resourcesDuration:\r\n cpu: 0\r\n memory: 0\r\n startedAt: \"2021-06-29T15:05:23Z\"\r\n```", "workflow controller logs related to this pipeline run:\r\n\r\n> time=\"2021-06-29T14:53:52.825Z\" level=info msg=\"Processing workflow\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:53:52.921Z\" 
level=info msg=\"Updated phase -> Running\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:53:52.922Z\" level=info msg=\"DAG node single-condition-pipeline-dbznb initialized Running\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:53:52.924Z\" level=info msg=\"All of node single-condition-pipeline-dbznb.flip-coin dependencies [] completed\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:53:52.925Z\" level=info msg=\"Pod node single-condition-pipeline-dbznb-1478846994 initialized Pending\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:53:53.358Z\" level=info msg=\"Created pod: single-condition-pipeline-dbznb.flip-coin (single-condition-pipeline-dbznb-1478846994)\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:53:53.496Z\" level=info msg=\"Workflow update successful\" namespace=kubeflow phase=Running resourceVersion=30779093 workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.488Z\" level=info msg=\"Processing workflow\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.489Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-1478846994 exit code 0\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.489Z\" level=info msg=\"Setting node single-condition-pipeline-dbznb-1478846994 outputs: {\\\"parameters\\\":[{\\\"name\\\":\\\"flip-coin-Output\\\",\\\"value\\\":\\\"heads\\\",\\\"valueFrom\\\":{\\\"path\\\":\\\"/tmp/outputs/Output/data\\\"}}],\\\"artifacts\\\":[{\\\"name\\\":\\\"flip-coin-Output\\\",\\\"path\\\":\\\"/tmp/outputs/Output/data\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1478846994/flip-coin-Output.tgz\\\"}},{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1478846994/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.489Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-1478846994 status Pending -> Succeeded\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.490Z\" level=info msg=\"All of node single-condition-pipeline-dbznb.condition-1 dependencies [flip-coin] completed\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.490Z\" level=info msg=\"DAG node single-condition-pipeline-dbznb-2442073942 initialized Running\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.490Z\" level=info msg=\"All of node single-condition-pipeline-dbznb.condition-1.flip-coin-2 dependencies [] completed\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.490Z\" level=info msg=\"Pod node single-condition-pipeline-dbznb-1354333128 initialized Pending\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.734Z\" level=info msg=\"Created pod: single-condition-pipeline-dbznb.condition-1.flip-coin-2 (single-condition-pipeline-dbznb-1354333128)\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.734Z\" level=info msg=\"All of node single-condition-pipeline-dbznb.condition-1.print-msg-3 dependencies [] completed\" namespace=kubeflow 
workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:03.735Z\" level=info msg=\"Pod node single-condition-pipeline-dbznb-3449745979 initialized Pending\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:04.237Z\" level=info msg=\"Created pod: single-condition-pipeline-dbznb.condition-1.print-msg-3 (single-condition-pipeline-dbznb-3449745979)\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:04.237Z\" level=info msg=\"All of node single-condition-pipeline-dbznb.print-msg dependencies [flip-coin] completed\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:04.237Z\" level=info msg=\"Pod node single-condition-pipeline-dbznb-3067881266 initialized Pending\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:04.419Z\" level=info msg=\"Created pod: single-condition-pipeline-dbznb.print-msg (single-condition-pipeline-dbznb-3067881266)\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:04.548Z\" level=info msg=\"Workflow update successful\" namespace=kubeflow phase=Running resourceVersion=30779379 workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:04.555Z\" level=info msg=\"cleaning up pod\" action=labelPodCompleted key=kubeflow/single-condition-pipeline-dbznb-1478846994/labelPodCompleted\r\ntime=\"2021-06-29T14:54:13.789Z\" level=info msg=\"Processing workflow\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.790Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-3449745979 exit code 0\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.790Z\" level=info msg=\"Setting node single-condition-pipeline-dbznb-3449745979 outputs: {\\\"artifacts\\\":[{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3449745979/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.790Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-3449745979 status Pending -> Succeeded\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.790Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-3067881266 message: ContainerCreating\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.790Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-1354333128 exit code 0\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.790Z\" level=info msg=\"Setting node single-condition-pipeline-dbznb-1354333128 outputs: {\\\"parameters\\\":[{\\\"name\\\":\\\"flip-coin-2-Output\\\",\\\"value\\\":\\\"tails\\\",\\\"valueFrom\\\":{\\\"path\\\":\\\"/tmp/outputs/Output/data\\\"}}],\\\"artifacts\\\":[{\\\"name\\\":\\\"flip-coin-2-Output\\\",\\\"path\\\":\\\"/tmp/outputs/Output/data\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1354333128/flip-coin-2-Output.tgz\\\"}},{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1354333128/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.790Z\" level=info msg=\"Updating node 
single-condition-pipeline-dbznb-1354333128 status Pending -> Succeeded\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.791Z\" level=info msg=\"All of node single-condition-pipeline-dbznb.condition-1.print-msg-2 dependencies [flip-coin-2] completed\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.791Z\" level=info msg=\"Pod node single-condition-pipeline-dbznb-3432968360 initialized Pending\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.822Z\" level=info msg=\"Created pod: single-condition-pipeline-dbznb.condition-1.print-msg-2 (single-condition-pipeline-dbznb-3432968360)\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.857Z\" level=info msg=\"Workflow update successful\" namespace=kubeflow phase=Running resourceVersion=30779541 workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:13.866Z\" level=info msg=\"cleaning up pod\" action=labelPodCompleted key=kubeflow/single-condition-pipeline-dbznb-3449745979/labelPodCompleted\r\ntime=\"2021-06-29T14:54:13.866Z\" level=info msg=\"cleaning up pod\" action=labelPodCompleted key=kubeflow/single-condition-pipeline-dbznb-1354333128/labelPodCompleted\r\ntime=\"2021-06-29T14:54:23.823Z\" level=info msg=\"Processing workflow\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:23.824Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-3432968360 message: ContainerCreating\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:23.824Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-3067881266 exit code 0\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:23.824Z\" level=info msg=\"Setting node single-condition-pipeline-dbznb-3067881266 outputs: {\\\"artifacts\\\":[{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3067881266/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:23.824Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-3067881266 status Pending -> Succeeded\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:23.883Z\" level=info msg=\"Workflow update successful\" namespace=kubeflow phase=Running resourceVersion=30779712 workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:23.891Z\" level=info msg=\"cleaning up pod\" action=labelPodCompleted key=kubeflow/single-condition-pipeline-dbznb-3067881266/labelPodCompleted\r\ntime=\"2021-06-29T14:54:33.920Z\" level=info msg=\"Processing workflow\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.920Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-3432968360 exit code 0\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.920Z\" level=info msg=\"Setting node single-condition-pipeline-dbznb-3432968360 outputs: {\\\"artifacts\\\":[{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3432968360/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.920Z\" level=info msg=\"Updating node single-condition-pipeline-dbznb-3432968360 
status Pending -> Succeeded\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.921Z\" level=info msg=\"Outbound nodes of single-condition-pipeline-dbznb-2442073942 set to [single-condition-pipeline-dbznb-3432968360 single-condition-pipeline-dbznb-3449745979]\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.921Z\" level=info msg=\"node single-condition-pipeline-dbznb-2442073942 phase Running -> Succeeded\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.921Z\" level=info msg=\"node single-condition-pipeline-dbznb-2442073942 finished: 2021-06-29 14:54:33.921751136 +0000 UTC\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.921Z\" level=info msg=\"Checking daemoned children of single-condition-pipeline-dbznb-2442073942\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.953Z\" level=info msg=\"Workflow update successful\" namespace=kubeflow phase=Running resourceVersion=30779847 workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T14:54:33.962Z\" level=info msg=\"cleaning up pod\" action=labelPodCompleted key=kubeflow/single-condition-pipeline-dbznb-3432968360/labelPodCompleted\r\ntime=\"2021-06-29T14:54:43.984Z\" level=info msg=\"Processing workflow\" namespace=kubeflow workflow=single-condition-pipeline-dbznb\r\ntime=\"2021-06-29T15:05:33.264Z\" level=info msg=\"Setting node single-condition-pipeline-cmn6x-511824490 outputs: {\\\"parameters\\\":[{\\\"name\\\":\\\"flip-coin-Output\\\",\\\"value\\\":\\\"heads\\\",\\\"valueFrom\\\":{\\\"path\\\":\\\"/tmp/outputs/Output/data\\\"}}],\\\"artifacts\\\":[{\\\"name\\\":\\\"flip-coin-Output\\\",\\\"path\\\":\\\"/tmp/outputs/Output/data\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1478846994/flip-coin-Output.tgz\\\"}},{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1478846994/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-cmn6x\r\ntime=\"2021-06-29T15:05:43.308Z\" level=info msg=\"Setting node single-condition-pipeline-cmn6x-2992979395 outputs: {\\\"artifacts\\\":[{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3449745979/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-cmn6x\r\ntime=\"2021-06-29T15:05:43.308Z\" level=info msg=\"Setting node single-condition-pipeline-cmn6x-2284007376 outputs: {\\\"parameters\\\":[{\\\"name\\\":\\\"flip-coin-2-Output\\\",\\\"value\\\":\\\"tails\\\",\\\"valueFrom\\\":{\\\"path\\\":\\\"/tmp/outputs/Output/data\\\"}}],\\\"artifacts\\\":[{\\\"name\\\":\\\"flip-coin-2-Output\\\",\\\"path\\\":\\\"/tmp/outputs/Output/data\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1354333128/flip-coin-2-Output.tgz\\\"}},{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-1354333128/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-cmn6x\r\ntime=\"2021-06-29T15:05:43.308Z\" level=info msg=\"Setting node single-condition-pipeline-cmn6x-2501038730 outputs: 
{\\\"artifacts\\\":[{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3067881266/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-cmn6x\r\ntime=\"2021-06-29T15:05:53.339Z\" level=info msg=\"Setting node single-condition-pipeline-cmn6x-2976201776 outputs: {\\\"artifacts\\\":[{\\\"name\\\":\\\"main-logs\\\",\\\"s3\\\":{\\\"key\\\":\\\"artifacts/single-condition-pipeline-dbznb/2021/06/29/single-condition-pipeline-dbznb-3432968360/main.log\\\"}}]}\" namespace=kubeflow workflow=single-condition-pipeline-cmn6x", "After some iterations, the minimal reproducing example is like:\r\n\r\n```python\r\nimport kfp\r\nfrom kfp import compiler\r\nfrom kfp import components\r\nfrom kfp import dsl\r\n\r\nflip_coin_op = components.load_component_from_text(\r\n '''\r\nname: flip\r\ninputs:\r\n - name: force_flip_result\r\n type: String\r\n default: 'heads'\r\noutputs:\r\n - {name: flip_result, type: String}\r\nimplementation:\r\n container:\r\n image: alpine\r\n command: [sh, -c]\r\n args:\r\n - |\r\n mkdir -p \"$(dirname $1)\"\r\n echo $0 > $1\r\n - {inputValue: force_flip_result}\r\n - {outputPath: flip_result}\r\n'''\r\n)\r\n\r\nprint_op = components.load_component_from_text(\r\n '''\r\nname: print\r\ninputs:\r\n - {name: text, type: String}\r\nimplementation:\r\n container:\r\n image: alpine\r\n command: [sh, -c]\r\n args:\r\n - 'echo $0'\r\n - {inputValue: text}\r\n'''\r\n)\r\n\r\n# stuck\r\n@dsl.pipeline(name='single-condition-pipeline')\r\ndef my_pipeline_4():\r\n flip1 = flip_coin_op()\r\n print_op(flip1.output)\r\n\r\n with dsl.Condition(flip1.output == 'heads'):\r\n print_op(flip1.output)\r\n\r\n\r\nif __name__ == '__main__':\r\n pipeline = my_pipeline_4\r\n compiler.Compiler().compile(pipeline, package_path='condition.yaml')\r\n # kfp.Client().create_run_from_pipeline_func(\r\n # pipeline,\r\n # arguments={'force_flip_result': 'heads'},\r\n # # mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE\r\n # )\r\n```", "Created upstream issue in argo after reproducing with a native argo workflow:\r\nhttps://github.com/argoproj/argo-workflows/issues/6254", "I verified the bug has been fixed in argo v3.1.1" ]
2021-06-29T02:21:49
2021-07-03T12:52:49
2021-07-03T12:52:49
CONTRIBUTOR
null
### What steps did you take 1. run v2 sample test ### What happened: 1. it times out waiting for condition sample to finish 2. the sample runs and gets cached as usual ![image](https://user-images.githubusercontent.com/4957653/123727051-9847d980-d8c3-11eb-8793-6e9a8724ab65.png) 3. but the workflow itself is always stuck in running state ### What did you expect to happen: the sample should finish as normal ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? KFP standalone 1.6.0-alpha.1 <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP SDK version: master <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> Here's status of the argo workflow, it's also stuck in running. So this is not a problem with persistence agent. ``` status: artifactRepositoryRef: default: true conditions: - status: "False" type: PodRunning finishedAt: null nodes: single-condition-pipeline-xb2hw: children: - single-condition-pipeline-xb2hw-690456091 displayName: single-condition-pipeline-xb2hw finishedAt: null id: single-condition-pipeline-xb2hw inputs: parameters: - name: force_flip_result value: heads - name: text value: condition test name: single-condition-pipeline-xb2hw phase: Running progress: 4/4 startedAt: "2021-06-28T13:58:12Z" templateName: single-condition-pipeline templateScope: local/single-condition-pipeline-xb2hw type: DAG single-condition-pipeline-xb2hw-151083147: boundaryID: single-condition-pipeline-xb2hw children: - single-condition-pipeline-xb2hw-329071385 - single-condition-pipeline-xb2hw-1404454342 displayName: condition-1 finishedAt: "2021-06-28T13:59:30Z" id: single-condition-pipeline-xb2hw-151083147 inputs: parameters: - name: text value: condition test name: single-condition-pipeline-xb2hw.condition-1 outboundNodes: - single-condition-pipeline-xb2hw-1421231961 - single-condition-pipeline-xb2hw-1404454342 phase: Succeeded progress: 3/3 resourcesDuration: cpu: 0 memory: 0 startedAt: "2021-06-28T13:58:50Z" templateName: condition-1 templateScope: local/single-condition-pipeline-xb2hw type: DAG single-condition-pipeline-xb2hw-329071385: boundaryID: single-condition-pipeline-xb2hw-151083147 children: - single-condition-pipeline-xb2hw-1421231961 displayName: flip-coin-2 finishedAt: "2021-06-28T13:58:56Z" id: single-condition-pipeline-xb2hw-329071385 name: single-condition-pipeline-xb2hw.condition-1.flip-coin-2 outputs: artifacts: - name: flip-coin-2-Output path: /tmp/outputs/Output/data s3: key: artifacts/single-condition-pipeline-982vv/2021/06/28/single-condition-pipeline-982vv-3001282121/flip-coin-2-Output.tgz - name: main-logs s3: key: artifacts/single-condition-pipeline-982vv/2021/06/28/single-condition-pipeline-982vv-3001282121/main.log exitCode: "0" parameters: - name: flip-coin-2-Output value: heads valueFrom: path: /tmp/outputs/Output/data phase: Succeeded progress: 1/1 resourcesDuration: cpu: 0 memory: 0 startedAt: "2021-06-28T13:58:50Z" templateName: flip-coin-2 templateScope: local/single-condition-pipeline-xb2hw type: Pod single-condition-pipeline-xb2hw-690456091: boundaryID: single-condition-pipeline-xb2hw children: - single-condition-pipeline-xb2hw-151083147 displayName: flip-coin finishedAt: "2021-06-28T13:58:41Z" hostNodeName: gke-kfp-std-default-pool-1c1207aa-2eyx id: 
single-condition-pipeline-xb2hw-690456091 inputs: parameters: - name: force_flip_result value: heads name: single-condition-pipeline-xb2hw.flip-coin outputs: artifacts: - name: flip-coin-Output path: /tmp/outputs/Output/data s3: key: artifacts/single-condition-pipeline-xb2hw/2021/06/28/single-condition-pipeline-xb2hw-690456091/flip-coin-Output.tgz - name: main-logs s3: key: artifacts/single-condition-pipeline-xb2hw/2021/06/28/single-condition-pipeline-xb2hw-690456091/main.log exitCode: "0" parameters: - name: flip-coin-Output value: heads valueFrom: path: /tmp/outputs/Output/data phase: Succeeded progress: 1/1 resourcesDuration: cpu: 7 memory: 2 startedAt: "2021-06-28T13:58:12Z" templateName: flip-coin templateScope: local/single-condition-pipeline-xb2hw type: Pod single-condition-pipeline-xb2hw-1404454342: boundaryID: single-condition-pipeline-xb2hw-151083147 displayName: print-msg-3 finishedAt: "2021-06-28T13:59:02Z" hostNodeName: gke-kfp-std-default-pool-1c1207aa-2eyx id: single-condition-pipeline-xb2hw-1404454342 inputs: parameters: - name: text value: condition test name: single-condition-pipeline-xb2hw.condition-1.print-msg-3 outputs: artifacts: - name: main-logs s3: key: artifacts/single-condition-pipeline-982vv/2021/06/28/single-condition-pipeline-982vv-3511891414/main.log exitCode: "0" phase: Succeeded progress: 1/1 resourcesDuration: cpu: 0 memory: 0 startedAt: "2021-06-28T13:58:50Z" templateName: print-msg-3 templateScope: local/single-condition-pipeline-xb2hw type: Pod single-condition-pipeline-xb2hw-1421231961: boundaryID: single-condition-pipeline-xb2hw-151083147 displayName: print-msg-2 finishedAt: "2021-06-28T13:59:09Z" id: single-condition-pipeline-xb2hw-1421231961 inputs: parameters: - name: flip-coin-2-Output value: heads name: single-condition-pipeline-xb2hw.condition-1.print-msg-2 outputs: artifacts: - name: main-logs s3: key: artifacts/single-condition-pipeline-982vv/2021/06/28/single-condition-pipeline-982vv-3528669033/main.log exitCode: "0" phase: Succeeded progress: 1/1 resourcesDuration: cpu: 0 memory: 0 startedAt: "2021-06-28T13:59:00Z" templateName: print-msg-2 templateScope: local/single-condition-pipeline-xb2hw type: Pod single-condition-pipeline-xb2hw-3316531579: boundaryID: single-condition-pipeline-xb2hw displayName: print-msg finishedAt: "2021-06-28T13:59:04Z" hostNodeName: gke-kfp-std-default-pool-1c1207aa-2eyx id: single-condition-pipeline-xb2hw-3316531579 inputs: parameters: - name: flip-coin-Output value: heads name: single-condition-pipeline-xb2hw.print-msg outputs: artifacts: - name: main-logs s3: key: artifacts/single-condition-pipeline-982vv/2021/06/28/single-condition-pipeline-982vv-1648329963/main.log exitCode: "0" phase: Succeeded progress: 1/1 resourcesDuration: cpu: 0 memory: 0 startedAt: "2021-06-28T13:58:50Z" templateName: print-msg templateScope: local/single-condition-pipeline-xb2hw type: Pod phase: Running progress: 5/5 resourcesDuration: cpu: 7 memory: 2 startedAt: "2021-06-28T13:58:12Z" ``` ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> /area sdk <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
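One way to detect this symptom mechanically — a short sketch, assuming the official `kubernetes` Python client and access to the cluster, that flags Argo Workflows whose top-level phase is still Running even though every node has reached a terminal phase, which is exactly the state in the status dump above:

```python
from kubernetes import client, config

# Argo node phases that count as finished.
TERMINAL = {"Succeeded", "Failed", "Error", "Skipped", "Omitted"}

config.load_kube_config()  # or config.load_incluster_config()
api = client.CustomObjectsApi()
wfs = api.list_namespaced_custom_object(
    group="argoproj.io", version="v1alpha1",
    namespace="kubeflow", plural="workflows",
)
for wf in wfs["items"]:
    status = wf.get("status", {})
    nodes = status.get("nodes", {})
    # Workflow still Running, yet every node is already terminal.
    if status.get("phase") == "Running" and nodes and all(
            n.get("phase") in TERMINAL for n in nodes.values()):
        print("stuck:", wf["metadata"]["name"])
```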
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5937/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5937/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5933
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5933/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5933/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5933/events
https://github.com/kubeflow/pipelines/issues/5933
931,921,553
MDU6SXNzdWU5MzE5MjE1NTM=
5,933
[feature] Support for authenticating kfp_server_api api_client
{ "login": "subodh101", "id": 22378766, "node_id": "MDQ6VXNlcjIyMzc4NzY2", "avatar_url": "https://avatars.githubusercontent.com/u/22378766?v=4", "gravatar_id": "", "url": "https://api.github.com/users/subodh101", "html_url": "https://github.com/subodh101", "followers_url": "https://api.github.com/users/subodh101/followers", "following_url": "https://api.github.com/users/subodh101/following{/other_user}", "gists_url": "https://api.github.com/users/subodh101/gists{/gist_id}", "starred_url": "https://api.github.com/users/subodh101/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/subodh101/subscriptions", "organizations_url": "https://api.github.com/users/subodh101/orgs", "repos_url": "https://api.github.com/users/subodh101/repos", "events_url": "https://api.github.com/users/subodh101/events{/privacy}", "received_events_url": "https://api.github.com/users/subodh101/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Just wanna clarify, can you use `kfp client` to do the authentication? What's the use case of using `kfp_server_api` instead of kfp client?", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "This issue has been automatically closed because it has not had recent activity. Please comment \"/reopen\" to reopen it.\n" ]
2021-06-28T19:49:00
2022-03-03T03:05:39
2022-03-03T03:05:39
MEMBER
null
### Feature Area /area sdk ### What feature would you like to see? A function to get an auth token for configuring the `kfp_server_api` api_client. I currently can't find any function in `kfp_server_api` similar to the one in the `kfp` Client class for authorization, but it turns out that retrieving the bearer token works the same way. So, would it be better to add the same functionality to the `kfp_server_api` api_client as well? ### What is the use case or pain point? Be able to disable/delete recurring jobs. <!-- It helps us understand the benefit of this feature for your use case. --> ### Is there a workaround currently? ```python import json import os from pprint import pprint import requests import kfp_server_api from kfp_server_api.rest import ApiException LOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json') OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token' def get_auth_token(client_id): """Gets auth token from user account.""" if os.path.exists(LOCAL_KFP_CREDENTIAL): # fetch IAP auth token using the locally stored credentials. with open(LOCAL_KFP_CREDENTIAL, 'r') as f: credentials = json.load(f) if client_id in credentials: payload = { "client_id": credentials[client_id]['other_client_id'], "client_secret": credentials[client_id]['other_client_secret'], "refresh_token": credentials[client_id]['refresh_token'], "grant_type": "refresh_token", "audience": client_id, } res = requests.post(OAUTH_TOKEN_URI, data=payload) res.raise_for_status() return str(json.loads(res.text)[u"id_token"]) print("Exception getting auth token!") return None token = get_auth_token(client_id=os.environ['KF_SERVER_CLIENT_ID']) # Configure API key authorization: Bearer config = kfp_server_api.Configuration( host = os.environ['HOST'], api_key = { 'authorization': f'Bearer {token}' } ) # Enter a context with an instance of the API client with kfp_server_api.ApiClient(config) as api_client: # Create an instance of the API class job_client = kfp_server_api.JobServiceApi(api_client=api_client) # disable the existing recurring run try: api_response = job_client.disable_job(id=recurring_runs_id) pprint(api_response) except ApiException as e: print(f"Exception occurred while disabling recurring run {recurring_runs_id}: {e}") ``` --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
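For comparison with the first comment's suggestion, the same operation can be sketched with the `kfp` client, which accepts a pre-fetched bearer token via its `existing_token` argument; this assumes the `get_auth_token` helper and the `HOST`, `KF_SERVER_CLIENT_ID`, and `recurring_runs_id` placeholders from the workaround above:

```python
import os

import kfp

# Reuses get_auth_token and the placeholders from the workaround above.
token = get_auth_token(client_id=os.environ['KF_SERVER_CLIENT_ID'])

# kfp.Client can take a pre-fetched bearer token directly, so no
# separate kfp_server_api configuration is needed.
client = kfp.Client(host=os.environ['HOST'], existing_token=token)
client.disable_job(job_id=recurring_runs_id)  # placeholder id, as above
```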
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5933/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5933/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5932
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5932/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5932/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5932/events
https://github.com/kubeflow/pipelines/issues/5932
931,759,104
MDU6SXNzdWU5MzE3NTkxMDQ=
5,932
[Sample] Create visualization samples
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "Consider the possibility of adding yaml sample for visualization. ", "https://github.com/kubeflow/pipelines/pull/5947#issuecomment-871704880" ]
2021-06-28T16:44:14
2021-07-19T21:33:30
2021-07-19T18:32:15
COLLABORATOR
null
Visualization doc: https://www.kubeflow.org/docs/components/pipelines/sdk/output-viewer/#available-output-viewers Create pipeline examples with sample data in v1 format, so that we can test them once the following issues are unblocked: - https://github.com/kubeflow/pipelines/issues/5831 - https://github.com/kubeflow/pipelines/issues/5830 This is to prepare for validating the https://github.com/kubeflow/pipelines/issues/5666 changes. The list of visualization types is: - [x] Confusion matrix - [x] Markdown - [x] ROC curve - [x] Table - [x] TensorBoard - [x] Web app
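For reference, the v1 output-viewer contract from the linked doc is a JSON blob of viewer specs emitted as the `mlpipeline_ui_metadata` output; a minimal markdown-viewer sketch (an illustration of the format, not one of the samples tracked here) could look like:

```python
from typing import NamedTuple

from kfp.components import create_component_from_func

def markdown_visualization() -> NamedTuple(
        'VisualizationOutput', [('mlpipeline_ui_metadata', 'UI_metadata')]):
    """Emits inline markdown in the v1 output-viewer metadata format."""
    import json
    from collections import namedtuple

    # The UI renders each entry in 'outputs' with the matching viewer.
    metadata = {
        'outputs': [{
            'type': 'markdown',
            'storage': 'inline',
            'source': '# Visualization sample',
        }]
    }
    output = namedtuple('VisualizationOutput', ['mlpipeline_ui_metadata'])
    return output(json.dumps(metadata))

markdown_visualization_op = create_component_from_func(
    markdown_visualization, base_image='python:3.7')
```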
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5932/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5932/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5931
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5931/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5931/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5931/events
https://github.com/kubeflow/pipelines/issues/5931
931,682,324
MDU6SXNzdWU5MzE2ODIzMjQ=
5,931
[bug] kubeflow 1.2.0 (kfctl_aws.v1.2.0.yaml) install failed to build LoadBalancer configuration(ALB) on AWS (eks cluster).
{ "login": "amalendur", "id": 49721445, "node_id": "MDQ6VXNlcjQ5NzIxNDQ1", "avatar_url": "https://avatars.githubusercontent.com/u/49721445?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amalendur", "html_url": "https://github.com/amalendur", "followers_url": "https://api.github.com/users/amalendur/followers", "following_url": "https://api.github.com/users/amalendur/following{/other_user}", "gists_url": "https://api.github.com/users/amalendur/gists{/gist_id}", "starred_url": "https://api.github.com/users/amalendur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amalendur/subscriptions", "organizations_url": "https://api.github.com/users/amalendur/orgs", "repos_url": "https://api.github.com/users/amalendur/repos", "events_url": "https://api.github.com/users/amalendur/events{/privacy}", "received_events_url": "https://api.github.com/users/amalendur/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "@amalendur: The label(s) `kind/bug.` cannot be applied, because the repository doesn't have them.\n\n<details>\n\nIn response to [this](https://github.com/kubeflow/pipelines/issues/5931):\n\n>/kind bug.\r\n>\r\n>### What steps did you take\r\n>I have deployed a EKS cluster and on top of that cluster I am trying to deploy \"Kubeflow\" through GitLab pipeline.\r\n>In the pipeline script I have set the Kube-context before execute the Kubeflow config-file(kfctl_aws.v1.2.0.yaml).\r\n>I have executed the steps in the following order;\r\n>\r\n>1. Deploy EKS cluster with Nodegrop (deploy through terraform)\r\n>2. Download the Kubeflow config-file from \"https://raw.githubusercontent.com/kubeflow/manifests/v1.2-branch/kfdef/kfctl_aws.v1.2.0.yaml\"\r\n>3. Update the config-file with;\r\n>\r\n>```\r\n>apiVersion: kfdef.apps.kubeflow.org/v1\r\n>kind: KfDef\r\n>metadata:\r\n> namespace: kubeflow\r\n>spec:\r\n> applications:\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: namespaces/base\r\n> name: namespaces\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: application/v3\r\n> name: application\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws/application/istio-1-3-1-stack\r\n> name: istio-stack\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws/application/cluster-local-gateway-1-3-1\r\n> name: cluster-local-gateway\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: istio/istio/base\r\n> name: istio\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws/application/cert-manager-crds\r\n> name: cert-manager-crds\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws/application/cert-manager-kube-system-resources\r\n> name: cert-manager-kube-system-resources\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws/application/cert-manager\r\n> name: cert-manager\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: metacontroller/base\r\n> name: metacontroller\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws/application/oidc-authservice\r\n> name: oidc-authservice\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws/application/dex-auth\r\n> name: dex\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: admission-webhook/bootstrap/overlays/application\r\n> name: bootstrap\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: spark/spark-operator/overlays/application\r\n> name: spark-operator\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws\r\n> name: kubeflow-apps\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: aws/istio-ingress/base_v3\r\n> name: istio-ingress\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: knative/installs/generic\r\n> name: knative\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: kfserving/installs/generic\r\n> name: kfserving\r\n> - kustomizeConfig:\r\n> repoRef:\r\n> name: manifests\r\n> path: stacks/aws/application/spartakus\r\n> name: spartakus\r\n> plugins:\r\n> - kind: KfAwsPlugin\r\n> metadata:\r\n> name: aws\r\n> spec:\r\n> auth:\r\n> basicAuth:\r\n> password: <password>\r\n> username: admin\r\n> region: <aws_default_region>\r\n> #enablePodIamPolicy: true\r\n> roles:\r\n> - <eks_node_role>\r\n> repos:\r\n> - name: manifests\r\n> uri: 
https://github.com/kubeflow/manifests/archive/v1.2.0.tar.gz\r\n> version: v1.2-branch\r\n>```\r\n>```\r\n><aws_region> : AWS Region, where the cluster has been created.\r\n><eks_node_role> : Name of the IAM role for the nodegroup.\r\n><password> : Admin password\r\n>```\r\n>4. Set the Kube-context;\r\n>`aws eks --region ${AWS_REGION} update-kubeconfig --name ${CLUSTER_NAME}`\r\n>5. Set the environment;\r\n>```\r\n>wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gz --no-check-certificate && tar -xvzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gz && chmod +x kfctl && cp ./kfctl /usr/local/bin/kfctl\r\n>curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/aws-iam-authenticator && chmod +x ./aws-iam-authenticator && cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator\r\n>export AWS_CLUSTER_NAME=${CLUSTER_NAME}\r\n>export KF_NAME=${AWS_CLUSTER_NAME}\r\n>KF_DIR = /opt/ar-usv-poc-plt\r\n>kfctl build -f kfctl_aws.v1.2.0.yaml -V\r\n>kfctl apply -f kfctl_aws.v1.2.0.yaml -V\r\n>```\r\n>\r\n>### What happened:\r\n>Getting the following error:\r\n>`E0628 14:56:25.659557 1 controller.go:217] kubebuilder/controller \"msg\"=\"Reconciler error\" \"error\"=\"failed to build LoadBalancer configuration due to retrieval of subnets failed to resolve 2 qualified subnets. Subnets must contain the kubernetes.io/cluster/\\u003ccluster name\\u003e tag with a value of shared or owned and the kubernetes.io/role/elb tag signifying it should be used for ALBs Additionally, there must be at least 2 subnets with unique availability zones as required by ALBs. Either tag subnets to meet this requirement or use the subnets annotation on the ingress resource to explicitly call out what subnets to use for ALB creation. The subnets that did resolve were []\" \"controller\"=\"alb-ingress-controller\" \"request\"={\"Namespace\":\"istio-system\",\"Name\":\"istio-ingress\"}`\r\n>\r\n>### What did you expect to happen:\r\n>load-balancer should be configure, up & running.\r\n>\r\n>### Environment:\r\n>Platform.\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n</details>", "@google-oss-robot , thank you for guidance.\r\nI have fixed the error . \r\nThe issue was with installation directory & it has been fixed." ]
2021-06-28T15:14:45
2021-06-29T08:13:01
2021-06-29T08:13:01
NONE
null
/kind bug. ### What steps did you take I have deployed an EKS cluster, and on top of that cluster I am trying to deploy "Kubeflow" through a GitLab pipeline. In the pipeline script I have set the Kube-context before executing the Kubeflow config-file (kfctl_aws.v1.2.0.yaml). I have executed the steps in the following order: 1. Deploy the EKS cluster with a Nodegroup (deployed through Terraform) 2. Download the Kubeflow config-file from "https://raw.githubusercontent.com/kubeflow/manifests/v1.2-branch/kfdef/kfctl_aws.v1.2.0.yaml" 3. Update the config-file with: ``` apiVersion: kfdef.apps.kubeflow.org/v1 kind: KfDef metadata: namespace: kubeflow spec: applications: - kustomizeConfig: repoRef: name: manifests path: namespaces/base name: namespaces - kustomizeConfig: repoRef: name: manifests path: application/v3 name: application - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/istio-1-3-1-stack name: istio-stack - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/cluster-local-gateway-1-3-1 name: cluster-local-gateway - kustomizeConfig: repoRef: name: manifests path: istio/istio/base name: istio - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/cert-manager-crds name: cert-manager-crds - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/cert-manager-kube-system-resources name: cert-manager-kube-system-resources - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/cert-manager name: cert-manager - kustomizeConfig: repoRef: name: manifests path: metacontroller/base name: metacontroller - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/oidc-authservice name: oidc-authservice - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/dex-auth name: dex - kustomizeConfig: repoRef: name: manifests path: admission-webhook/bootstrap/overlays/application name: bootstrap - kustomizeConfig: repoRef: name: manifests path: spark/spark-operator/overlays/application name: spark-operator - kustomizeConfig: repoRef: name: manifests path: stacks/aws name: kubeflow-apps - kustomizeConfig: repoRef: name: manifests path: aws/istio-ingress/base_v3 name: istio-ingress - kustomizeConfig: repoRef: name: manifests path: knative/installs/generic name: knative - kustomizeConfig: repoRef: name: manifests path: kfserving/installs/generic name: kfserving - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/spartakus name: spartakus plugins: - kind: KfAwsPlugin metadata: name: aws spec: auth: basicAuth: password: <password> username: admin region: <aws_default_region> #enablePodIamPolicy: true roles: - <eks_node_role> repos: - name: manifests uri: https://github.com/kubeflow/manifests/archive/v1.2.0.tar.gz version: v1.2-branch ``` ``` <aws_region> : AWS Region, where the cluster has been created. <eks_node_role> : Name of the IAM role for the nodegroup. <password> : Admin password ``` 4. Set the Kube-context: `aws eks --region ${AWS_REGION} update-kubeconfig --name ${CLUSTER_NAME}` 5. Set the environment: ``` wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gz --no-check-certificate && tar -xvzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gz && chmod +x kfctl && cp ./kfctl /usr/local/bin/kfctl curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/aws-iam-authenticator && chmod +x ./aws-iam-authenticator && cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator export AWS_CLUSTER_NAME=${CLUSTER_NAME} export KF_NAME=${AWS_CLUSTER_NAME} KF_DIR=/opt/ar-usv-poc-plt kfctl build -f kfctl_aws.v1.2.0.yaml -V kfctl apply -f kfctl_aws.v1.2.0.yaml -V ``` ### What happened: Getting the following error: `E0628 14:56:25.659557 1 controller.go:217] kubebuilder/controller "msg"="Reconciler error" "error"="failed to build LoadBalancer configuration due to retrieval of subnets failed to resolve 2 qualified subnets. Subnets must contain the kubernetes.io/cluster/\u003ccluster name\u003e tag with a value of shared or owned and the kubernetes.io/role/elb tag signifying it should be used for ALBs Additionally, there must be at least 2 subnets with unique availability zones as required by ALBs. Either tag subnets to meet this requirement or use the subnets annotation on the ingress resource to explicitly call out what subnets to use for ALB creation. The subnets that did resolve were []" "controller"="alb-ingress-controller" "request"={"Namespace":"istio-system","Name":"istio-ingress"}` ### What did you expect to happen: The load balancer should be configured, up, and running. ### Environment: Platform. ### Additional information: ``` subnets tagged with kubernetes.io/cluster/clusterName : shared & kubernetes.io/role/elb : 1 where clusterName : <Name of the Cluster> ```
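The tags named in the error and in the additional information above can also be applied programmatically; a minimal boto3 sketch, with `cluster_name` and `subnet_ids` as hypothetical placeholders (the ALB ingress controller requires at least two subnets in distinct availability zones):

```python
import boto3

# Hypothetical values; substitute the real cluster name and at least two
# subnet IDs in distinct availability zones.
cluster_name = "my-eks-cluster"
subnet_ids = ["subnet-0aaa...", "subnet-0bbb..."]

ec2 = boto3.client("ec2")
# Apply the two tags the ALB ingress controller looks for when
# resolving subnets for load balancer creation.
ec2.create_tags(
    Resources=subnet_ids,
    Tags=[
        {"Key": f"kubernetes.io/cluster/{cluster_name}", "Value": "shared"},
        {"Key": "kubernetes.io/role/elb", "Value": "1"},
    ],
)
```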
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5931/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5931/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5930
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5930/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5930/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5930/events
https://github.com/kubeflow/pipelines/issues/5930
931,582,821
MDU6SXNzdWU5MzE1ODI4MjE=
5,930
[frontend] artifact preview & visualization breaks with argo v3.1+
{ "login": "Bobgy", "id": 4957653, "node_id": "MDQ6VXNlcjQ5NTc2NTM=", "avatar_url": "https://avatars.githubusercontent.com/u/4957653?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bobgy", "html_url": "https://github.com/Bobgy", "followers_url": "https://api.github.com/users/Bobgy/followers", "following_url": "https://api.github.com/users/Bobgy/following{/other_user}", "gists_url": "https://api.github.com/users/Bobgy/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bobgy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bobgy/subscriptions", "organizations_url": "https://api.github.com/users/Bobgy/orgs", "repos_url": "https://api.github.com/users/Bobgy/repos", "events_url": "https://api.github.com/users/Bobgy/events{/privacy}", "received_events_url": "https://api.github.com/users/Bobgy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @zijianjoy ", "Pasting an example full argo workflow:\r\n\r\n```yaml\r\napiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n annotations:\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline_compilation_time: 2021-06-29T11:08:44.423700\r\n pipelines.kubeflow.org/pipeline_spec: '{\"inputs\": [{\"default\": \"\", \"name\": \"pipeline-output-directory\"},\r\n {\"default\": \"two_step_pipeline\", \"name\": \"pipeline-name\"}], \"name\": \"two_step_pipeline\"}'\r\n pipelines.kubeflow.org/run_name: two_step_pipeline 2021-06-29 11-08-44\r\n pipelines.kubeflow.org/v2_pipeline: \"true\"\r\n creationTimestamp: \"2021-06-29T11:08:44Z\"\r\n generateName: two-step-pipeline-\r\n generation: 7\r\n labels:\r\n pipeline/persistedFinalState: \"true\"\r\n pipeline/runid: ceb88c92-21a9-4797-a68b-bb27c6183d59\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/v2_pipeline: \"true\"\r\n workflows.argoproj.io/completed: \"true\"\r\n workflows.argoproj.io/phase: Succeeded\r\n manager: workflow-controller\r\n operation: Update\r\n time: \"2021-06-29T11:10:01Z\"\r\n name: two-step-pipeline-94mfz\r\n namespace: kubeflow\r\n resourceVersion: \"30684140\"\r\n selfLink: /apis/argoproj.io/v1alpha1/namespaces/kubeflow/workflows/two-step-pipeline-94mfz\r\n uid: cef9e9db-28ae-4e16-8e4e-fcf4d960e967\r\nspec:\r\n arguments:\r\n parameters:\r\n - name: pipeline-output-directory\r\n value: gs://gongyuan-dev/v2-sample-test/data/samples_config-loop-item\r\n - name: pipeline-name\r\n value: two_step_pipeline\r\n entrypoint: two-step-pipeline\r\n serviceAccountName: pipeline-runner\r\n templates:\r\n - container:\r\n args:\r\n - sh\r\n - -ec\r\n - |\r\n program_path=$(mktemp)\r\n printf \"%s\" \"$0\" > \"$program_path\"\r\n python3 -u \"$program_path\" \"$@\"\r\n - |\r\n def _make_parent_dirs_and_return_path(file_path: str):\r\n import os\r\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\r\n return file_path\r\n\r\n def preprocess(\r\n uri, some_int, output_parameter_one,\r\n output_dataset_one\r\n ):\r\n '''Dummy Preprocess Step.'''\r\n with open(output_dataset_one, 'w') as f:\r\n f.write('Output dataset')\r\n with open(output_parameter_one, 'w') as f:\r\n f.write(\"{}\".format(1234))\r\n\r\n import argparse\r\n _parser = argparse.ArgumentParser(prog='Preprocess', description='Dummy Preprocess Step.')\r\n _parser.add_argument(\"--uri\", dest=\"uri\", type=str, required=True, default=argparse.SUPPRESS)\r\n _parser.add_argument(\"--some-int\", dest=\"some_int\", type=int, required=True, default=argparse.SUPPRESS)\r\n _parser.add_argument(\"--output-parameter-one\", dest=\"output_parameter_one\", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)\r\n _parser.add_argument(\"--output-dataset-one\", dest=\"output_dataset_one\", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)\r\n _parsed_args = vars(_parser.parse_args())\r\n\r\n _outputs = preprocess(**_parsed_args)\r\n - --uri\r\n - '{{$.inputs.parameters[''uri'']}}'\r\n - --some-int\r\n - '{{$.inputs.parameters[''some_int'']}}'\r\n - --output-parameter-one\r\n - '{{$.outputs.parameters[''output_parameter_one''].output_file}}'\r\n - --output-dataset-one\r\n - '{{$.outputs.artifacts[''output_dataset_one''].path}}'\r\n command:\r\n - /kfp-launcher/launch\r\n - --mlmd_server_address\r\n - $(METADATA_GRPC_SERVICE_HOST)\r\n - --mlmd_server_port\r\n - $(METADATA_GRPC_SERVICE_PORT)\r\n - --runtime_info_json\r\n - 
$(KFP_V2_RUNTIME_INFO)\r\n - --container_image\r\n - $(KFP_V2_IMAGE)\r\n - --task_name\r\n - preprocess\r\n - --pipeline_name\r\n - '{{inputs.parameters.pipeline-name}}'\r\n - --pipeline_run_id\r\n - $(WORKFLOW_ID)\r\n - --pipeline_task_id\r\n - $(KFP_POD_NAME)\r\n - --pipeline_root\r\n - '{{inputs.parameters.pipeline-output-directory}}'\r\n env:\r\n - name: KFP_POD_NAME\r\n valueFrom:\r\n fieldRef:\r\n fieldPath: metadata.name\r\n - name: KFP_NAMESPACE\r\n valueFrom:\r\n fieldRef:\r\n fieldPath: metadata.namespace\r\n - name: WORKFLOW_ID\r\n valueFrom:\r\n fieldRef:\r\n fieldPath: metadata.labels['workflows.argoproj.io/workflow']\r\n - name: KFP_V2_IMAGE\r\n value: python:3.9\r\n - name: KFP_V2_RUNTIME_INFO\r\n value: '{\"inputParameters\": {\"some_int\": {\"type\": \"INT\", \"value\": \"BEGIN-KFP-PARAM[12]END-KFP-PARAM\"},\r\n \"uri\": {\"type\": \"STRING\", \"value\": \"BEGIN-KFP-PARAM[uri-to-import]END-KFP-PARAM\"}},\r\n \"inputArtifacts\": {}, \"outputParameters\": {\"output_parameter_one\": {\"type\":\r\n \"INT\", \"path\": \"/tmp/outputs/output_parameter_one/data\"}}, \"outputArtifacts\":\r\n {\"output_dataset_one\": {\"schemaTitle\": \"system.Dataset\", \"instanceSchema\":\r\n \"\", \"metadataPath\": \"/tmp/outputs/output_dataset_one/data\"}}}'\r\n envFrom:\r\n - configMapRef:\r\n name: metadata-grpc-configmap\r\n optional: true\r\n image: python:3.9\r\n name: \"\"\r\n resources: {}\r\n volumeMounts:\r\n - mountPath: /kfp-launcher\r\n name: kfp-launcher\r\n initContainers:\r\n - command:\r\n - /bin/mount_launcher.sh\r\n image: gcr.io/gongyuan-dev/v2-sample-test/kfp-launcher@sha256:55d2af7c8f37515f745dea578ffa76af749e99474af29157474ea88ce0249d17\r\n mirrorVolumeMounts: true\r\n name: kfp-launcher\r\n resources: {}\r\n inputs:\r\n parameters:\r\n - name: pipeline-name\r\n - name: pipeline-output-directory\r\n metadata:\r\n annotations:\r\n pipelines.kubeflow.org/arguments.parameters: '{\"some_int\": \"12\", \"uri\": \"uri-to-import\"}'\r\n pipelines.kubeflow.org/component_ref: '{}'\r\n pipelines.kubeflow.org/v2_component: \"true\"\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline-sdk-type: kfp\r\n pipelines.kubeflow.org/v2_component: \"true\"\r\n name: preprocess\r\n outputs:\r\n artifacts:\r\n - name: preprocess-output_dataset_one\r\n path: /tmp/outputs/output_dataset_one/data\r\n - name: preprocess-output_parameter_one\r\n path: /tmp/outputs/output_parameter_one/data\r\n parameters:\r\n - name: preprocess-output_parameter_one\r\n valueFrom:\r\n path: /tmp/outputs/output_parameter_one/data\r\n volumes:\r\n - name: kfp-launcher\r\n - container:\r\n args:\r\n - sh\r\n - -ec\r\n - |\r\n program_path=$(mktemp)\r\n printf \"%s\" \"$0\" > \"$program_path\"\r\n python3 -u \"$program_path\" \"$@\"\r\n - |\r\n def _make_parent_dirs_and_return_path(file_path: str):\r\n import os\r\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\r\n return file_path\r\n\r\n def train_op(\r\n dataset,\r\n model,\r\n num_steps = 100\r\n ):\r\n '''Dummy Training Step.'''\r\n\r\n with open(dataset, 'r') as input_file:\r\n input_string = input_file.read()\r\n with open(model, 'w') as output_file:\r\n for i in range(num_steps):\r\n output_file.write(\r\n \"Step {}\\n{}\\n=====\\n\".format(i, input_string)\r\n )\r\n\r\n import argparse\r\n _parser = argparse.ArgumentParser(prog='Train op', description='Dummy Training Step.')\r\n _parser.add_argument(\"--dataset\", 
dest=\"dataset\", type=str, required=True, default=argparse.SUPPRESS)\r\n _parser.add_argument(\"--num-steps\", dest=\"num_steps\", type=int, required=False, default=argparse.SUPPRESS)\r\n _parser.add_argument(\"--model\", dest=\"model\", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)\r\n _parsed_args = vars(_parser.parse_args())\r\n\r\n _outputs = train_op(**_parsed_args)\r\n - --dataset\r\n - '{{$.inputs.artifacts[''dataset''].path}}'\r\n - --num-steps\r\n - '{{$.inputs.parameters[''num_steps'']}}'\r\n - --model\r\n - '{{$.outputs.artifacts[''model''].path}}'\r\n command:\r\n - /kfp-launcher/launch\r\n - --mlmd_server_address\r\n - $(METADATA_GRPC_SERVICE_HOST)\r\n - --mlmd_server_port\r\n - $(METADATA_GRPC_SERVICE_PORT)\r\n - --runtime_info_json\r\n - $(KFP_V2_RUNTIME_INFO)\r\n - --container_image\r\n - $(KFP_V2_IMAGE)\r\n - --task_name\r\n - train-op\r\n - --pipeline_name\r\n - '{{inputs.parameters.pipeline-name}}'\r\n - --pipeline_run_id\r\n - $(WORKFLOW_ID)\r\n - --pipeline_task_id\r\n - $(KFP_POD_NAME)\r\n - --pipeline_root\r\n - '{{inputs.parameters.pipeline-output-directory}}'\r\n env:\r\n - name: KFP_POD_NAME\r\n valueFrom:\r\n fieldRef:\r\n fieldPath: metadata.name\r\n - name: KFP_NAMESPACE\r\n valueFrom:\r\n fieldRef:\r\n fieldPath: metadata.namespace\r\n - name: WORKFLOW_ID\r\n valueFrom:\r\n fieldRef:\r\n fieldPath: metadata.labels['workflows.argoproj.io/workflow']\r\n - name: KFP_V2_IMAGE\r\n value: python:3.7\r\n - name: KFP_V2_RUNTIME_INFO\r\n value: '{\"inputParameters\": {\"num_steps\": {\"type\": \"INT\", \"value\": \"BEGIN-KFP-PARAM[{{inputs.parameters.preprocess-output_parameter_one}}]END-KFP-PARAM\"}},\r\n \"inputArtifacts\": {\"dataset\": {\"metadataPath\": \"/tmp/inputs/dataset/data\",\r\n \"schemaTitle\": \"system.Dataset\", \"instanceSchema\": \"\"}}, \"outputParameters\":\r\n {}, \"outputArtifacts\": {\"model\": {\"schemaTitle\": \"system.Model\", \"instanceSchema\":\r\n \"\", \"metadataPath\": \"/tmp/outputs/model/data\"}}}'\r\n envFrom:\r\n - configMapRef:\r\n name: metadata-grpc-configmap\r\n optional: true\r\n image: python:3.7\r\n name: \"\"\r\n resources: {}\r\n volumeMounts:\r\n - mountPath: /kfp-launcher\r\n name: kfp-launcher\r\n initContainers:\r\n - command:\r\n - /bin/mount_launcher.sh\r\n image: gcr.io/gongyuan-dev/v2-sample-test/kfp-launcher@sha256:55d2af7c8f37515f745dea578ffa76af749e99474af29157474ea88ce0249d17\r\n mirrorVolumeMounts: true\r\n name: kfp-launcher\r\n resources: {}\r\n inputs:\r\n artifacts:\r\n - name: preprocess-output_dataset_one\r\n path: /tmp/inputs/dataset/data\r\n parameters:\r\n - name: pipeline-name\r\n - name: pipeline-output-directory\r\n - name: preprocess-output_parameter_one\r\n metadata:\r\n annotations:\r\n pipelines.kubeflow.org/arguments.parameters: '{\"num_steps\": \"{{inputs.parameters.preprocess-output_parameter_one}}\"}'\r\n pipelines.kubeflow.org/component_ref: '{}'\r\n pipelines.kubeflow.org/v2_component: \"true\"\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n pipelines.kubeflow.org/kfp_sdk_version: 1.6.4\r\n pipelines.kubeflow.org/pipeline-sdk-type: kfp\r\n pipelines.kubeflow.org/v2_component: \"true\"\r\n name: train-op\r\n outputs:\r\n artifacts:\r\n - name: train-op-model\r\n path: /tmp/outputs/model/data\r\n volumes:\r\n - name: kfp-launcher\r\n - dag:\r\n tasks:\r\n - arguments:\r\n parameters:\r\n - name: pipeline-name\r\n value: '{{inputs.parameters.pipeline-name}}'\r\n - name: pipeline-output-directory\r\n 
value: '{{inputs.parameters.pipeline-output-directory}}'\r\n name: preprocess\r\n template: preprocess\r\n - arguments:\r\n artifacts:\r\n - from: '{{tasks.preprocess.outputs.artifacts.preprocess-output_dataset_one}}'\r\n name: preprocess-output_dataset_one\r\n parameters:\r\n - name: pipeline-name\r\n value: '{{inputs.parameters.pipeline-name}}'\r\n - name: pipeline-output-directory\r\n value: '{{inputs.parameters.pipeline-output-directory}}'\r\n - name: preprocess-output_parameter_one\r\n value: '{{tasks.preprocess.outputs.parameters.preprocess-output_parameter_one}}'\r\n dependencies:\r\n - preprocess\r\n name: train-op\r\n template: train-op\r\n inputs:\r\n parameters:\r\n - name: pipeline-name\r\n - name: pipeline-output-directory\r\n metadata:\r\n annotations:\r\n sidecar.istio.io/inject: \"false\"\r\n labels:\r\n pipelines.kubeflow.org/cache_enabled: \"true\"\r\n name: two-step-pipeline\r\n outputs: {}\r\nstatus:\r\n artifactRepositoryRef:\r\n default: true\r\n conditions:\r\n - status: \"False\"\r\n type: PodRunning\r\n - status: \"True\"\r\n type: Completed\r\n finishedAt: \"2021-06-29T11:10:01Z\"\r\n nodes:\r\n two-step-pipeline-94mfz:\r\n children:\r\n - two-step-pipeline-94mfz-2926751466\r\n displayName: two-step-pipeline-94mfz\r\n finishedAt: \"2021-06-29T11:10:01Z\"\r\n id: two-step-pipeline-94mfz\r\n inputs:\r\n parameters:\r\n - name: pipeline-name\r\n value: two_step_pipeline\r\n - name: pipeline-output-directory\r\n value: gs://gongyuan-dev/v2-sample-test/data/samples_config-loop-item\r\n name: two-step-pipeline-94mfz\r\n outboundNodes:\r\n - two-step-pipeline-94mfz-1801497614\r\n phase: Succeeded\r\n progress: 2/2\r\n resourcesDuration:\r\n cpu: 37\r\n memory: 18\r\n startedAt: \"2021-06-29T11:08:44Z\"\r\n templateName: two-step-pipeline\r\n templateScope: local/two-step-pipeline-94mfz\r\n type: DAG\r\n two-step-pipeline-94mfz-1801497614:\r\n boundaryID: two-step-pipeline-94mfz\r\n displayName: train-op\r\n finishedAt: \"2021-06-29T11:09:58Z\"\r\n hostNodeName: gke-kfp-std-default-pool-1c1207aa-2eyx\r\n id: two-step-pipeline-94mfz-1801497614\r\n inputs:\r\n artifacts:\r\n - name: preprocess-output_dataset_one\r\n path: /tmp/inputs/dataset/data\r\n s3:\r\n key: artifacts/two-step-pipeline-94mfz/2021/06/29/two-step-pipeline-94mfz-2926751466/preprocess-output_dataset_one.tgz\r\n parameters:\r\n - name: pipeline-name\r\n value: two_step_pipeline\r\n - name: pipeline-output-directory\r\n value: gs://gongyuan-dev/v2-sample-test/data/samples_config-loop-item\r\n - name: preprocess-output_parameter_one\r\n value: \"1234\"\r\n name: two-step-pipeline-94mfz.train-op\r\n outputs:\r\n artifacts:\r\n - name: train-op-model\r\n path: /tmp/outputs/model/data\r\n s3:\r\n key: artifacts/two-step-pipeline-94mfz/2021/06/29/two-step-pipeline-94mfz-1801497614/train-op-model.tgz\r\n - name: main-logs\r\n s3:\r\n key: artifacts/two-step-pipeline-94mfz/2021/06/29/two-step-pipeline-94mfz-1801497614/main.log\r\n exitCode: \"0\"\r\n phase: Succeeded\r\n progress: 1/1\r\n resourcesDuration:\r\n cpu: 16\r\n memory: 7\r\n startedAt: \"2021-06-29T11:09:25Z\"\r\n templateName: train-op\r\n templateScope: local/two-step-pipeline-94mfz\r\n type: Pod\r\n two-step-pipeline-94mfz-2926751466:\r\n boundaryID: two-step-pipeline-94mfz\r\n children:\r\n - two-step-pipeline-94mfz-1801497614\r\n displayName: preprocess\r\n finishedAt: \"2021-06-29T11:09:14Z\"\r\n hostNodeName: gke-kfp-std-default-pool-1c1207aa-2eyx\r\n id: two-step-pipeline-94mfz-2926751466\r\n inputs:\r\n parameters:\r\n - name: pipeline-name\r\n 
value: two_step_pipeline\r\n - name: pipeline-output-directory\r\n value: gs://gongyuan-dev/v2-sample-test/data/samples_config-loop-item\r\n name: two-step-pipeline-94mfz.preprocess\r\n outputs:\r\n artifacts:\r\n - name: preprocess-output_dataset_one\r\n path: /tmp/outputs/output_dataset_one/data\r\n s3:\r\n key: artifacts/two-step-pipeline-94mfz/2021/06/29/two-step-pipeline-94mfz-2926751466/preprocess-output_dataset_one.tgz\r\n - name: preprocess-output_parameter_one\r\n path: /tmp/outputs/output_parameter_one/data\r\n s3:\r\n key: artifacts/two-step-pipeline-94mfz/2021/06/29/two-step-pipeline-94mfz-2926751466/preprocess-output_parameter_one.tgz\r\n - name: main-logs\r\n s3:\r\n key: artifacts/two-step-pipeline-94mfz/2021/06/29/two-step-pipeline-94mfz-2926751466/main.log\r\n exitCode: \"0\"\r\n parameters:\r\n - name: preprocess-output_parameter_one\r\n value: \"1234\"\r\n valueFrom:\r\n path: /tmp/outputs/output_parameter_one/data\r\n phase: Succeeded\r\n progress: 1/1\r\n resourcesDuration:\r\n cpu: 21\r\n memory: 11\r\n startedAt: \"2021-06-29T11:08:44Z\"\r\n templateName: preprocess\r\n templateScope: local/two-step-pipeline-94mfz\r\n type: Pod\r\n phase: Succeeded\r\n progress: 2/2\r\n resourcesDuration:\r\n cpu: 37\r\n memory: 18\r\n startedAt: \"2021-06-29T11:08:44Z\"\r\n```", "The only information related to artifact repository seems to be:\r\n```\r\n artifactRepositoryRef:\r\n default: true\r\n```", "The PR which removed `s3` from workflow template: https://github.com/argoproj/argo-workflows/pull/3377", "How argo retrieve file from artifact: https://github.com/argoproj/argo-workflows/blob/43212590d4579c821280fd482b960934139eac2f/ui/src/app/workflows/components/workflow-node-info/workflow-node-info.tsx#L365\r\n\r\nHow backend server reads from provided info: https://github.com/argoproj/argo-workflows/blob/0e94283aea641c6c927c9165900165a72022124f/server/artifacts/artifact_server.go#L143", "Looks like that what we are missing `bucket` and `endpoint`: `endpoint` tells us what kind of storage platform to use, `bucket` tells us the top directory to look for artifact with `key`.", "I think the \"easiest\" workaround without waiting for argo upstream changes is to adjust the UI server artifacts API endpoint:\r\nhttps://github.com/kubeflow/pipelines/blob/647bed72a9a18cb12a504d336af7432fa048a8e3/frontend/server/handlers/artifacts.ts#L65-L66\r\n\r\nChanges:\r\n* Make source & bucket args as optional, they default to the default artifact repository in the cluster\r\n* We can adjust manifests to make sure UI server knows what default artifact repository is\r\n* UI code no longer knows what source / bucket is, but it can just use the object key to get data from UI server.\r\n\r\nWhat do you think?", "Thank you @Bobgy for the suggestion! I will look into it.\r\n\r\nAfter the update of https://github.com/argoproj/argo-workflows/issues/6255, I can retrieve the `s3` artifact detail in `status`. 
For example: for KFP tutorial `[Tutorial] Data passing in python components (ac491)`:\r\n\r\n```\r\nstatus:\r\n artifactRepositoryRef:\r\n artifactRepository:\r\n archiveLogs: true\r\n s3:\r\n accessKeySecret:\r\n key: accesskey\r\n name: mlpipeline-minio-artifact\r\n bucket: mlpipeline\r\n endpoint: minio-service.kubeflow:9000\r\n insecure: true\r\n keyFormat: artifacts/{{workflow.name}}/{{workflow.creationTimestamp.Y}}/{{workflow.creationTimestamp.m}}/{{workflow.creationTimestamp.d}}/{{pod.name}}\r\n secretKeySecret:\r\n key: secretkey\r\n name: mlpipeline-minio-artifact\r\n default: true\r\n```\r\n\r\nHowever, KFP API response didn't have this info under `pipeline_runtime` -> `workflow_manifest` field. We might need to make some adjustment to expose this information, and update KFP UI to read new artifact bucket and endpoint accordingly. internal link: go/paste/4581363291258880", "@zijianjoy yes, it's expected that we need to update kfp api server and kfp persistence agent to make the new fields show up in the response to KFP UI.\r\nI've included the changes in https://github.com/kubeflow/pipelines/pull/6027. You can first test by editing images for ml-pipeline and ml-pipeline-persistence-agent deployments.\r\n\r\nTry:\r\n* gcr.io/ml-pipeline-test/d4e24cc2de6be0448c67508198f3163fb261c9a0/api-server\r\n* gcr.io/ml-pipeline-test/d4e24cc2de6be0448c67508198f3163fb261c9a0/persistenceagent", "@Bobgy Thank you Yuan! I replaced both images on a `1.7.0-alpha.2` cluster, and I ran the v1 pipeline `[Tutorial] Data passing in python component`. But the workflow output is still the same as before. Am I missing some other steps for applying this change?", "I might need to replace `workflow-controller` with https://github.com/kubeflow/pipelines/pull/6027/files#diff-51ad04b116a62f170e26dd77721528424f95fbc1df7adcd2a325bdaa100dac08 as well.", "Yes, you need to replace workflow-controller too.\r\nI'd recommend install by checking out my PR locally and `kubectl apply -k manifests/kustomize/env/dev`, it will have all the latest images for argo workflow controller.\r\n\r\nThen edit images manually for api server and persistence agent.\r\n\r\nI didn't have time to test yesterday, will have a try now", "I just tested this, and confirmed artifactRepositoryRef now contains the full spec for responses to UI.\r\n\r\nSee the example I got (edited version):\r\n> {\"status\":{\"artifactRepositoryRef\":{\"default\":true,\"artifactRepository\":{\"archiveLogs\":true,\"s3\":{\"endpoint\":\"minio-service.kubeflow:9000\",\"bucket\":\"mlpipeline\",\"insecure\":true,\"accessKeySecret\":{\"name\":\"mlpipeline-minio-artifact\",\"key\":\"accesskey\"},\"secretKeySecret\":{\"name\":\"mlpipeline-minio-artifact\",\"key\":\"secretkey\"},\"keyFormat\":\"artifacts/{{workflow.name}}/{{workflow.creationTimestamp.Y}}/{{workflow.creationTimestamp.m}}/{{workflow.creationTimestamp.d}}/{{pod.name}}\"}}}}}" ]
2021-06-28T13:37:29
2021-07-15T08:44:51
2021-07-15T08:44:51
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> kfp standalone * KFP version: 1.7.0-alpha.1 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ![image](https://user-images.githubusercontent.com/4957653/123645232-bdebc900-d858-11eb-9735-962616f42743.png) ### Expected result <!-- What should the correct behavior be? --> artifacts should show a preview, and visualizations should show up ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> Root cause: argo removed some information from the workflow status. Previously, each artifact entry contained a full object of repository information, but now artifacts only contain their keys. See the workflows.argoproj.io/outputs annotation > workflows.argoproj.io/outputs: >- {"artifacts":[{"name":"main-logs","s3":{"key":"artifacts/file-passing-pipelines-xz8xs/2021/06/28/file-passing-pipelines-xz8xs-3422213888/main.log"}}]} --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
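For reference, a minimal sketch of how a client could resolve one of these bare artifact keys once the full `artifactRepositoryRef` is exposed in the workflow status, as discussed in the comments above. The function name and dictionary layout are assumptions based on the example status pasted there, not an existing KFP API:

```python
def resolve_artifact_location(workflow: dict, node_id: str, artifact_name: str) -> dict:
    """Combine a bare artifact key with the workflow's default artifact repository.

    `workflow` is the parsed workflow manifest (e.g. json.loads of the
    manifest string); field names follow the example status above.
    """
    status = workflow["status"]
    # Assumes the Argo-side fix that embeds the resolved repository spec
    # under status.artifactRepositoryRef.
    repo = status["artifactRepositoryRef"]["artifactRepository"]["s3"]
    node = status["nodes"][node_id]
    for artifact in node.get("outputs", {}).get("artifacts", []):
        if artifact["name"] == artifact_name:
            return {
                "endpoint": repo["endpoint"],
                # Per-artifact s3 blocks now carry only the key, so the
                # bucket falls back to the repository default.
                "bucket": artifact.get("s3", {}).get("bucket", repo["bucket"]),
                "key": artifact["s3"]["key"],
                "insecure": repo.get("insecure", False),
            }
    raise KeyError(f"artifact {artifact_name!r} not found on node {node_id!r}")
```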
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5930/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5930/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5929
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5929/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5929/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5929/events
https://github.com/kubeflow/pipelines/issues/5929
931,571,858
MDU6SXNzdWU5MzE1NzE4NTg=
5,929
kubeflow 1.2.0 (kfctl_aws.v1.2.0.yaml) install failed to build LoadBalancer configuration on AWS (eks cluster).
{ "login": "amalendur", "id": 49721445, "node_id": "MDQ6VXNlcjQ5NzIxNDQ1", "avatar_url": "https://avatars.githubusercontent.com/u/49721445?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amalendur", "html_url": "https://github.com/amalendur", "followers_url": "https://api.github.com/users/amalendur/followers", "following_url": "https://api.github.com/users/amalendur/following{/other_user}", "gists_url": "https://api.github.com/users/amalendur/gists{/gist_id}", "starred_url": "https://api.github.com/users/amalendur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amalendur/subscriptions", "organizations_url": "https://api.github.com/users/amalendur/orgs", "repos_url": "https://api.github.com/users/amalendur/repos", "events_url": "https://api.github.com/users/amalendur/events{/privacy}", "received_events_url": "https://api.github.com/users/amalendur/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Will create under bug." ]
2021-06-28T13:26:14
2021-06-28T15:05:32
2021-06-28T15:05:13
NONE
null
/kind bug. I have deployed a EKS cluster and on top of that cluster I am trying to deploy "Kubeflow" through GitLab pipeline. In the pipeline script I have set the Kube-context before execute the Kubeflow config-file(kfctl_aws.v1.2.0.yaml). I have executed the steps in the following order; 1. Deploy EKS cluster with Nodegrop (deploy through terraform) 2. Download the Kubeflow config-file from "https://raw.githubusercontent.com/kubeflow/manifests/v1.2-branch/kfdef/kfctl_aws.v1.2.0.yaml" 3. Update the config-file with ``` apiVersion: kfdef.apps.kubeflow.org/v1 kind: KfDef metadata: namespace: kubeflow spec: applications: - kustomizeConfig: repoRef: name: manifests path: namespaces/base name: namespaces - kustomizeConfig: repoRef: name: manifests path: application/v3 name: application - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/istio-1-3-1-stack name: istio-stack - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/cluster-local-gateway-1-3-1 name: cluster-local-gateway - kustomizeConfig: repoRef: name: manifests path: istio/istio/base name: istio - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/cert-manager-crds name: cert-manager-crds - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/cert-manager-kube-system-resources name: cert-manager-kube-system-resources - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/cert-manager name: cert-manager - kustomizeConfig: repoRef: name: manifests path: metacontroller/base name: metacontroller - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/oidc-authservice name: oidc-authservice - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/dex-auth name: dex - kustomizeConfig: repoRef: name: manifests path: admission-webhook/bootstrap/overlays/application name: bootstrap - kustomizeConfig: repoRef: name: manifests path: spark/spark-operator/overlays/application name: spark-operator - kustomizeConfig: repoRef: name: manifests path: stacks/aws name: kubeflow-apps - kustomizeConfig: repoRef: name: manifests path: aws/istio-ingress/base_v3 name: istio-ingress - kustomizeConfig: repoRef: name: manifests path: knative/installs/generic name: knative - kustomizeConfig: repoRef: name: manifests path: kfserving/installs/generic name: kfserving - kustomizeConfig: repoRef: name: manifests path: stacks/aws/application/spartakus name: spartakus plugins: - kind: KfAwsPlugin metadata: name: aws spec: auth: basicAuth: password: <password> username: admin region: <aws_default_region> #enablePodIamPolicy: true roles: - <eks_node_role> repos: - name: manifests uri: https://github.com/kubeflow/manifests/archive/v1.2.0.tar.gz version: v1.2-branch ``` ``` <aws_region> : AWS Region, where the cluster has been created. <eks_node_role> : Name of the IAM role for the nodegroup. <password> : Admin password ``` 4. Set the Kube-context; `aws eks --region ${AWS_REGION} update-kubeconfig --name ${CLUSTER_NAME}` 5. 
Set the environment; ``` wget https://github.com/kubeflow/kfctl/releases/download/v1.2.0/kfctl_v1.2.0-0-gbc038f9_linux.tar.gz --no-check-certificate && tar -xvzf kfctl_v1.2.0-0-gbc038f9_linux.tar.gz && chmod +x kfctl && cp ./kfctl /usr/local/bin/kfctl curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/aws-iam-authenticator && chmod +x ./aws-iam-authenticator && cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator export AWS_CLUSTER_NAME=${CLUSTER_NAME} export KF_NAME=${AWS_CLUSTER_NAME} KF_DIR = /opt/ar-usv-poc-plt kfctl build -f kfctl_aws.v1.2.0.yaml -V kfctl apply -f kfctl_aws.v1.2.0.yaml -V ``` Getting the following error: `E0628 14:56:25.659557 1 controller.go:217] kubebuilder/controller "msg"="Reconciler error" "error"="failed to build LoadBalancer configuration due to retrieval of subnets failed to resolve 2 qualified subnets. Subnets must contain the kubernetes.io/cluster/\u003ccluster name\u003e tag with a value of shared or owned and the kubernetes.io/role/elb tag signifying it should be used for ALBs Additionally, there must be at least 2 subnets with unique availability zones as required by ALBs. Either tag subnets to meet this requirement or use the subnets annotation on the ingress resource to explicitly call out what subnets to use for ALB creation. The subnets that did resolve were []" "controller"="alb-ingress-controller" "request"={"Namespace":"istio-system","Name":"istio-ingress"}`
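The error above points at missing subnet tags rather than Kubeflow itself: the ALB ingress controller requires at least two subnets in distinct availability zones carrying the cluster and ELB role tags. A minimal sketch of adding those tags with boto3; the cluster name and subnet IDs are placeholders to substitute with your own:

```python
import boto3

# Placeholder values -- substitute your own cluster name and subnet IDs.
CLUSTER_NAME = "my-eks-cluster"
SUBNET_IDS = ["subnet-aaaa1111", "subnet-bbbb2222"]  # at least 2, in different AZs

ec2 = boto3.client("ec2")
ec2.create_tags(
    Resources=SUBNET_IDS,
    Tags=[
        # Marks the subnets as usable by this cluster.
        {"Key": f"kubernetes.io/cluster/{CLUSTER_NAME}", "Value": "shared"},
        # Marks them as candidates for (public) ALB placement.
        {"Key": "kubernetes.io/role/elb", "Value": "1"},
    ],
)
```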
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5929/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5929/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5925
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5925/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5925/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5925/events
https://github.com/kubeflow/pipelines/issues/5925
931,326,837
MDU6SXNzdWU5MzEzMjY4Mzc=
5,925
[feature] Multi-User Pipeline Definition Isolation
{ "login": "ILLUM1N0X", "id": 79043944, "node_id": "MDQ6VXNlcjc5MDQzOTQ0", "avatar_url": "https://avatars.githubusercontent.com/u/79043944?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ILLUM1N0X", "html_url": "https://github.com/ILLUM1N0X", "followers_url": "https://api.github.com/users/ILLUM1N0X/followers", "following_url": "https://api.github.com/users/ILLUM1N0X/following{/other_user}", "gists_url": "https://api.github.com/users/ILLUM1N0X/gists{/gist_id}", "starred_url": "https://api.github.com/users/ILLUM1N0X/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ILLUM1N0X/subscriptions", "organizations_url": "https://api.github.com/users/ILLUM1N0X/orgs", "repos_url": "https://api.github.com/users/ILLUM1N0X/repos", "events_url": "https://api.github.com/users/ILLUM1N0X/events{/privacy}", "received_events_url": "https://api.github.com/users/ILLUM1N0X/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
null
[]
null
[ "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "Same problem here, we followed the [manifests](https://github.com/kubeflow/manifests#kubeflow-pipelines) and deployed pipeline with multi-user support, but still all pipelines are public to everyone.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "Not stale", "Any update?" ]
2021-06-28T08:45:54
2023-02-20T15:51:01
null
NONE
null
Hey guys, According to the [official documentation](https://www.kubeflow.org/docs/components/pipelines/multi-user/#current-limitations), Pipeline definitions are not isolated currently. **What feature would you like to see?** I would like to see Pipeline isolation similar to what we have in Notebooks/Runs/Experiments. **What is the use case or pain point?** The way Pipelines are exposed currently allows the deletion of Pipelines owned by other users. **Is there a workaround currently?** Not that I am aware of. Are there intentions to add this to future versions? Thanks <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5925/reactions", "total_count": 22, "+1": 22, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5925/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5924
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5924/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5924/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5924/events
https://github.com/kubeflow/pipelines/issues/5924
931,044,084
MDU6SXNzdWU5MzEwNDQwODQ=
5,924
[backend] cache-server cannot mount webhook-tls-certs
{ "login": "yuhuishi-convect", "id": 74702693, "node_id": "MDQ6VXNlcjc0NzAyNjkz", "avatar_url": "https://avatars.githubusercontent.com/u/74702693?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yuhuishi-convect", "html_url": "https://github.com/yuhuishi-convect", "followers_url": "https://api.github.com/users/yuhuishi-convect/followers", "following_url": "https://api.github.com/users/yuhuishi-convect/following{/other_user}", "gists_url": "https://api.github.com/users/yuhuishi-convect/gists{/gist_id}", "starred_url": "https://api.github.com/users/yuhuishi-convect/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yuhuishi-convect/subscriptions", "organizations_url": "https://api.github.com/users/yuhuishi-convect/orgs", "repos_url": "https://api.github.com/users/yuhuishi-convect/repos", "events_url": "https://api.github.com/users/yuhuishi-convect/events{/privacy}", "received_events_url": "https://api.github.com/users/yuhuishi-convect/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "Not solving the issue, but failed to reproduce with minikube:\r\n\r\n```\r\ndocker system prune --all \r\nminikube start --cpus 6 --memory 12288 --disk-size=120g --extra-config=apiserver.service-account-issuer=api --extra-config=apiserver.service-account-signing-key-file=/var/lib/minikube/certs/apiserver.key --extra-config=apiserver.service-account-api-audiences=api\r\nexport PIPELINE_VERSION=1.2.0\r\nkubectl apply -k \"github.com/kubeflow/pipelines/manifests/kustomize/cluster-scoped-resources?ref=$PIPELINE_VERSION\"\r\nkubectl wait --for condition=established --timeout=60s crd/applications.app.k8s.io\r\nkubectl apply -k \"github.com/kubeflow/pipelines/manifests/kustomize/env/platform-agnostic-pns?ref=$PIPELINE_VERSION\"\r\n\r\n```", "Can you please check and post the `cache-deployer` logs?", "`cache-deployer` log\r\n\r\n```\r\n$ k logs cache-deployer-deployment-79f5c8d4f4-tr7z9 -n kubeflow\r\n+ echo 'Start deploying cache service to existing cluster:'\r\nStart deploying cache service to existing cluster:\r\n+ NAMESPACE=kubeflow\r\n+ MUTATING_WEBHOOK_CONFIGURATION_NAME=cache-webhook-kubeflow\r\n+ WEBHOOK_SECRET_NAME=webhook-server-tls\r\n+ mkdir -p /root/bin\r\n+ export 'PATH=/root/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'\r\n+ kubectl version --output json\r\n+ jq --raw-output '(.serverVersion.major + \".\" + .serverVersion.minor)'\r\n+ tr -d '\"+'\r\nUnable to connect to the server: dial tcp 10.96.0.1:443: i/o timeout\r\n+ server_version_major_minor=.\r\n+ curl -s https://storage.googleapis.com/kubernetes-release/release/stable-..txt\r\n+ stable_build_version=\r\n+ kubectl_url=https://storage.googleapis.com/kubernetes-release/release//bin/linux/amd64/kubectl\r\n+ curl -L -o /root/bin/kubectl https://storage.googleapis.com/kubernetes-release/release//bin/linux/amd64/kubectl\r\n % Total % Received % Xferd Average Speed Time Time Time Current\r\n Dload Upload Total Spent Left Speed\r\n 0 0 0 0 0 0 0 0 --:--:-- 0:00:04 --:--:-- 0curl: (6) Could not resolve host: storage.googleapis.com\r\n+ chmod +x /root/bin/kubectl\r\nchmod: /root/bin/kubectl: No such file or directory\r\n+ true\r\n+ kubectl get mutatingwebhookconfigurations cache-webhook-kubeflow --namespace kubeflow --ignore-not-found\r\n```\r\n\r\n> Can you please check and post the `cache-deployer` logs?\r\n\r\n", "Could you try restarting the `cache-deployer`? \r\nIt had tried to get your cluster version `kubectl version --output json`, but failed to connect. 
Then it failed to download the proper kubectl version.", "similar issue but its failing with permissions on `/`\r\n```kubectl logs pods/cache-deployer-deployment-66f7dd69b4-lbt7v -n kubeflow -c main\r\n+ echo 'Start deploying cache service to existing cluster:'\r\n+ NAMESPACE=kubeflow\r\nStart deploying cache service to existing cluster:\r\n+ MUTATING_WEBHOOK_CONFIGURATION_NAME=cache-webhook-kubeflow\r\n+ WEBHOOK_SECRET_NAME=webhook-server-tls\r\n+ mkdir -p /tmp/bin\r\n+ export 'PATH=/tmp/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'\r\n+ kubectl version --output json\r\n+ jq --raw-output '(.serverVersion.major + \".\" + .serverVersion.minor)'\r\n+ tr -d '\"+'\r\n+ server_version_major_minor=1.20\r\n+ curl -s https://storage.googleapis.com/kubernetes-release/release/stable-1.20.txt\r\n+ stable_build_version=v1.20.9\r\n+ kubectl_url=https://storage.googleapis.com/kubernetes-release/release/v1.20.9/bin/linux/amd64/kubectl\r\n+ curl -L -o /tmp/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.20.9/bin/linux/amd64/kubectl\r\n % Total % Received % Xferd Average Speed Time Time Time Current\r\n Dload Upload Total Spent Left Speed\r\n100 38.3M 100 38.3M 0 0 21.8M 0 0:00:01 0:00:01 --:--:-- 21.8M\r\n+ chmod +x /tmp/bin/kubectl\r\n/kfp/cache/deployer/deploy-cache-service.sh: line 47: can't create webhooks.txt: Permission denied\r\n----\r\nImage: gcr.io/ml-pipeline/cache-deployer:1.5.1\r\nImage ID: gcr.io/ml-pipeline/cache-deployer@sha256:88d07dd205577ca3715fc93b01a1ec251028e6b74b13681c0aeb64ea91d5e793\r\n```\r\n[temp-work around ](https://github.com/vgopal/pipelines/commit/83f97cf136bbeee1f755f9599d73c99791b1a5a7) worked for me.. also had to explicity set [env HOME in deployment](https://github.com/vgopal/manifests/commit/3e0d827530a485b96e874a8848e18e25fbd0c0c5) ", "I have a similar issue, but I don’t know how to solve it. Can someone help me?\r\n![image](https://user-images.githubusercontent.com/13248089/129714165-322d2a52-d89e-4cc1-a12b-57e15998458c.png)\r\n![image](https://user-images.githubusercontent.com/13248089/129714187-f3d89d07-c57a-4b5a-b170-886302b3aa4b.png)\r\n", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
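A sketch of the environment-variable half of the workaround linked above, applied with the official Kubernetes Python client. The deployment and container names follow the manifests quoted in this thread, but the `/tmp` home directory is an assumption; the linked commits may use a different path:

```python
from kubernetes import client, config

config.load_kube_config()
apps = client.AppsV1Api()

# Strategic-merge patch: point HOME at a writable directory so the
# deployer script can create its scratch files (webhooks.txt etc.).
apps.patch_namespaced_deployment(
    name="cache-deployer-deployment",
    namespace="kubeflow",
    body={"spec": {"template": {"spec": {"containers": [
        {"name": "main", "env": [{"name": "HOME", "value": "/tmp"}]},
    ]}}}},
)
```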
2021-06-27T23:35:18
2022-03-03T00:05:11
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? [Deployed locally on a kind cluster](https://www.kubeflow.org/docs/components/pipelines/installation/localcluster-deployment/#uninstalling-kubeflow-pipelines) kind version: `kind v0.9.0 go1.15.2 linux/amd64` <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.2.0 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: 1.2.0 <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ``` export PIPELINE_VERSION=1.2.0 kubectl apply -k "github.com/kubeflow/pipelines/manifests/kustomize/cluster-scoped-resources?ref=$PIPELINE_VERSION" kubectl wait --for condition=established --timeout=60s crd/applications.app.k8s.io kubectl apply -k "github.com/kubeflow/pipelines/manifests/kustomize/env/platform-agnostic-pns?ref=$PIPELINE_VERSION" ``` The `cache-server` pod gets stuck in the `ContainerCreating` stage. Logs ``` Name: cache-server-b68649ff-dl89c Namespace: kubeflow Priority: 0 Node: kind-control-plane/172.18.0.2 Start Time: Sun, 27 Jun 2021 16:22:24 -0700 Labels: app=cache-server application-crd-id=kubeflow-pipelines pod-template-hash=b68649ff Annotations: <none> Status: Pending IP: IPs: <none> Controlled By: ReplicaSet/cache-server-b68649ff Containers: server: Container ID: Image: gcr.io/ml-pipeline/cache-server:1.3.0 Image ID: Port: 8443/TCP Host Port: 0/TCP Args: --db_driver=$(DBCONFIG_DRIVER) --db_host=$(DBCONFIG_HOST_NAME) --db_port=$(DBCONFIG_PORT) --db_name=$(DBCONFIG_DB_NAME) --db_user=$(DBCONFIG_USER) --db_password=$(DBCONFIG_PASSWORD) --namespace_to_watch=$(NAMESPACE_TO_WATCH) State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: DBCONFIG_DRIVER: mysql DBCONFIG_DB_NAME: <set to the key 'cacheDb' of config map 'pipeline-install-config-m2k6bmc5m7'> Optional: false DBCONFIG_HOST_NAME: <set to the key 'dbHost' of config map 'pipeline-install-config-m2k6bmc5m7'> Optional: false DBCONFIG_PORT: <set to the key 'dbPort' of config map 'pipeline-install-config-m2k6bmc5m7'> Optional: false DBCONFIG_USER: <set to the key 'username' in secret 'mysql-secret-fd5gktm75t'> Optional: false DBCONFIG_PASSWORD: <set to the key 'password' in secret 'mysql-secret-fd5gktm75t'> Optional: false NAMESPACE_TO_WATCH: kubeflow (v1:metadata.namespace) Mounts: /etc/webhook/certs from webhook-tls-certs (ro) /var/run/secrets/kubernetes.io/serviceaccount from kubeflow-pipelines-cache-token-bpbng (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: webhook-tls-certs: Type: Secret (a volume populated by a Secret) SecretName: webhook-server-tls Optional: false kubeflow-pipelines-cache-token-bpbng: Type: Secret (a volume populated by a Secret) SecretName: kubeflow-pipelines-cache-token-bpbng Optional: false QoS Class: BestEffort Node-Selectors: <none> Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 7m16s default-scheduler 
Successfully assigned kubeflow/cache-server-b68649ff-dl89c to kind-control-plane Warning FailedMount 65s (x11 over 7m16s) kubelet MountVolume.SetUp failed for volume "webhook-tls-certs" : secret "webhook-server-tls" not found Warning FailedMount 43s (x3 over 5m14s) kubelet Unable to attach or mount volumes: unmounted volumes=[webhook-tls-certs], unattached volumes=[webhook-tls-certs kubeflow-pipelines-cache-token-bpbng]: timed out waiting for the condition ``` Looks like it was not able to find `webhook-tls-certs` ### Expected result All pods shall be in running status <!-- What should the correct behavior be? --> ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
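Since the failing mount points at a missing `webhook-server-tls` secret, which the cache-deployer is responsible for creating, a quick sketch for checking it programmatically before digging into the deployer logs; cluster access is assumed to come from the local kubeconfig:

```python
from kubernetes import client, config
from kubernetes.client.rest import ApiException

config.load_kube_config()
v1 = client.CoreV1Api()
try:
    v1.read_namespaced_secret("webhook-server-tls", "kubeflow")
    print("secret exists; cache-server should be able to mount it")
except ApiException as e:
    if e.status == 404:
        print("secret missing -- inspect the cache-deployer logs")
    else:
        raise
```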
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5924/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/kubeflow/pipelines/issues/5924/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5921
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5921/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5921/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5921/events
https://github.com/kubeflow/pipelines/issues/5921
930,645,145
MDU6SXNzdWU5MzA2NDUxNDU=
5,921
[sdk] run_id.run.error is None
{ "login": "horatiu-negutoiu", "id": 19393629, "node_id": "MDQ6VXNlcjE5MzkzNjI5", "avatar_url": "https://avatars.githubusercontent.com/u/19393629?v=4", "gravatar_id": "", "url": "https://api.github.com/users/horatiu-negutoiu", "html_url": "https://github.com/horatiu-negutoiu", "followers_url": "https://api.github.com/users/horatiu-negutoiu/followers", "following_url": "https://api.github.com/users/horatiu-negutoiu/following{/other_user}", "gists_url": "https://api.github.com/users/horatiu-negutoiu/gists{/gist_id}", "starred_url": "https://api.github.com/users/horatiu-negutoiu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/horatiu-negutoiu/subscriptions", "organizations_url": "https://api.github.com/users/horatiu-negutoiu/orgs", "repos_url": "https://api.github.com/users/horatiu-negutoiu/repos", "events_url": "https://api.github.com/users/horatiu-negutoiu/events{/privacy}", "received_events_url": "https://api.github.com/users/horatiu-negutoiu/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "This issue has been automatically closed because it has not had recent activity. Please comment \"/reopen\" to reopen it.\n" ]
2021-06-26T08:21:36
2022-03-03T04:05:11
2022-03-03T04:05:11
NONE
null
### Environment * KFP version: 1.4.1 * KFP SDK version: 1.4.0 * All dependencies version: kfp 1.4.0 kfp-pipeline-spec 0.1.8 kfp-server-api 1.6.0 ### Steps to reproduce 1. Write a sample pipeline containing a component that fails on purpose and an exit handler. The parameter passed to the exit handler contains the run_id. ```python import kfp from kfp import dsl, components @components.create_component_from_func def echo_msg(msg: str): """Echo a message by parameter.""" print(msg) @components.create_component_from_func def raise_error(): """Raises an error on purpose.""" raise AssertionError("Data was unexpected.") @dsl.pipeline( name='exit-handler-test', description="Testing kubeflow's exit handler." ) def pipeline_exit_handler(echoed_message: str = 'hello world'): """A sample pipeline showing exit handler.""" exit_task = echo_msg(kfp.dsl.RUN_ID_PLACEHOLDER) with dsl.ExitHandler(exit_task): raise_error() if __name__ == '__main__': kfp.compiler.Compiler().compile(pipeline_exit_handler, __file__ + '.yaml') ``` 2. Upload the pipeline to Kubeflow (in my case, GCP's AI Pipelines). 3. Create an experiment, then a run using the pipeline. 4. After the run completes (and fails accordingly), copy the run_id from the exit handler. 5. Obtain run info: ```python import kfp kfp_client = kfp.Client(host="https://some-ai-pipelines-url.pipelines.googleusercontent.com/") run_info = kfp_client.runs.get_run("<run-id-copied-from-exit-handler>") assert run_info.run.status == 'Failed' # the run status is correct print(run_info.run.error) ``` ### Expected result I would expect `run_info.run.error` to contain some sort of "AssertionError Data was unexpected" message but it doesn't, it's just None. In this case, the exit handler is compiled from a function but, for the sake of code re-use, the code in step 5 would probably become an image. I'm not sure if the behaviour is the same in that case. I'm trying to find a way to extract the reason why a pipeline failed and post it on slack. - if the runs are recurring, i imagine it _can_ be done from the exit handler - if the runs are one-offs and are being started by something like Airflow, Airflow should be able to get the run info and the error just as well. ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
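Until `run.error` is populated, one possible workaround is to dig the per-node failure message out of the Argo workflow manifest attached to the run. A sketch, with the host and run id as placeholders; note that a node's `message` is typically the container exit status, so the full Python traceback may still only live in the step's `main.log` artifact:

```python
import json
import kfp

client = kfp.Client(host="https://<your-kfp-endpoint>")  # placeholder host
run_detail = client.get_run("<run-id-copied-from-exit-handler>")

# run.error is often empty, but the embedded Argo workflow status usually
# carries a per-node failure message.
workflow = json.loads(run_detail.pipeline_runtime.workflow_manifest)
for node in workflow.get("status", {}).get("nodes", {}).values():
    if node.get("phase") in ("Failed", "Error"):
        print(node.get("displayName"), "->", node.get("message"))
```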
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5921/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5921/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5920
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5920/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5920/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5920/events
https://github.com/kubeflow/pipelines/issues/5920
930,631,422
MDU6SXNzdWU5MzA2MzE0MjI=
5,920
Enable sorting by status in the UI
{ "login": "midhun1998", "id": 24776450, "node_id": "MDQ6VXNlcjI0Nzc2NDUw", "avatar_url": "https://avatars.githubusercontent.com/u/24776450?v=4", "gravatar_id": "", "url": "https://api.github.com/users/midhun1998", "html_url": "https://github.com/midhun1998", "followers_url": "https://api.github.com/users/midhun1998/followers", "following_url": "https://api.github.com/users/midhun1998/following{/other_user}", "gists_url": "https://api.github.com/users/midhun1998/gists{/gist_id}", "starred_url": "https://api.github.com/users/midhun1998/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/midhun1998/subscriptions", "organizations_url": "https://api.github.com/users/midhun1998/orgs", "repos_url": "https://api.github.com/users/midhun1998/repos", "events_url": "https://api.github.com/users/midhun1998/events{/privacy}", "received_events_url": "https://api.github.com/users/midhun1998/received_events", "type": "User", "site_admin": false }
[ { "id": 930476737, "node_id": "MDU6TGFiZWw5MzA0NzY3Mzc=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/help%20wanted", "name": "help wanted", "color": "db1203", "default": true, "description": "The community is welcome to contribute." }, { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2152751095, "node_id": "MDU6TGFiZWwyMTUyNzUxMDk1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/frozen", "name": "lifecycle/frozen", "color": "ededed", "default": false, "description": null } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "/lifecycle frozen", "We can use `filter` to obtain runs by specifying states: https://github.com/kubeflow/pipelines/blob/master/backend/api/filter.proto#L81-L88", "Hi @zijianjoy . Thanks for the hint. Yes we can filter and show it in in the UI. I would let someone pick this up who is interested in contributing to the frontend. \n\n/help\n", "@midhun1998: \n\tThis request has been marked as needing help from a contributor.\n\nPlease ensure the request meets the requirements listed [here](https://git.k8s.io/community/contributors/guide/help-wanted.md).\n\nIf this request no longer meets these requirements, the label can be removed\nby commenting with the `/remove-help` command.\n\n\n<details>\n\nIn response to [this](https://github.com/kubeflow/pipelines/issues/5920):\n\n>Hi @zijianjoy . Thanks for the hint. Yes we can filter and show it in in UI. I would let someone pick this up who is intrested in frontend. \n>\n>/help\n>\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n</details>" ]
2021-06-26T06:52:17
2021-10-12T14:40:19
null
MEMBER
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> More of a productivity improvement. 🚀 /area frontend ### What feature would you like to see? Enable sorting by status for runs under the Experiment and Runs section of UI which would filter all Running pipelines to the top. <!-- Provide a description of this feature and the user experience. --> ### What is the use case or pain point? Currently, the Kubeflow pipeline experiment UI only sorts based on start time. But sometimes when there are a lot of pipelines running in the same experiment the running ones tend to be below completed ones which makes it difficult to search for a running pipeline especially when recurring runs have been configured. This feature will enable users to find and track pipelines quicker. <!-- It helps us understand the benefit of this feature for your use case. --> ### Is there a workaround currently? No workarounds were found. At present, we manually search for the running ones. <!-- Without this feature, how do you accomplish your task today? --> --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5920/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5920/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5917
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5917/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5917/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5917/events
https://github.com/kubeflow/pipelines/issues/5917
929,799,663
MDU6SXNzdWU5Mjk3OTk2NjM=
5,917
Pass pandas dataframe from one component to other without writing and reading
{ "login": "motilalmeher", "id": 49094780, "node_id": "MDQ6VXNlcjQ5MDk0Nzgw", "avatar_url": "https://avatars.githubusercontent.com/u/49094780?v=4", "gravatar_id": "", "url": "https://api.github.com/users/motilalmeher", "html_url": "https://github.com/motilalmeher", "followers_url": "https://api.github.com/users/motilalmeher/followers", "following_url": "https://api.github.com/users/motilalmeher/following{/other_user}", "gists_url": "https://api.github.com/users/motilalmeher/gists{/gist_id}", "starred_url": "https://api.github.com/users/motilalmeher/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/motilalmeher/subscriptions", "organizations_url": "https://api.github.com/users/motilalmeher/orgs", "repos_url": "https://api.github.com/users/motilalmeher/repos", "events_url": "https://api.github.com/users/motilalmeher/events{/privacy}", "received_events_url": "https://api.github.com/users/motilalmeher/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
closed
false
null
[]
null
[ "I think argo does not support panda frame. As a workaround, can the user store panda dataframe in an artifact? @Bobgy Can you confirm?", "Thanks @capri-xiyue for the heads up!\n\nIIUC, @motilalmeher is already writing the data to a storage as an artifact.\n\nFor clarification, the major reason data have to be in a storage between steps is that each step may run in a different node, so there has to be a way to pass the data.\n\nTherefore @motilalmeher 's request implies that we need a way to run multiple components in one single Pod and avoid intermediate data passing using a remote storage.\n\nI believe with argo v3.1 emissary executor and the support for container DAG in a single Pod, we are much closer to be able to implement this feature.\n\nI'd like to know more feedback both from you and others about possible use cases for this feature.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "This issue has been automatically closed because it has not had recent activity. Please comment \"/reopen\" to reopen it.\n" ]
2021-06-25T03:44:07
2022-03-03T04:05:12
2022-03-03T04:05:12
NONE
null
I am currently working with a large dataset, and my pipeline has many components. I want to make the process fast. Currently I pass data from one component to another by writing it to storage and passing the path, so the next component can read the data back. Is there any way to pass a pandas DataFrame directly, the way strings are passed?
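As noted in the comments, steps generally run in separate pods, so a DataFrame cannot be handed over in memory the way small string parameters are; the supported pattern is file-based data passing, which the SDK wires up for you. A minimal sketch using `InputPath`/`OutputPath` with parquet (component and parameter names are illustrative):

```python
import kfp
from kfp.components import InputPath, OutputPath, create_component_from_func

def make_table(table_path: OutputPath("parquet")):
    """Write the DataFrame where KFP tells us to; KFP handles the transfer."""
    import pandas as pd
    pd.DataFrame({"a": range(5)}).to_parquet(table_path)

def use_table(table_path: InputPath("parquet")):
    import pandas as pd
    print(pd.read_parquet(table_path).describe())

make_table_op = create_component_from_func(
    make_table, packages_to_install=["pandas", "pyarrow"])
use_table_op = create_component_from_func(
    use_table, packages_to_install=["pandas", "pyarrow"])

@kfp.dsl.pipeline(name="df-passing")
def df_passing_pipeline():
    # The "_path" suffix is stripped, so the output is named "table".
    table_task = make_table_op()
    use_table_op(table=table_task.outputs["table"])
```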
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5917/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5917/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5915
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5915/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5915/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5915/events
https://github.com/kubeflow/pipelines/issues/5915
929334289
MDU6SXNzdWU5MjkzMzQyODk=
5915
[bug] `from google_cloud_pipeline_components import aiplatform` results in TypeError: 'NoneType' object is not iterable
{ "login": "A-Pot", "id": 48222322, "node_id": "MDQ6VXNlcjQ4MjIyMzIy", "avatar_url": "https://avatars.githubusercontent.com/u/48222322?v=4", "gravatar_id": "", "url": "https://api.github.com/users/A-Pot", "html_url": "https://github.com/A-Pot", "followers_url": "https://api.github.com/users/A-Pot/followers", "following_url": "https://api.github.com/users/A-Pot/following{/other_user}", "gists_url": "https://api.github.com/users/A-Pot/gists{/gist_id}", "starred_url": "https://api.github.com/users/A-Pot/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/A-Pot/subscriptions", "organizations_url": "https://api.github.com/users/A-Pot/orgs", "repos_url": "https://api.github.com/users/A-Pot/repos", "events_url": "https://api.github.com/users/A-Pot/events{/privacy}", "received_events_url": "https://api.github.com/users/A-Pot/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "sasha-gitg", "id": 44654632, "node_id": "MDQ6VXNlcjQ0NjU0NjMy", "avatar_url": "https://avatars.githubusercontent.com/u/44654632?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sasha-gitg", "html_url": "https://github.com/sasha-gitg", "followers_url": "https://api.github.com/users/sasha-gitg/followers", "following_url": "https://api.github.com/users/sasha-gitg/following{/other_user}", "gists_url": "https://api.github.com/users/sasha-gitg/gists{/gist_id}", "starred_url": "https://api.github.com/users/sasha-gitg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sasha-gitg/subscriptions", "organizations_url": "https://api.github.com/users/sasha-gitg/orgs", "repos_url": "https://api.github.com/users/sasha-gitg/repos", "events_url": "https://api.github.com/users/sasha-gitg/events{/privacy}", "received_events_url": "https://api.github.com/users/sasha-gitg/received_events", "type": "User", "site_admin": false }
[ { "login": "SinaChavoshi", "id": 20114005, "node_id": "MDQ6VXNlcjIwMTE0MDA1", "avatar_url": "https://avatars.githubusercontent.com/u/20114005?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SinaChavoshi", "html_url": "https://github.com/SinaChavoshi", "followers_url": "https://api.github.com/users/SinaChavoshi/followers", "following_url": "https://api.github.com/users/SinaChavoshi/following{/other_user}", "gists_url": "https://api.github.com/users/SinaChavoshi/gists{/gist_id}", "starred_url": "https://api.github.com/users/SinaChavoshi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SinaChavoshi/subscriptions", "organizations_url": "https://api.github.com/users/SinaChavoshi/orgs", "repos_url": "https://api.github.com/users/SinaChavoshi/repos", "events_url": "https://api.github.com/users/SinaChavoshi/events{/privacy}", "received_events_url": "https://api.github.com/users/SinaChavoshi/received_events", "type": "User", "site_admin": false }, { "login": "sasha-gitg", "id": 44654632, "node_id": "MDQ6VXNlcjQ0NjU0NjMy", "avatar_url": "https://avatars.githubusercontent.com/u/44654632?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sasha-gitg", "html_url": "https://github.com/sasha-gitg", "followers_url": "https://api.github.com/users/sasha-gitg/followers", "following_url": "https://api.github.com/users/sasha-gitg/following{/other_user}", "gists_url": "https://api.github.com/users/sasha-gitg/gists{/gist_id}", "starred_url": "https://api.github.com/users/sasha-gitg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sasha-gitg/subscriptions", "organizations_url": "https://api.github.com/users/sasha-gitg/orgs", "repos_url": "https://api.github.com/users/sasha-gitg/repos", "events_url": "https://api.github.com/users/sasha-gitg/events{/privacy}", "received_events_url": "https://api.github.com/users/sasha-gitg/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @sasha-gitg @SinaChavoshi ", "I managed to work around this by using python 3.9.6. No idea why that would affect this but it appears to help.", "This issue was fixed by https://github.com/googleapis/python-aiplatform/pull/494 " ]
2021-06-24T15:13:30
2021-08-03T17:52:59
2021-08-03T17:52:59
NONE
null
When I try to import `aiplatform` from `google_cloud_pipeline_components`, I get a `TypeError`, and the import fails. ### What steps did you take To reproduce the issue: 1. Create a new virtual environment (Python version 3.6.9): `python3 -m virtualenv gcp_aip` 2. Activate the environment: `source gcp_aip/bin/activate` 3. Install per instructions: `pip install -U google-cloud-pipeline-components` 4. Start a new Python session and try to import aiplatform: `python3 -c 'from google_cloud_pipeline_components import aiplatform'` ### What happened: I get a `TypeError`, specifically the following: ``` Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/ubuntu/.env/gcp_aip/lib/python3.6/site-packages/google_cloud_pipeline_components/aiplatform/__init__.py", line 68, in <module> aiplatform_sdk.ImageDataset.export_data, File "/home/ubuntu/.env/gcp_aip/lib/python3.6/site-packages/google_cloud_pipeline_components/aiplatform/utils.py", line 465, in convert_method_to_component output_type TypeError: 'NoneType' object is not iterable ``` ### What did you expect to happen: I expected the import to succeed. ### Environment: Ubuntu 18.04 / Python 3.6.9. I suspect this may have something to do with the version of Python. The error above happens with Python 3.6.9. I tried the same process with Python 3.8.5, and I did not get the above error. ### Labels /area components --- Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
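Given the report that the failure reproduces on Python 3.6 but not on 3.8, a hedged stopgap (the version cutoff is an assumption based on this thread, not a documented requirement) is to fail fast before the import:

```python
import sys

# Assumption from this thread: the import breaks on Python 3.6 and works on
# 3.8+ (the root cause was later fixed in python-aiplatform PR #494).
if sys.version_info < (3, 7):
    raise RuntimeError(
        "google_cloud_pipeline_components failed to import on Python "
        f"{sys.version.split()[0]}; please use Python >= 3.7"
    )

from google_cloud_pipeline_components import aiplatform  # noqa: E402
```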
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5915/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5915/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5913
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5913/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5913/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5913/events
https://github.com/kubeflow/pipelines/issues/5913
929046062
MDU6SXNzdWU5MjkwNDYwNjI=
5913
Mask sensitive parameters that are passed to a pipeline
{ "login": "raghavendramanandi", "id": 1164122, "node_id": "MDQ6VXNlcjExNjQxMjI=", "avatar_url": "https://avatars.githubusercontent.com/u/1164122?v=4", "gravatar_id": "", "url": "https://api.github.com/users/raghavendramanandi", "html_url": "https://github.com/raghavendramanandi", "followers_url": "https://api.github.com/users/raghavendramanandi/followers", "following_url": "https://api.github.com/users/raghavendramanandi/following{/other_user}", "gists_url": "https://api.github.com/users/raghavendramanandi/gists{/gist_id}", "starred_url": "https://api.github.com/users/raghavendramanandi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/raghavendramanandi/subscriptions", "organizations_url": "https://api.github.com/users/raghavendramanandi/orgs", "repos_url": "https://api.github.com/users/raghavendramanandi/repos", "events_url": "https://api.github.com/users/raghavendramanandi/events{/privacy}", "received_events_url": "https://api.github.com/users/raghavendramanandi/received_events", "type": "User", "site_admin": false }
[ { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
closed
false
null
[]
null
[ "Hi @raghavendramanandi, parameters are by design public.\r\nYou should use secret solutions for this, e.g. Kubernetes secret.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "This issue has been automatically closed because it has not had recent activity. Please comment \"/reopen\" to reopen it.\n" ]
2021-06-24T09:44:54
2022-03-03T03:05:32
2022-03-03T03:05:32
NONE
null
While running a pipeline, we can specify parameters. Is there a way to mask those parameter values, or display **** instead, just like a password field would?
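A minimal sketch of the Kubernetes Secret route suggested in the reply, assuming KFP SDK v1's `kfp.onprem.use_k8s_secret` helper; the Secret name `db-credentials`, its key `password`, and the env var `DB_PASSWORD` are hypothetical. The sensitive value never appears as a pipeline parameter, so there is nothing to mask in the UI:

```python
from kfp import dsl, onprem
from kfp.components import create_component_from_func

def use_password():
    """Reads the secret from an env var instead of a pipeline parameter."""
    import os
    password = os.environ["DB_PASSWORD"]  # injected from the Kubernetes Secret
    print("got a password of length", len(password))  # never log the value itself

use_password_op = create_component_from_func(use_password, base_image="python:3.8")

@dsl.pipeline(name="masked-secret-example")
def pipeline():
    # Expose key "password" of Secret "db-credentials" as env var DB_PASSWORD.
    use_password_op().apply(
        onprem.use_k8s_secret(
            secret_name="db-credentials",
            k8s_secret_key_to_env={"password": "DB_PASSWORD"},
        )
    )
```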
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5913/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5913/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5912
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5912/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5912/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5912/events
https://github.com/kubeflow/pipelines/issues/5912
928824724
MDU6SXNzdWU5Mjg4MjQ3MjQ=
5912
[sdk compiler] Compilation succeeds, but Vertex Pipelines validation fails
{ "login": "TrsNium", "id": 11388424, "node_id": "MDQ6VXNlcjExMzg4NDI0", "avatar_url": "https://avatars.githubusercontent.com/u/11388424?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TrsNium", "html_url": "https://github.com/TrsNium", "followers_url": "https://api.github.com/users/TrsNium/followers", "following_url": "https://api.github.com/users/TrsNium/following{/other_user}", "gists_url": "https://api.github.com/users/TrsNium/gists{/gist_id}", "starred_url": "https://api.github.com/users/TrsNium/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TrsNium/subscriptions", "organizations_url": "https://api.github.com/users/TrsNium/orgs", "repos_url": "https://api.github.com/users/TrsNium/repos", "events_url": "https://api.github.com/users/TrsNium/events{/privacy}", "received_events_url": "https://api.github.com/users/TrsNium/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "```\r\n \"pipelineInfo\": {\r\n \"name\": \"test\"\r\n },\r\n \"root\": {\r\n \"dag\": {\r\n \"tasks\": {\r\n \"for-loop-1\": {\r\n \"componentRef\": {\r\n \"name\": \"comp-for-loop-1\"\r\n },\r\n \"dependentTasks\": [\r\n \"get-parallel-offsets\"\r\n ],\r\n \"inputs\": {\r\n \"artifacts\": {\r\n \"pipelineparam--get-parallel-offsets-offsets\": {\r\n \"taskOutputArtifact\": {\r\n \"outputArtifactKey\": \"offsets\",\r\n \"producerTask\": \"get-parallel-offsets\"\r\n }\r\n }\r\n }\r\n },\r\n \"parameterIterator\": {\r\n \"itemInput\": \"pipelineparam--get-parallel-offsets-offsets-loop-item\",\r\n \"items\": {\r\n \"inputParameter\": \"pipelineparam--get-parallel-offsets-offsets\"\r\n }\r\n },\r\n \"taskInfo\": {\r\n \"name\": \"for-loop-1\"\r\n }\r\n },\r\n \"get-parallel-offsets\": {\r\n \"componentRef\": {\r\n \"name\": \"comp-get-parallel-offsets\"\r\n },\r\n \"inputs\": {\r\n \"parameters\": {\r\n \"parallel_num\": {\r\n \"componentInputParameter\": \"parallel_num\"\r\n },\r\n \"rows_count\": {\r\n \"runtimeValue\": {\r\n \"constantValue\": {\r\n \"intValue\": \"3000\"\r\n }\r\n }\r\n }\r\n }\r\n },\r\n \"taskInfo\": {\r\n \"name\": \"get-parallel-offsets\"\r\n }\r\n }\r\n }\r\n },\r\n \"inputDefinitions\": {\r\n \"parameters\": {\r\n \"parallel_num\": {\r\n \"type\": \"INT\"\r\n }\r\n }\r\n }\r\n },\r\n \"schemaVersion\": \"2.0.0\",\r\n \"sdkVersion\": \"kfp-1.6.3\"\r\n },\r\n \"runtimeConfig\": {\r\n \"parameters\": {\r\n \"parallel_num\": {\r\n \"intValue\": \"10\"\r\n }\r\n }\r\n }\r\n}\r\n```\r\n~I was looking at the compiled dsl and it looks like there is no dependency on print_op.~\r\n~This has nothing to do with the above error, but I think it might be a problem later as an error~\r\n```\r\n{\r\n \"pipelineSpec\": {\r\n \"components\": {\r\n \"comp-for-loop-1\": {\r\n \"dag\": {\r\n \"tasks\": {\r\n \"print-op\": {\r\n \"componentRef\": {\r\n \"name\": \"comp-print-op\"\r\n },\r\n \"inputs\": {\r\n \"parameters\": {\r\n \"message\": {\r\n \"componentInputParameter\": \"pipelineparam--get-parallel-offsets-offsets-loop-item\",\r\n \"parameterExpressionSelector\": \"parseJson(string_value)[\\\"upper_bounds\\\"]\"\r\n }\r\n }\r\n },\r\n \"taskInfo\": {\r\n \"name\": \"print-op\"\r\n }\r\n }\r\n }\r\n },\r\n \"inputDefinitions\": {\r\n \"artifacts\": {\r\n \"pipelineparam--get-parallel-offsets-offsets\": {\r\n \"artifactType\": {\r\n \"schemaTitle\": \"system.Artifact\"\r\n }\r\n }\r\n },\r\n \"parameters\": {\r\n \"pipelineparam--get-parallel-offsets-offsets-loop-item\": {\r\n \"type\": \"STRING\"\r\n }\r\n }\r\n }\r\n },\r\n```\r\nThe above comment is my mistake.\r\nIt was defined as a dag in the component.", "cc @chensun ", "```python\r\nfrom typing import NamedTuple\r\n\r\n@component\r\ndef multiple_return_values_example(a: float, b: float) -> NamedTuple(\r\n 'ExampleOutputs',\r\n [\r\n ('sum', float),\r\n ('product', float)\r\n ]):\r\n \"\"\"Example function that demonstrates how to return multiple values.\"\"\" \r\n sum_value = a + b\r\n product_value = a * b\r\n\r\n from collections import namedtuple\r\n example_output = namedtuple('ExampleOutputs', ['sum', 'product'])\r\n return example_output(sum_value, product_value)\r\n```\r\n\r\n```json\r\n \"comp-multiple-return-values-example\": {\r\n \"executorLabel\": \"exec-multiple-return-values-example\",\r\n \"inputDefinitions\": {\r\n \"parameters\": {\r\n \"a\": {\r\n \"type\": \"DOUBLE\"\r\n },\r\n \"b\": {\r\n \"type\": \"DOUBLE\"\r\n }\r\n }\r\n },\r\n \"outputDefinitions\": {\r\n \"parameters\": {\r\n \"product\": {\r\n 
\"type\": \"DOUBLE\"\r\n },\r\n \"sum\": {\r\n \"type\": \"DOUBLE\"\r\n }\r\n }\r\n }\r\n },\r\n```\r\n\r\nWhen I used the example function, I found that the outputDefinitions field of the compiled json was set to parameters instead of artifact information.\r\n\r\nAnd in v1 compiler, I notice that both artifacts and parameters are defined as outputs.", "Could you please share any v2 compiler or compiled dsl specification to make it easier to contribute?", "> And in v1 compiler, I notice that both artifacts and parameters are defined as outputs.\r\n\r\n> Could you please share any v2 compiler or compiled dsl specification to make it easier to contribute?\r\n\r\nHi @TrsNium, thanks for opening this issue with all the details. In v2, we have the distinction between parameters and artifacts, and they have different behaviors. The decision of parameter vs. artifact is based on the user declared types. Here's our latest doc touching this topic: https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility/#building-pipelines-using-the-kubeflow-pipelines-sdk-v2\r\nIn short, inputs/outputs typed as str, int, float, bool, dict, or list are treated as parameters, and everything else including untyped inputs/outputs are treated as artifacts. Apologize that our doc isn't super clear on this topic. We will have some additional docs available soon.\r\n\r\nBack to your example, your output `offsets` seems to be an output parameter. Can you please try type it as `dict` or `typing.Dict`? I think that will probably solve this issue.\r\n\r\nThat being said, I realize that we probably have a bug here that we cannot define output artifact using `NamedTuple` return annotation, but compiler doesn't throw any error in such case. We'll try to fix that separately.\r\n\r\n\r\n ", "Hi @chensun \r\n`typing.Dict` and `typing.List` is not worked, so fix that in this PR https://github.com/kubeflow/pipelines/pull/5979 .", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-06-24T04:03:35
2022-03-03T02:05:37
null
CONTRIBUTOR
null
### Environment * KFP version: GCP VertexAi Pipelines * KFP SDK version: build sdk on master branch ### Steps to reproduce When I try to loop through the following python function to get the output, the validation of Vertex Pipelines fails. ```python import kfp.components as comp from kfp.v2.dsl import component @component def print_op(message: str): """Prints a message.""" print(message) def get_parallel_offsets( rows_count: int, parallel_num: int, ) -> NamedTuple( "Offsets", [("offsets", "Offsets")], ): """ Get the offset of each parallel (from the number of rows and the number of parallels in the table.) Parameters ---------- rows_count: int number of bigquery table's rows parallel_num: int number of parallels """ from collections import namedtuple import math import json if rows_count % parallel_num == 0: offset_step = limit = int(rows_count / parallel_num) else: offset_step = limit = math.ceil(rows_count / parallel_num) # NOTE: When using `json.dump`, if a number with a large number of digits is included, the number will be converted to Scientific Notation format, so convert it to a string type once. offsets = [ {"index": str(index), "offset": str(offset), "upper_bounds": str(offset+limit)} for index, offset in enumerate(range(0, rows_count, offset_step)) ] output = namedtuple("Offsets", ["offsets"]) return output(json.dumps(offsets)) @dsl.pipeline( name="test", ) def test( parallel_num: int=10, ) -> None: get_parallel_offsets_op = comp.create_component_from_func( func=get_parallel_offsets, base_image="python:alpine" ) get_parallel_offsets_task = get_parallel_offsets_op( #get_count_rows_task.output, parallel_num 3000, parallel_num ) with dsl.ParallelFor( get_parallel_offsets_task.outputs["offsets"] ) as offset: print_op(offset.offset) print_op(offset.index) ``` error message ``` File "/Users/takuya.hirata/.pyenv/versions/3.7.1rc1/lib/python3.7/site-packages/kfp-1.6.3-py3.7.egg/kfp/v2/google/client/client.py", line 344, in create_run_from_job_spec job_id=job_id, File "/Users/takuya.hirata/.pyenv/versions/3.7.1rc1/lib/python3.7/site-packages/kfp-1.6.3-py3.7.egg/kfp/v2/google/client/client.py", line 228, in _submit_job response = request.execute() File "/Users/takuya.hirata/.pyenv/versions/3.7.1rc1/lib/python3.7/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper return wrapped(*args, **kwargs) File "/Users/takuya.hirata/.pyenv/versions/3.7.1rc1/lib/python3.7/site-packages/googleapiclient/http.py", line 915, in execute raise HttpError(resp, content, uri=self.uri) googleapiclient.errors.HttpError: <HttpError 400 when requesting https://asia-east1-aiplatform.googleapis.com/v1beta1/projects/zozo-mlops-dev/locations/asia-east1/pipelineJobs?pipelineJobId=test-20210624125423&alt=json returned "Iterator items parameter 'pipelineparam--get-parallel-offsets-offsets' cannot be found from the inputs of the task 'for-loop-1'.". 
Details: "Iterator items parameter 'pipelineparam--get-parallel-offsets-offsets' cannot be found from the inputs of the task 'for-loop-1'."> ``` <details> <summary>compiled json</summary> ```json { "pipelineSpec": { "components": { "comp-for-loop-1": { "dag": { "tasks": { "print-op": { "componentRef": { "name": "comp-print-op" }, "inputs": { "parameters": { "message": { "componentInputParameter": "pipelineparam--get-parallel-offsets-offsets-loop-item", "parameterExpressionSelector": "parseJson(string_value)[\"upper_bounds\"]" } } }, "taskInfo": { "name": "print-op" } } } }, "inputDefinitions": { "artifacts": { "pipelineparam--get-parallel-offsets-offsets": { "artifactType": { "schemaTitle": "system.Artifact" } } }, "parameters": { "pipelineparam--get-parallel-offsets-offsets-loop-item": { "type": "STRING" } } } }, "comp-get-parallel-offsets": { "executorLabel": "exec-get-parallel-offsets", "inputDefinitions": { "parameters": { "parallel_num": { "type": "INT" }, "rows_count": { "type": "INT" } } }, "outputDefinitions": { "artifacts": { "offsets": { "artifactType": { "schemaTitle": "system.Artifact" } } } } }, "comp-print-op": { "executorLabel": "exec-print-op", "inputDefinitions": { "parameters": { "message": { "type": "STRING" } } } } }, "deploymentSpec": { "executors": { "exec-get-parallel-offsets": { "container": { "args": [ "--rows-count", "{{$.inputs.parameters['rows_count']}}", "--parallel-num", "{{$.inputs.parameters['parallel_num']}}", "----output-paths", "{{$.outputs.artifacts['offsets'].path}}" ], "command": [ "sh", "-ec", "program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n", "def get_parallel_offsets(\n rows_count,\n parallel_num,\n):\n \"\"\"\n Get the offset of each parallel (from the number of rows and the number of parallels in the table.)\n\n Parameters\n ----------\n rows_count: int\n number of bigquery table's rows\n parallel_num: int\n number of parallels\n \"\"\"\n from collections import namedtuple\n import math\n import json\n\n if rows_count % parallel_num == 0:\n offset_step = limit = int(rows_count / parallel_num)\n else:\n offset_step = limit = math.ceil(rows_count / parallel_num)\n\n # NOTE: When using `json.dump`, if a number with a large number of digits is included, the number will be converted to Scientific Notation format, so convert it to a string type once.\n offsets = [\n {\"index\": str(index), \"offset\": str(offset), \"upper_bounds\": str(offset+limit)}\n for index, offset in enumerate(range(0, rows_count, offset_step))\n ]\n output = namedtuple(\"Offsets\", [\"offsets\"])\n return output(json.dumps(offsets))\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Get parallel offsets', description='Get the offset of each parallel (from the number of rows and the number of parallels in the table.)')\n_parser.add_argument(\"--rows-count\", dest=\"rows_count\", type=int, required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"--parallel-num\", dest=\"parallel_num\", type=int, required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"----output-paths\", dest=\"_output_paths\", type=str, nargs=1)\n_parsed_args = vars(_parser.parse_args())\n_output_files = _parsed_args.pop(\"_output_paths\", [])\n\n_outputs = get_parallel_offsets(**_parsed_args)\n\n_output_serializers = [\n str,\n\n]\n\nimport os\nfor idx, output_file in enumerate(_output_files):\n try:\n os.makedirs(os.path.dirname(output_file))\n except OSError:\n pass\n with open(output_file, 'w') as f:\n 
f.write(_output_serializers[idx](_outputs[idx]))\n" ], "image": "python:alpine" } }, "exec-print-op": { "container": { "args": [ "--executor_input", "{{$}}", "--function_to_execute", "print_op", "--message-output-path", "{{$.inputs.parameters['message']}}" ], "command": [ "sh", "-ec", "program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n", "\nimport json\nimport inspect\nfrom typing import *\n\n# Copyright 2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Classes for input/output types in KFP SDK.\n\nThese are only compatible with v2 Pipelines.\n\"\"\"\n\nimport os\nfrom typing import Dict, Generic, List, Optional, Type, TypeVar, Union\n\n\n_GCS_LOCAL_MOUNT_PREFIX = '/gcs/'\n_MINIO_LOCAL_MOUNT_PREFIX = '/minio/'\n_S3_LOCAL_MOUNT_PREFIX = '/s3/'\n\n\nclass Artifact(object):\n \"\"\"Generic Artifact class.\n\n This class is meant to represent the metadata around an input or output\n machine-learning Artifact. Artifacts have URIs, which can either be a location\n on disk (or Cloud storage) or some other resource identifier such as\n an API resource name.\n\n Artifacts carry a `metadata` field, which is a dictionary for storing\n metadata related to this artifact.\n \"\"\"\n TYPE_NAME = 'system.Artifact'\n\n def __init__(self,\n name: Optional[str] = None,\n uri: Optional[str] = None,\n metadata: Optional[Dict] = None):\n \"\"\"Initializes the Artifact with the given name, URI and metadata.\"\"\"\n self.uri = uri or ''\n self.name = name or ''\n self.metadata = metadata or {}\n\n @property\n def path(self):\n return self._get_path()\n\n @path.setter\n def path(self, path):\n self._set_path(path)\n\n def _get_path(self) -> Optional[str]:\n if self.uri.startswith('gs://'):\n return _GCS_LOCAL_MOUNT_PREFIX + self.uri[len('gs://'):]\n elif self.uri.startswith('minio://'):\n return _MINIO_LOCAL_MOUNT_PREFIX + self.uri[len('minio://'):]\n elif self.uri.startswith('s3://'):\n return _S3_LOCAL_MOUNT_PREFIX + self.uri[len('s3://'):]\n return None\n\n def _set_path(self, path):\n if path.startswith(_GCS_LOCAL_MOUNT_PREFIX):\n path = 'gs://' + path[len(_GCS_LOCAL_MOUNT_PREFIX):]\n elif path.startswith(_MINIO_LOCAL_MOUNT_PREFIX):\n path = 'minio://' + path[len(_MINIO_LOCAL_MOUNT_PREFIX):]\n elif path.startswith(_S3_LOCAL_MOUNT_PREFIX):\n path = 's3://' + path[len(_S3_LOCAL_MOUNT_PREFIX):]\n self.uri = path\n\n\nclass Model(Artifact):\n \"\"\"An artifact representing an ML Model.\"\"\"\n TYPE_NAME = 'system.Model'\n\n def __init__(self,\n name: Optional[str] = None,\n uri: Optional[str] = None,\n metadata: Optional[Dict] = None):\n super().__init__(uri=uri, name=name, metadata=metadata)\n\n @property\n def framework(self) -> str:\n return self._get_framework()\n\n def _get_framework(self) -> str:\n return self.metadata.get('framework', '')\n\n @framework.setter\n def framework(self, framework: str):\n self._set_framework(framework)\n\n def _set_framework(self, framework: str):\n self.metadata['framework'] = 
framework\n\n\nclass Dataset(Artifact):\n \"\"\"An artifact representing an ML Dataset.\"\"\"\n TYPE_NAME = 'system.Dataset'\n\n def __init__(self,\n name: Optional[str] = None,\n uri: Optional[str] = None,\n metadata: Optional[Dict] = None):\n super().__init__(uri=uri, name=name, metadata=metadata)\n\n\nclass Metrics(Artifact):\n \"\"\"Represent a simple base Artifact type to store key-value scalar metrics.\"\"\"\n TYPE_NAME = 'system.Metrics'\n\n def __init__(self,\n name: Optional[str] = None,\n uri: Optional[str] = None,\n metadata: Optional[Dict] = None):\n super().__init__(uri=uri, name=name, metadata=metadata)\n\n def log_metric(self, metric: str, value: float):\n \"\"\"Sets a custom scalar metric.\n\n Args:\n metric: Metric key\n value: Value of the metric.\n \"\"\"\n self.metadata[metric] = value\n\n\nclass ClassificationMetrics(Artifact):\n \"\"\"Represents Artifact class to store Classification Metrics.\"\"\"\n TYPE_NAME = 'system.ClassificationMetrics'\n\n def __init__(self,\n name: Optional[str] = None,\n uri: Optional[str] = None,\n metadata: Optional[Dict] = None):\n super().__init__(uri=uri, name=name, metadata=metadata)\n\n def log_roc_data_point(self, fpr: float, tpr: float, threshold: float):\n \"\"\"Logs a single data point in the ROC Curve.\n\n Args:\n fpr: False positive rate value of the data point.\n tpr: True positive rate value of the data point.\n threshold: Threshold value for the data point.\n \"\"\"\n\n roc_reading = {\n 'confidenceThreshold': threshold,\n 'recall': tpr,\n 'falsePositiveRate': fpr\n }\n if 'confidenceMetrics' not in self.metadata.keys():\n self.metadata['confidenceMetrics'] = []\n\n self.metadata['confidenceMetrics'].append(roc_reading)\n\n def log_roc_curve(self, fpr: List[float], tpr: List[float],\n threshold: List[float]):\n \"\"\"Logs an ROC curve.\n\n The list length of fpr, tpr and threshold must be the same.\n\n Args:\n fpr: List of false positive rate values.\n tpr: List of true positive rate values.\n threshold: List of threshold values.\n \"\"\"\n if len(fpr) != len(tpr) or len(fpr) != len(threshold) or len(tpr) != len(\n threshold):\n raise ValueError('Length of fpr, tpr and threshold must be the same. 
'\n 'Got lengths {}, {} and {} respectively.'.format(\n len(fpr), len(tpr), len(threshold)))\n\n for i in range(len(fpr)):\n self.log_roc_data_point(fpr=fpr[i], tpr=tpr[i], threshold=threshold[i])\n\n def set_confusion_matrix_categories(self, categories: List[str]):\n \"\"\"Stores confusion matrix categories.\n\n Args:\n categories: List of strings specifying the categories.\n \"\"\"\n\n self._categories = []\n annotation_specs = []\n for category in categories:\n annotation_spec = {'displayName': category}\n self._categories.append(category)\n annotation_specs.append(annotation_spec)\n\n self._matrix = []\n for row in range(len(self._categories)):\n self._matrix.append({'row': [0] * len(self._categories)})\n\n self._confusion_matrix = {}\n self._confusion_matrix['annotationSpecs'] = annotation_specs\n self._confusion_matrix['rows'] = self._matrix\n self.metadata['confusionMatrix'] = self._confusion_matrix\n\n def log_confusion_matrix_row(self, row_category: str, row: List[float]):\n \"\"\"Logs a confusion matrix row.\n\n Args:\n row_category: Category to which the row belongs.\n row: List of integers specifying the values for the row.\n\n Raises:\n ValueError: If row_category is not in the list of categories\n set in set_categories call.\n \"\"\"\n if row_category not in self._categories:\n raise ValueError('Invalid category: {} passed. Expected one of: {}'.\\\n format(row_category, self._categories))\n\n if len(row) != len(self._categories):\n raise ValueError('Invalid row. Expected size: {} got: {}'.\\\n format(len(self._categories), len(row)))\n\n self._matrix[self._categories.index(row_category)] = {'row': row}\n self.metadata['confusionMatrix'] = self._confusion_matrix\n\n def log_confusion_matrix_cell(self, row_category: str, col_category: str,\n value: int):\n \"\"\"Logs a cell in the confusion matrix.\n\n Args:\n row_category: String representing the name of the row category.\n col_category: String representing the name of the column category.\n value: Int value of the cell.\n\n Raises:\n ValueError: If row_category or col_category is not in the list of\n categories set in set_categories.\n \"\"\"\n if row_category not in self._categories:\n raise ValueError('Invalid category: {} passed. Expected one of: {}'.\\\n format(row_category, self._categories))\n\n if col_category not in self._categories:\n raise ValueError('Invalid category: {} passed. 
Expected one of: {}'.\\\n format(row_category, self._categories))\n\n self._matrix[self._categories.index(row_category)]['row'][\n self._categories.index(col_category)] = value\n self.metadata['confusionMatrix'] = self._confusion_matrix\n\n def log_confusion_matrix(self, categories: List[str],\n matrix: List[List[int]]):\n \"\"\"Logs a confusion matrix.\n\n Args:\n categories: List of the category names.\n matrix: Complete confusion matrix.\n\n Raises:\n ValueError: Length of categories does not match number of rows or columns.\n \"\"\"\n self.set_confusion_matrix_categories(categories)\n\n if len(matrix) != len(categories):\n raise ValueError('Invalid matrix: {} passed for categories: {}'.\\\n format(matrix, categories))\n\n for index in range(len(categories)):\n if len(matrix[index]) != len(categories):\n raise ValueError('Invalid matrix: {} passed for categories: {}'.\\\n format(matrix, categories))\n\n self.log_confusion_matrix_row(categories[index], matrix[index])\n\n self.metadata['confusionMatrix'] = self._confusion_matrix\n\n\nclass SlicedClassificationMetrics(Artifact):\n \"\"\"Metrics class representing Sliced Classification Metrics.\n\n Similar to ClassificationMetrics clients using this class are expected to use\n log methods of the class to log metrics with the difference being each log\n method takes a slice to associate the ClassificationMetrics.\n\n \"\"\"\n\n TYPE_NAME = 'system.SlicedClassificationMetrics'\n\n def __init__(self,\n name: Optional[str] = None,\n uri: Optional[str] = None,\n metadata: Optional[Dict] = None):\n super().__init__(uri=uri, name=name, metadata=metadata)\n\n def _upsert_classification_metrics_for_slice(self, slice: str):\n \"\"\"Upserts the classification metrics instance for a slice.\"\"\"\n if slice not in self._sliced_metrics:\n self._sliced_metrics[slice] = ClassificationMetrics()\n\n def _update_metadata(self, slice: str):\n \"\"\"Updates metadata to adhere to the metrics schema.\"\"\"\n self.metadata = {}\n self.metadata['evaluationSlices'] = []\n for slice in self._sliced_metrics.keys():\n slice_metrics = {\n 'slice': slice,\n 'sliceClassificationMetrics': self._sliced_metrics[slice].metadata\n }\n self.metadata['evaluationSlices'].append(slice_metrics)\n\n def log_roc_reading(self, slice: str, threshold: float, tpr: float,\n fpr: float):\n \"\"\"Logs a single data point in the ROC Curve of a slice.\n\n Args:\n slice: String representing slice label.\n threshold: Thresold value for the data point.\n tpr: True positive rate value of the data point.\n fpr: False positive rate value of the data point.\n \"\"\"\n\n self._upsert_classification_metrics_for_slice(slice)\n self._sliced_metrics[slice].log_roc_reading(threshold, tpr, fpr)\n self._update_metadata(slice)\n\n def load_roc_readings(self, slice: str, readings: List[List[float]]):\n \"\"\"Supports bulk loading ROC Curve readings for a slice.\n\n Args:\n slice: String representing slice label.\n readings: A 2-D list providing ROC Curve data points.\n The expected order of the data points is: threshold,\n true_positive_rate, false_positive_rate.\n \"\"\"\n self._upsert_classification_metrics_for_slice(slice)\n self._sliced_metrics[slice].load_roc_readings(readings)\n self._update_metadata(slice)\n\n def set_confusion_matrix_categories(self, slice: str, categories: List[str]):\n \"\"\"Stores confusion matrix categories for a slice..\n\n Categories are stored in the internal metrics_utils.ConfusionMatrix\n instance of the slice.\n\n Args:\n slice: String representing slice label.\n categories: 
List of strings specifying the categories.\n \"\"\"\n self._upsert_classification_metrics_for_slice(slice)\n self._sliced_metrics[slice].set_confusion_matrix_categories(categories)\n self._update_metadata(slice)\n\n def log_confusion_matrix_row(self, slice: str, row_category: str,\n row: List[int]):\n \"\"\"Logs a confusion matrix row for a slice.\n\n Row is updated on the internal metrics_utils.ConfusionMatrix\n instance of the slice.\n\n Args:\n slice: String representing slice label.\n row_category: Category to which the row belongs.\n row: List of integers specifying the values for the row.\n \"\"\"\n self._upsert_classification_metrics_for_slice(slice)\n self._sliced_metrics[slice].log_confusion_matrix_row(row_category, row)\n self._update_metadata(slice)\n\n def log_confusion_matrix_cell(self, slice: str, row_category: str,\n col_category: str, value: int):\n \"\"\"Logs a confusion matrix cell for a slice..\n\n Cell is updated on the internal metrics_utils.ConfusionMatrix\n instance of the slice.\n\n Args:\n slice: String representing slice label.\n row_category: String representing the name of the row category.\n col_category: String representing the name of the column category.\n value: Int value of the cell.\n \"\"\"\n self._upsert_classification_metrics_for_slice(slice)\n self._sliced_metrics[slice].log_confusion_matrix_cell(\n row_category, col_category, value)\n self._update_metadata(slice)\n\n def load_confusion_matrix(self, slice: str, categories: List[str],\n matrix: List[List[int]]):\n \"\"\"Supports bulk loading the whole confusion matrix for a slice.\n\n Args:\n slice: String representing slice label.\n categories: List of the category names.\n matrix: Complete confusion matrix.\n \"\"\"\n self._upsert_classification_metrics_for_slice(slice)\n self._sliced_metrics[slice].log_confusion_matrix_cell(categories, matrix)\n self._update_metadata(slice)\n\n\nT = TypeVar('T')\n\n\nclass InputAnnotation():\n \"\"\"Marker type for input artifacts.\"\"\"\n pass\n\n\n\nclass OutputAnnotation():\n \"\"\"Marker type for output artifacts.\"\"\"\n pass\n\n\n# TODO: Use typing.Annotated instead of this hack.\n# With typing.Annotated (Python 3.9+ or typing_extensions package), the\n# following would look like:\n# Input = typing.Annotated[T, InputAnnotation]\n# Output = typing.Annotated[T, OutputAnnotation]\n\n\n# Input represents an Input artifact of type T.\nInput = Union[T, InputAnnotation]\n\n# Output represents an Output artifact of type T.\nOutput = Union[T, OutputAnnotation]\n\n\ndef is_artifact_annotation(typ) -> bool:\n if hasattr(typ, '_subs_tree'): # Python 3.6\n subs_tree = typ._subs_tree()\n return len(subs_tree) == 3 and subs_tree[0] == Union and subs_tree[2] in [InputAnnotation, OutputAnnotation]\n\n if not hasattr(typ, '__origin__'):\n return False\n\n\n if typ.__origin__ != Union and type(typ.__origin__) != type(Union):\n return False\n\n\n if not hasattr(typ, '__args__') or len(typ.__args__) != 2:\n return False\n\n if typ.__args__[1] not in [InputAnnotation, OutputAnnotation]:\n return False\n\n return True\n\ndef is_input_artifact(typ) -> bool:\n \"\"\"Returns True if typ is of type Input[T].\"\"\"\n if not is_artifact_annotation(typ):\n return False\n\n if hasattr(typ, '_subs_tree'): # Python 3.6\n subs_tree = typ._subs_tree()\n return len(subs_tree) == 3 and subs_tree[2] == InputAnnotation\n\n return typ.__args__[1] == InputAnnotation\n\ndef is_output_artifact(typ) -> bool:\n \"\"\"Returns True if typ is of type Output[T].\"\"\"\n if not is_artifact_annotation(typ):\n 
return False\n\n if hasattr(typ, '_subs_tree'): # Python 3.6\n subs_tree = typ._subs_tree()\n return len(subs_tree) == 3 and subs_tree[2] == OutputAnnotation\n\n return typ.__args__[1] == OutputAnnotation\n\ndef get_io_artifact_class(typ):\n if not is_artifact_annotation(typ):\n return None\n if typ == Input or typ == Output:\n return None\n\n if hasattr(typ, '_subs_tree'): # Python 3.6\n subs_tree = typ._subs_tree()\n if len(subs_tree) != 3:\n return None\n return subs_tree[1]\n\n return typ.__args__[0]\n\ndef get_io_artifact_annotation(typ):\n if not is_artifact_annotation(typ):\n return None\n\n if hasattr(typ, '_subs_tree'): # Python 3.6\n subs_tree = typ._subs_tree()\n if len(subs_tree) != 3:\n return None\n return subs_tree[2]\n\n return typ.__args__[1]\n\n\n\n_SCHEMA_TITLE_TO_TYPE: Dict[str, Artifact] = {\n x.TYPE_NAME: x\n for x in [Artifact, Model, Dataset, Metrics, ClassificationMetrics]\n}\n\n\ndef create_runtime_artifact(runtime_artifact: Dict) -> Artifact:\n \"\"\"Creates an Artifact instance from the specified RuntimeArtifact.\n\n Args:\n runtime_artifact: Dictionary representing JSON-encoded RuntimeArtifact.\n \"\"\"\n schema_title = runtime_artifact.get('type', {}).get('schemaTitle', '')\n\n artifact_type = _SCHEMA_TITLE_TO_TYPE.get(schema_title)\n if not artifact_type:\n artifact_type = Artifact\n return artifact_type(\n uri=runtime_artifact.get('uri', ''),\n name=runtime_artifact.get('name', ''),\n metadata=runtime_artifact.get('metadata', {}),\n )\n\nclass InputPath:\n '''When creating component from function, :class:`.InputPath` should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''\n def __init__(self, type=None):\n self.type = type\n\nclass OutputPath:\n '''When creating component from function, :class:`.OutputPath` should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''\n def __init__(self, type=None):\n self.type = type\n\nclass Executor():\n \"\"\"Executor executes v2-based Python function components.\"\"\"\n\n def __init__(self, executor_input: Dict, function_to_execute: Callable):\n self._func = function_to_execute\n self._input = executor_input\n self._input_artifacts: Dict[str, Artifact] = {}\n self._output_artifacts: Dict[str, Artifact] = {}\n\n for name, artifacts in self._input.get('inputs', {}).get('artifacts',\n {}).items():\n artifacts_list = artifacts.get('artifacts')\n if artifacts_list:\n self._input_artifacts[name] = self._make_input_artifact(\n artifacts_list[0])\n\n for name, artifacts in self._input.get('outputs', {}).get('artifacts',\n {}).items():\n artifacts_list = artifacts.get('artifacts')\n if artifacts_list:\n self._output_artifacts[name] = self._make_output_artifact(\n artifacts_list[0])\n\n self._return_annotation = inspect.signature(self._func).return_annotation\n self._executor_output = {}\n\n @classmethod\n def _make_input_artifact(cls, runtime_artifact: Dict):\n return create_runtime_artifact(runtime_artifact)\n\n @classmethod\n def _make_output_artifact(cls, runtime_artifact: Dict):\n import os\n artifact = create_runtime_artifact(runtime_artifact)\n os.makedirs(os.path.dirname(artifact.path), exist_ok=True)\n return artifact\n\n def _get_input_artifact(self, name: str):\n return self._input_artifacts.get(name)\n\n def _get_output_artifact(self, name: str):\n return 
self._output_artifacts.get(name)\n\n def _get_input_parameter_value(self, parameter_name: str, parameter_type: Any):\n parameter = self._input.get('inputs', {}).get('parameters',\n {}).get(parameter_name, None)\n if parameter is None:\n return None\n\n if parameter.get('stringValue'):\n if parameter_type == str:\n return parameter['stringValue']\n elif parameter_type == bool:\n # Use `.lower()` so it can also handle 'True' and 'False' (resulted from\n # `str(True)` and `str(False)`, respectively.\n return json.loads(parameter['stringValue'].lower())\n else:\n return json.loads(parameter['stringValue'])\n elif parameter.get('intValue'):\n return int(parameter['intValue'])\n elif parameter.get('doubleValue'):\n return float(parameter['doubleValue'])\n\n def _get_output_parameter_path(self, parameter_name: str):\n parameter_name = self._maybe_strip_path_suffix(parameter_name)\n parameter = self._input.get('outputs',\n {}).get('parameters',\n {}).get(parameter_name, None)\n if parameter is None:\n return None\n\n import os\n path = parameter.get('outputFile', None)\n if path:\n os.makedirs(os.path.dirname(path), exist_ok=True)\n return path\n\n def _get_output_artifact_path(self, artifact_name: str):\n artifact_name = self._maybe_strip_path_suffix(artifact_name)\n output_artifact = self._output_artifacts.get(artifact_name)\n if not output_artifact:\n raise ValueError(\n 'Failed to get output artifact path for artifact name {}'.format(\n artifact_name))\n return output_artifact.path\n\n def _get_input_artifact_path(self, artifact_name: str):\n artifact_name = self._maybe_strip_path_suffix(artifact_name)\n input_artifact = self._input_artifacts.get(artifact_name)\n if not input_artifact:\n raise ValueError(\n 'Failed to get input artifact path for artifact name {}'.format(\n artifact_name))\n return input_artifact.path\n\n def _write_output_parameter_value(self, name: str,\n value: Union[str, int, float, bool, dict,\n list, Dict, List]):\n if type(value) == str:\n output = {'stringValue': value}\n elif type(value) == int:\n output = {'intValue': value}\n elif type(value) == float:\n output = {'doubleValue': value}\n else:\n # For bool, list, dict, List, Dict, json serialize the value.\n output = {'stringValue': json.dumps(value)}\n\n if not self._executor_output.get('parameters'):\n self._executor_output['parameters'] = {}\n\n self._executor_output['parameters'][name] = output\n\n def _write_output_artifact_payload(self, name: str, value: Any):\n path = self._get_output_artifact_path(name)\n with open(path, 'w') as f:\n f.write(str(value))\n\n # TODO: extract to a util\n @classmethod\n def _get_short_type_name(cls, type_name: str) -> str:\n \"\"\"Extracts the short form type name.\n\n This method is used for looking up serializer for a given type.\n\n For example:\n typing.List -> List\n typing.List[int] -> List\n typing.Dict[str, str] -> Dict\n List -> List\n str -> str\n\n Args:\n type_name: The original type name.\n\n Returns:\n The short form type name or the original name if pattern doesn't match.\n \"\"\"\n import re\n match = re.match('(typing\\.)?(?P<type>\\w+)(?:\\[.+\\])?', type_name)\n if match:\n return match.group('type')\n else:\n return type_name\n\n # TODO: merge with type_utils.is_parameter_type\n @classmethod\n def _is_parameter(cls, annotation: Any) -> bool:\n if type(annotation) == type:\n return annotation in [str, int, float, bool, dict, list]\n\n # Annotation could be, for instance `typing.Dict[str, str]`, etc.\n return cls._get_short_type_name(str(annotation)) in ['Dict', 
'List']\n\n @classmethod\n def _is_artifact(cls, annotation: Any) -> bool:\n if type(annotation) == type:\n return issubclass(annotation, Artifact)\n return False\n\n @classmethod\n def _is_named_tuple(cls, annotation: Any) -> bool:\n if type(annotation) == type:\n return issubclass(annotation, tuple) and hasattr(\n annotation, '_fields') and hasattr(annotation, '__annotations__')\n return False\n\n def _handle_single_return_value(self, output_name: str, annotation_type: Any,\n return_value: Any):\n if self._is_parameter(annotation_type):\n if type(return_value) != annotation_type:\n raise ValueError(\n 'Function `{}` returned value of type {}; want type {}'.format(\n self._func.__name__, type(return_value), annotation_type))\n self._write_output_parameter_value(output_name, return_value)\n elif self._is_artifact(annotation_type):\n self._write_output_artifact_payload(output_name, return_value)\n else:\n raise RuntimeError(\n 'Unknown return type: {}. Must be one of `str`, `int`, `float`, or a'\n ' subclass of `Artifact`'.format(annotation_type))\n\n def _write_executor_output(self, func_output: Optional[Any] = None):\n if self._output_artifacts:\n self._executor_output['artifacts'] = {}\n\n for name, artifact in self._output_artifacts.items():\n runtime_artifact = {\n 'name': artifact.name,\n 'uri': artifact.uri,\n 'metadata': artifact.metadata,\n }\n artifacts_list = {'artifacts': [runtime_artifact]}\n\n self._executor_output['artifacts'][name] = artifacts_list\n\n if func_output is not None:\n if self._is_parameter(self._return_annotation) or self._is_artifact(\n self._return_annotation):\n # Note: single output is named `Output` in component.yaml.\n self._handle_single_return_value('Output', self._return_annotation,\n func_output)\n elif self._is_named_tuple(self._return_annotation):\n if len(self._return_annotation._fields) != len(func_output):\n raise RuntimeError(\n 'Expected {} return values from function `{}`, got {}'.format(\n len(self._return_annotation._fields), self._func.__name__,\n len(func_output)))\n for i in range(len(self._return_annotation._fields)):\n field = self._return_annotation._fields[i]\n field_type = self._return_annotation.__annotations__[field]\n if type(func_output) == tuple:\n field_value = func_output[i]\n else:\n field_value = getattr(func_output, field)\n self._handle_single_return_value(field, field_type, field_value)\n else:\n raise RuntimeError(\n 'Unknown return type: {}. 
Must be one of `str`, `int`, `float`, a'\n ' subclass of `Artifact`, or a NamedTuple collection of these types.'\n .format(self._return_annotation))\n\n import os\n os.makedirs(\n os.path.dirname(self._input['outputs']['outputFile']), exist_ok=True)\n with open(self._input['outputs']['outputFile'], 'w') as f:\n f.write(json.dumps(self._executor_output))\n\n def _maybe_strip_path_suffix(self, name) -> str:\n if name.endswith('_path'):\n name = name[0:-len('_path')]\n if name.endswith('_file'):\n name = name[0:-len('_file')]\n return name\n\n def execute(self):\n annotations = inspect.getfullargspec(self._func).annotations\n\n # Function arguments.\n func_kwargs = {}\n\n for k, v in annotations.items():\n if k == 'return':\n continue\n\n if self._is_parameter(v):\n func_kwargs[k] = self._get_input_parameter_value(k, v)\n\n if is_artifact_annotation(v):\n if is_input_artifact(v):\n func_kwargs[k] = self._get_input_artifact(k)\n if is_output_artifact(v):\n func_kwargs[k] = self._get_output_artifact(k)\n\n elif isinstance(v, OutputPath):\n if self._is_parameter(v.type):\n func_kwargs[k] = self._get_output_parameter_path(k)\n else:\n func_kwargs[k] = self._get_output_artifact_path(k)\n elif isinstance(v, InputPath):\n func_kwargs[k] = self._get_input_artifact_path(k)\n\n result = self._func(**func_kwargs)\n self._write_executor_output(result)\n\n\ndef print_op(message: str):\n \"\"\"Prints a message.\"\"\"\n print(message)\n\n\ndef executor_main():\n import argparse\n import json\n\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--executor_input', type=str)\n parser.add_argument('--function_to_execute', type=str)\n\n args, _ = parser.parse_known_args()\n executor_input = json.loads(args.executor_input)\n function_to_execute = globals()[args.function_to_execute]\n\n executor = Executor(executor_input=executor_input,\n function_to_execute=function_to_execute)\n\n executor.execute()\n\n\nif __name__ == '__main__':\n executor_main()\n" ], "image": "python:3.7" } } } }, "pipelineInfo": { "name": "test" }, "root": { "dag": { "tasks": { "for-loop-1": { "componentRef": { "name": "comp-for-loop-1" }, "dependentTasks": [ "get-parallel-offsets" ], "inputs": { "artifacts": { "pipelineparam--get-parallel-offsets-offsets": { "taskOutputArtifact": { "outputArtifactKey": "offsets", "producerTask": "get-parallel-offsets" } } } }, "parameterIterator": { "itemInput": "pipelineparam--get-parallel-offsets-offsets-loop-item", "items": { "inputParameter": "pipelineparam--get-parallel-offsets-offsets" } }, "taskInfo": { "name": "for-loop-1" } }, "get-parallel-offsets": { "componentRef": { "name": "comp-get-parallel-offsets" }, "inputs": { "parameters": { "parallel_num": { "componentInputParameter": "parallel_num" }, "rows_count": { "runtimeValue": { "constantValue": { "intValue": "3000" } } } } }, "taskInfo": { "name": "get-parallel-offsets" } } } }, "inputDefinitions": { "parameters": { "parallel_num": { "type": "INT" } } } }, "schemaVersion": "2.0.0", "sdkVersion": "kfp-1.6.4" }, "runtimeConfig": { "parameters": { "parallel_num": { "intValue": "10" } } } } ``` </details> ### Expected result Successful validation of vertex pipelines. ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
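A minimal sketch of the fix discussed in the comments: typing the `offsets` output as a list makes it an output parameter rather than an artifact, so `ParallelFor` can iterate it. This assumes the KFP v2 `@component` decorator and an SDK version that includes the `typing.List`/`typing.Dict` fix from PR #5979; relative to the report, only the return annotation changes:

```python
from typing import List, NamedTuple
from kfp.v2 import dsl
from kfp.v2.dsl import component

@component(base_image="python:3.9")
def get_parallel_offsets(rows_count: int, parallel_num: int) -> NamedTuple(
        "Offsets", [("offsets", List[dict])]):
    """Typing `offsets` as List[dict] makes it a parameter, not an artifact."""
    import math
    from collections import namedtuple

    offset_step = limit = math.ceil(rows_count / parallel_num)
    offsets = [
        {"index": str(i), "offset": str(o), "upper_bounds": str(o + limit)}
        for i, o in enumerate(range(0, rows_count, offset_step))
    ]
    return namedtuple("Offsets", ["offsets"])(offsets)

@component(base_image="python:3.9")
def print_op(message: str):
    """Prints a message."""
    print(message)

@dsl.pipeline(name="test")
def test(parallel_num: int = 10):
    task = get_parallel_offsets(rows_count=3000, parallel_num=parallel_num)
    with dsl.ParallelFor(task.outputs["offsets"]) as offset:
        print_op(message=offset.upper_bounds)
```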
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5912/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5912/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5910
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5910/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5910/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5910/events
https://github.com/kubeflow/pipelines/issues/5910
928247917
MDU6SXNzdWU5MjgyNDc5MTc=
5910
Add an `if __name__ == "__main__"` guard to the SDK to support multiprocessing in components
{ "login": "steveplazafb", "id": 81383317, "node_id": "MDQ6VXNlcjgxMzgzMzE3", "avatar_url": "https://avatars.githubusercontent.com/u/81383317?v=4", "gravatar_id": "", "url": "https://api.github.com/users/steveplazafb", "html_url": "https://github.com/steveplazafb", "followers_url": "https://api.github.com/users/steveplazafb/followers", "following_url": "https://api.github.com/users/steveplazafb/following{/other_user}", "gists_url": "https://api.github.com/users/steveplazafb/gists{/gist_id}", "starred_url": "https://api.github.com/users/steveplazafb/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/steveplazafb/subscriptions", "organizations_url": "https://api.github.com/users/steveplazafb/orgs", "repos_url": "https://api.github.com/users/steveplazafb/repos", "events_url": "https://api.github.com/users/steveplazafb/events{/privacy}", "received_events_url": "https://api.github.com/users/steveplazafb/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
{ "login": "Ark-kun", "id": 1829149, "node_id": "MDQ6VXNlcjE4MjkxNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/1829149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ark-kun", "html_url": "https://github.com/Ark-kun", "followers_url": "https://api.github.com/users/Ark-kun/followers", "following_url": "https://api.github.com/users/Ark-kun/following{/other_user}", "gists_url": "https://api.github.com/users/Ark-kun/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ark-kun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ark-kun/subscriptions", "organizations_url": "https://api.github.com/users/Ark-kun/orgs", "repos_url": "https://api.github.com/users/Ark-kun/repos", "events_url": "https://api.github.com/users/Ark-kun/events{/privacy}", "received_events_url": "https://api.github.com/users/Ark-kun/received_events", "type": "User", "site_admin": false }
[ { "login": "Ark-kun", "id": 1829149, "node_id": "MDQ6VXNlcjE4MjkxNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/1829149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ark-kun", "html_url": "https://github.com/Ark-kun", "followers_url": "https://api.github.com/users/Ark-kun/followers", "following_url": "https://api.github.com/users/Ark-kun/following{/other_user}", "gists_url": "https://api.github.com/users/Ark-kun/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ark-kun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ark-kun/subscriptions", "organizations_url": "https://api.github.com/users/Ark-kun/orgs", "repos_url": "https://api.github.com/users/Ark-kun/repos", "events_url": "https://api.github.com/users/Ark-kun/events{/privacy}", "received_events_url": "https://api.github.com/users/Ark-kun/received_events", "type": "User", "site_admin": false }, { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @chensun ", "Can you please describe your scenario?\r\nThe command-line code generated by `create_component_from_func` is only intended to be run as a program.\r\n\r\nAm I understanding you correctly - multiprocessing cannot be used inside python lightweight component since it needs to be able to import the current file as a module?", "Correct. At least as I understand it, if you \"fork\" (in a non-linux environment) or \"spawn\" a process during multi-processing, it will re-import the main module which will cause code to re-execute if it is not if guarded.\r\n\r\nLet's say I create a component foo:\r\n\r\n```\r\ndef foo():\r\n print(\"inside\")\r\n # code that starts multiprocessing\r\n\r\n```\r\nWhen KFP serializes this function to yaml, the script creates a temporary python file, which invokes foo like this:\r\n\r\n```\r\n# some KFP initialization and definition of \"foo\"\r\noutputs = foo(**_parsed_args)\r\n\r\n```\r\nIf this code is run, \"inside\" is printed multiple times for each forked process. To avoid this behavior, I probably need to do this:\r\n\r\n```\r\nif __name__ == \"__main__\":\r\n outputs = foo(**_parsed_args)\r\n\r\n```\r\nThis should cause \"inside\" to only be called once as desired.\r\n\r\nThere are probably other ways to identify whether something is a child process (maybe tracking the PIDs for instance) within the component, but it seems like adding a __name__ guard would be cleanest. I could be missing something though.\r\n\r\n\r\n\r\n\r\n", "Thank you for the explanation.\r\n\r\nI think this is a desirable feature request.\r\n\r\nP.S. Does anything break when the name of the function changes? We're not guaranteeing that the function name will be the same (or that the function itself would exist at all...).", "I am not sure that I completely understand your question. Are you saying that in my example above, the function name \"foo\" might not be preserved in future versions of kfp? I don't think it would cause any big problems but I do use inspect to automatically create log messages with the component name in the message. \r\n\r\nThank you for looking into this issue!", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-06-23T13:16:26
2022-03-03T02:05:24
null
NONE
null
### Feature Area /area sdk ### What feature would you like to see? Add an `if __name__ == "__main__"` guard before calling the function wrapped by `create_component_from_func`. ### What is the use case or pain point? An `if __name__ == "__main__"` guard is needed for Python multiprocessing code to run properly. ### Is there a workaround currently? I have not found a good workaround. It might be possible to determine whether I am in a forked call based on the depth of the call stack in some cases. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
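A minimal self-contained sketch of the requested pattern, reusing the toy `foo` from the comment thread above; the guard is what keeps spawned workers from re-running the top-level call when they re-import the module:

```python
import multiprocessing


def foo():
    print("inside")
    # On spawn-based platforms, each worker process re-imports this module;
    # without the guard below, that import re-invokes foo() and "inside"
    # prints once per worker instead of once.
    with multiprocessing.Pool(processes=2) as pool:
        print(pool.map(abs, [-1, -2, -3]))


if __name__ == "__main__":
    # This is the guard the feature request asks the generated
    # component program to emit around the wrapped-function call.
    foo()
```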
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5910/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5910/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5907
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5907/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5907/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5907/events
https://github.com/kubeflow/pipelines/issues/5907
927,856,678
MDU6SXNzdWU5Mjc4NTY2Nzg=
5,907
[sdk] Error from kfp Client
{ "login": "shalankig", "id": 46866033, "node_id": "MDQ6VXNlcjQ2ODY2MDMz", "avatar_url": "https://avatars.githubusercontent.com/u/46866033?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shalankig", "html_url": "https://github.com/shalankig", "followers_url": "https://api.github.com/users/shalankig/followers", "following_url": "https://api.github.com/users/shalankig/following{/other_user}", "gists_url": "https://api.github.com/users/shalankig/gists{/gist_id}", "starred_url": "https://api.github.com/users/shalankig/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shalankig/subscriptions", "organizations_url": "https://api.github.com/users/shalankig/orgs", "repos_url": "https://api.github.com/users/shalankig/repos", "events_url": "https://api.github.com/users/shalankig/events{/privacy}", "received_events_url": "https://api.github.com/users/shalankig/received_events", "type": "User", "site_admin": false }
[ { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
closed
false
null
[]
null
[ "Hi @shalankig . Could you share your code to help us debug even further? From the above log, I think you are referring to a kfp experiment that doesn't exist or you might have missed to pass the namespace.", "Hi @shalankig, did you specify namespace for your call? In KFP multi-user mode, namespace is required for each pipeline run (and experiment creation).\r\n\r\nI guess the error message needs some improvement.", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n", "This issue has been automatically closed because it has not had recent activity. Please comment \"/reopen\" to reopen it.\n" ]
2021-06-23T04:53:27
2022-03-03T03:05:33
2022-03-03T03:05:33
NONE
null
I have successfully deployed Kubeflow on a private GKE cluster using https://github.com/kubeflow/manifests#install-with-a-single-command GKE Version: 1.17.17-gke.9100 Kubeflow Version: 1.3 Workload Identity Enabled: Yes KFP Version: 1.5.0 Error while creating a Kubeflow pipeline: `Traceback (most recent call last): File "/path/to/pipeline.py", line 1073, in run_pipeline experiment_id = client.get_experiment(experiment_name=experiment_name).id File "/usr/local/lib/python3.7/dist-packages/kfp/client.py", line 359, in get_experiment list_experiments_response = self.list_experiments(page_size=100, page_token=next_page_token, namespace=namespace).............ApiException(http_resp=r) kfp_server_api.exceptions.ApiException: (400) ` which further leads to the below error logs: `HTTP: response body: {"error":"Validate experiment failed: Invalid input error: Invalid resource references for experiment. Expect one namespace type with owner relationship. Got []","code":3, "message":"Validate experiment failed: Invalid input error: Invalid resource references for experiment. Expect one namespace type with owner relationship. Got []", "details":[{"@type":"type.googleapis.com/api.Error","error_message":"Invalid resource references for experiment. Expect one namespace type with owner relationship. Got []","error_details":"Validate experiment failed: Invalid input error: Invalid resource references for experiment. Expect one namespace type with owner relationship. Got []"`
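As the comment thread above notes, multi-user deployments require a namespace on each call. A minimal sketch of the scoped lookup; the host, namespace, and experiment name are placeholders:

```python
import kfp

# Placeholders: substitute the real endpoint and profile namespace.
client = kfp.Client(host='https://<kubeflow-endpoint>/pipeline',
                    namespace='<profile-namespace>')

# Scoping the lookup to a namespace supplies the owner resource reference
# that the 400 response above says is missing.
experiment = client.get_experiment(experiment_name='<experiment-name>',
                                   namespace='<profile-namespace>')
print(experiment.id)
```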
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5907/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5907/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5906
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5906/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5906/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5906/events
https://github.com/kubeflow/pipelines/issues/5906
927,849,409
MDU6SXNzdWU5Mjc4NDk0MDk=
5,906
[bug] Deployed Kubeflow with OIDC Authentication but pipeline runs in single-user mode
{ "login": "lauradang", "id": 25867870, "node_id": "MDQ6VXNlcjI1ODY3ODcw", "avatar_url": "https://avatars.githubusercontent.com/u/25867870?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lauradang", "html_url": "https://github.com/lauradang", "followers_url": "https://api.github.com/users/lauradang/followers", "following_url": "https://api.github.com/users/lauradang/following{/other_user}", "gists_url": "https://api.github.com/users/lauradang/gists{/gist_id}", "starred_url": "https://api.github.com/users/lauradang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lauradang/subscriptions", "organizations_url": "https://api.github.com/users/lauradang/orgs", "repos_url": "https://api.github.com/users/lauradang/repos", "events_url": "https://api.github.com/users/lauradang/events{/privacy}", "received_events_url": "https://api.github.com/users/lauradang/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 2157634204, "node_id": "MDU6TGFiZWwyMTU3NjM0MjA0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/lifecycle/stale", "name": "lifecycle/stale", "color": "bbbbbb", "default": false, "description": "The issue / pull request is stale, any activities remove this label." } ]
open
false
null
[]
null
[ "/cc @aronchick \r\nwho should be the best contact for Azure distribution?", "I am also interested in this issue as I am also in the same boat, but installed on AWS. I don't think this issue has anything to do with Azure specifically. The best that I can find from the documentation about this \"single-user mode\" points me towards enabling multi-user support here: https://www.kubeflow.org/docs/components/pipelines/installation/overview/#full-kubeflow-deployment\r\n```\r\nDo you want to use Kubeflow Pipelines with multi-user support? \r\nIf yes, choose the full Kubeflow deployment with version >= v1.1.\r\n```\r\nWhich is quite unhelpful in this regard, how do we enable this multi-user mode? We are using kubeflow 1.2", "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.\n" ]
2021-06-23T04:35:00
2022-03-03T00:05:10
null
NONE
null
### What steps did you take I deployed Kubeflow to Azure Kubernetes Service (AKS) with OIDC Authentication as described in the [documentation](https://www.kubeflow.org/docs/distributions/azure/authentication-oidc). I instantiated a `kfp.Client()` like so: ```python kubeflow_client = TektonClient( host=f'{KUBEFLOW_PUBLIC_ENDPOINT_URL}/pipeline', cookies=f'authservice_session={SESSION_COOKIE}', ssl_ca_cert="cacert.pem", namespace="laura-dang" ) ``` Then I called this function next: ```python kubeflow_client.create_run_from_pipeline_func( pipeline_function, experiment_name=experiment_name, arguments={'bucket_name': bucket_name, 'experiment_id': experiment_id, 'python_version': python_version, 'model_info': model_info}, namespace="laura-dang" ) ``` ### What happened: This error message outputted after running `kubeflow_client.create_run...` as described above: ``` --------------------------------------------------------------------------- ApiException Traceback (most recent call last) <ipython-input-13-11f63c93dd6d> in <module> 12 python_version=notebook_config.python_version, 13 model_info=json.dumps(model_info), ---> 14 namespace=notebook_config.namespace) <ipython-input-12-98a442d3e373> in run_pipeline(input_train_path, input_test_path, pipeline, experiment_name, python_version, model_info, namespace) 53 'python_version': python_version, 54 'model_info': model_info}, ---> 55 namespace=namespace) 56 print("done") ~/.local/lib/python3.6/site-packages/kfp_tekton/_client.py in create_run_from_pipeline_func(self, pipeline_func, arguments, run_name, experiment_name, pipeline_conf, namespace) 59 TektonCompiler().compile(pipeline_func, pipeline_package_path, pipeline_conf=pipeline_conf) 60 return self.create_run_from_pipeline_package(pipeline_package_path, arguments, ---> 61 run_name, experiment_name, namespace) 62 finally: 63 os.remove(pipeline_package_path) ~/.local/lib/python3.6/site-packages/kfp/_client.py in create_run_from_pipeline_package(self, pipeline_file, arguments, run_name, experiment_name, namespace) 738 datetime.datetime.now().strftime( 739 '%Y-%m-%d %H-%M-%S')) --> 740 experiment = self.create_experiment(name=experiment_name, namespace=namespace) 741 run_info = self.run_pipeline(experiment.id, run_name, pipeline_file, arguments) 742 return RunPipelineResult(self, run_info) ~/.local/lib/python3.6/site-packages/kfp/_client.py in create_experiment(self, name, description, namespace) 343 experiment = None 344 try: --> 345 experiment = self.get_experiment(experiment_name=name, namespace=namespace) 346 except ValueError as error: 347 # Ignore error if the experiment does not exist. 
~/.local/lib/python3.6/site-packages/kfp/_client.py in get_experiment(self, experiment_id, experiment_name, namespace) 447 next_page_token = '' 448 while next_page_token is not None: --> 449 list_experiments_response = self.list_experiments(page_size=100, page_token=next_page_token, namespace=namespace) 450 next_page_token = list_experiments_response.next_page_token 451 for experiment in list_experiments_response.experiments or []: ~/.local/lib/python3.6/site-packages/kfp/_client.py in list_experiments(self, page_token, page_size, sort_by, namespace) 419 sort_by=sort_by, 420 resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE, --> 421 resource_reference_key_id=namespace) 422 return response 423 ~/.local/lib/python3.6/site-packages/kfp_server_api/api/experiment_service_api.py in list_experiment(self, **kwargs) 565 """ 566 kwargs['_return_http_data_only'] = True --> 567 return self.list_experiment_with_http_info(**kwargs) # noqa: E501 568 569 def list_experiment_with_http_info(self, **kwargs): # noqa: E501 ~/.local/lib/python3.6/site-packages/kfp_server_api/api/experiment_service_api.py in list_experiment_with_http_info(self, **kwargs) 680 _preload_content=local_var_params.get('_preload_content', True), 681 _request_timeout=local_var_params.get('_request_timeout'), --> 682 collection_formats=collection_formats) 683 684 def unarchive_experiment(self, id, **kwargs): # noqa: E501 ~/.local/lib/python3.6/site-packages/kfp_server_api/api_client.py in call_api(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, async_req, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host) 367 response_type, auth_settings, 368 _return_http_data_only, collection_formats, --> 369 _preload_content, _request_timeout, _host) 370 371 return self.pool.apply_async(self.__call_api, (resource_path, ~/.local/lib/python3.6/site-packages/kfp_server_api/api_client.py in __call_api(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host) 186 except ApiException as e: 187 e.body = e.body.decode('utf-8') if six.PY3 else e.body --> 188 raise e 189 190 content_type = response_data.getheader('content-type') ~/.local/lib/python3.6/site-packages/kfp_server_api/api_client.py in __call_api(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host) 183 post_params=post_params, body=body, 184 _preload_content=_preload_content, --> 185 _request_timeout=_request_timeout) 186 except ApiException as e: 187 e.body = e.body.decode('utf-8') if six.PY3 else e.body ~/.local/lib/python3.6/site-packages/kfp_server_api/api_client.py in request(self, method, url, query_params, headers, post_params, body, _preload_content, _request_timeout) 391 _preload_content=_preload_content, 392 _request_timeout=_request_timeout, --> 393 headers=headers) 394 elif method == "HEAD": 395 return self.rest_client.HEAD(url, ~/.local/lib/python3.6/site-packages/kfp_server_api/rest.py in GET(self, url, headers, query_params, _preload_content, _request_timeout) 232 _preload_content=_preload_content, 233 _request_timeout=_request_timeout, --> 234 query_params=query_params) 235 236 def HEAD(self, url, headers=None, 
query_params=None, _preload_content=True, ~/.local/lib/python3.6/site-packages/kfp_server_api/rest.py in request(self, method, url, query_params, headers, body, post_params, _preload_content, _request_timeout) 222 223 if not 200 <= r.status <= 299: --> 224 raise ApiException(http_resp=r) 225 226 return r ApiException: (400) Reason: Bad Request HTTP response headers: HTTPHeaderDict({'x-powered-by': 'Express', 'content-type': 'application/json', 'trailer': 'Grpc-Trailer-Content-Type', 'date': 'Wed, 23 Jun 2021 04:13:24 GMT', 'x-envoy-upstream-service-time': '9', 'server': 'istio-envoy', 'transfer-encoding': 'chunked'}) HTTP response body: {"error":"Invalid input error: In single-user mode, ListExperiment cannot filter by namespace.","message":"Invalid input error: In single-user mode, ListExperiment cannot filter by namespace.","code":3,"details":[{"@type":"type.googleapis.com/api.Error","error_message":"In single-user mode, ListExperiment cannot filter by namespace.","error_details":"Invalid input error: In single-user mode, ListExperiment cannot filter by namespace."}]} ``` ### What did you expect to happen: I expected the experiment to run as normal in **multi-user mode**, not single-user mode since I deployed with OIDC Authentication and I was able to login and make a notebook server in my own namespace (`laura-dang`). ### Environment: * How do you deploy Kubeflow Pipelines (KFP)? On Azure with OIDC Authentication as mentioned above * KFP version: 1.04 * KFP SDK version: ``` kfp 1.4.0 kfp-pipeline-spec 0.1.6 kfp-server-api 1.4.1 kfp-tekton 0.7.0 ``` Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
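Conversely, against a genuinely single-user (standalone) install the namespace argument must be omitted entirely. A minimal sketch that mirrors the failing call; the endpoint and session cookie are placeholders:

```python
import kfp

client = kfp.Client(host='https://<endpoint>/pipeline',
                    cookies='authservice_session=<session-cookie>')

# Succeeds against a single-user install; passing namespace=... to this
# same call is what produces the 400 quoted above.
print(client.list_experiments(page_size=10))
```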
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5906/reactions", "total_count": 7, "+1": 7, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5906/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/5904
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/5904/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/5904/comments
https://api.github.com/repos/kubeflow/pipelines/issues/5904/events
https://github.com/kubeflow/pipelines/issues/5904
927,167,937
MDU6SXNzdWU5MjcxNjc5Mzc=
5,904
[feature] Dataflow flex template component
{ "login": "DennisVis", "id": 1624008, "node_id": "MDQ6VXNlcjE2MjQwMDg=", "avatar_url": "https://avatars.githubusercontent.com/u/1624008?v=4", "gravatar_id": "", "url": "https://api.github.com/users/DennisVis", "html_url": "https://github.com/DennisVis", "followers_url": "https://api.github.com/users/DennisVis/followers", "following_url": "https://api.github.com/users/DennisVis/following{/other_user}", "gists_url": "https://api.github.com/users/DennisVis/gists{/gist_id}", "starred_url": "https://api.github.com/users/DennisVis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DennisVis/subscriptions", "organizations_url": "https://api.github.com/users/DennisVis/orgs", "repos_url": "https://api.github.com/users/DennisVis/repos", "events_url": "https://api.github.com/users/DennisVis/events{/privacy}", "received_events_url": "https://api.github.com/users/DennisVis/received_events", "type": "User", "site_admin": false }
[ { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "After creating the issue and attempting to open a PR to fix it I saw someone else already added this feature." ]
2021-06-22T12:14:37
2021-06-22T12:22:23
2021-06-22T12:22:23
NONE
null
### Feature Area /area components ### What feature would you like to see? In addition to launching Dataflow jobs from Python code and from classic templates, it would be useful to add a component for launching Dataflow flex templates. ### What is the use case or pain point? Dataflow flex templates have some advantages over classic templates. Most importantly, they allow for dynamically changing the pipeline execution graph at runtime by removing the need for ValueProvider arguments. ### Is there a workaround currently? There is currently no support for Dataflow flex templates within Kubeflow Pipelines. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a 👍. We prioritize fulfilling features with the most 👍.
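For background, launching a flex template ultimately goes through the Dataflow `projects.locations.flexTemplates.launch` REST method, which a component like the one requested would wrap. A minimal sketch with the Google API client; every identifier is a placeholder, and this is not the component that was later added:

```python
from googleapiclient.discovery import build

# Launch a Dataflow flex template through the v1b3 API.
dataflow = build('dataflow', 'v1b3')
response = dataflow.projects().locations().flexTemplates().launch(
    projectId='<project-id>',
    location='us-central1',
    body={
        'launchParameter': {
            'jobName': 'my-flex-job',
            'containerSpecGcsPath': 'gs://<bucket>/templates/spec.json',
            # Plain runtime parameters; no ValueProvider plumbing needed.
            'parameters': {'input': 'gs://<bucket>/input.csv'},
        }
    },
).execute()
print(response['job']['id'])
```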
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/5904/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/5904/timeline
null
completed
null
null
false