url
stringlengths
59
59
repository_url
stringclasses
1 value
labels_url
stringlengths
73
73
comments_url
stringlengths
68
68
events_url
stringlengths
66
66
html_url
stringlengths
49
49
id
int64
782M
1.89B
node_id
stringlengths
18
24
number
int64
4.97k
9.98k
title
stringlengths
2
306
user
dict
labels
list
state
stringclasses
2 values
locked
bool
1 class
assignee
dict
assignees
list
milestone
dict
comments
sequence
created_at
unknown
updated_at
unknown
closed_at
unknown
author_association
stringclasses
4 values
active_lock_reason
null
body
stringlengths
0
63.6k
βŒ€
reactions
dict
timeline_url
stringlengths
68
68
performed_via_github_app
null
state_reason
stringclasses
3 values
draft
bool
0 classes
pull_request
dict
is_pull_request
bool
1 class
https://api.github.com/repos/kubeflow/pipelines/issues/8313
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8313/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8313/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8313/events
https://github.com/kubeflow/pipelines/issues/8313
1,387,958,970
I_kwDOB-71UM5SupK6
8,313
[sdk] 403 error when API key is auto-refreshed with Out of Cluster kube-context
{ "login": "alexlatchford", "id": 628146, "node_id": "MDQ6VXNlcjYyODE0Ng==", "avatar_url": "https://avatars.githubusercontent.com/u/628146?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alexlatchford", "html_url": "https://github.com/alexlatchford", "followers_url": "https://api.github.com/users/alexlatchford/followers", "following_url": "https://api.github.com/users/alexlatchford/following{/other_user}", "gists_url": "https://api.github.com/users/alexlatchford/gists{/gist_id}", "starred_url": "https://api.github.com/users/alexlatchford/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alexlatchford/subscriptions", "organizations_url": "https://api.github.com/users/alexlatchford/orgs", "repos_url": "https://api.github.com/users/alexlatchford/repos", "events_url": "https://api.github.com/users/alexlatchford/events{/privacy}", "received_events_url": "https://api.github.com/users/alexlatchford/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false }
[ { "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false }, { "login": "gkcalat", "id": 35157096, "node_id": "MDQ6VXNlcjM1MTU3MDk2", "avatar_url": "https://avatars.githubusercontent.com/u/35157096?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gkcalat", "html_url": "https://github.com/gkcalat", "followers_url": "https://api.github.com/users/gkcalat/followers", "following_url": "https://api.github.com/users/gkcalat/following{/other_user}", "gists_url": "https://api.github.com/users/gkcalat/gists{/gist_id}", "starred_url": "https://api.github.com/users/gkcalat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gkcalat/subscriptions", "organizations_url": "https://api.github.com/users/gkcalat/orgs", "repos_url": "https://api.github.com/users/gkcalat/repos", "events_url": "https://api.github.com/users/gkcalat/events{/privacy}", "received_events_url": "https://api.github.com/users/gkcalat/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @alexlatchford!\r\n\r\nIt seems that the `refresh_token` got expired or removed. Could you logout from your Google account and try repeating the steps you outlined?", "Hey @gkcalat we're on EKS πŸ˜…", "@alexlatchford, I am not an AWS expert, but it might have an option to set the expiration of the tokens. @surajkota may be able to help here.\r\n\r\nGenerally, the error is by design. Feel free to propose your solution here. If there will be a change in the interface, you will need to write a short design doc and present it on a KFP Community Meeting.", "> @alexlatchford, I am not an AWS expert, but it might have an option to set the expiration of the tokens. @surajkota may be able to help here.\r\n> \r\n> Generally, the error is by design. Feel free to propose your solution here. If there will be a change in the interface, you will need to write a short design doc and present it on a KFP Community Meeting.\r\n\r\nYep in general we can avoid but not having longer running workflows or but upping the time but as your mention it's an inherent flaw with the current design at the moment 😞 \r\n\r\nI'm not sure what the best solution here is given it seems like a thorny design caused by KFP not exposing a `VirtualService` by default and using `kube-proxy`, then it needing to interface with `kfp-server-api` which I believe is auto-generated so likely not super flexible either πŸ˜… I'll have a think though.", "Afaik, this is a known issue, we have encountered this before. This needs more investigation, I dont have a solution a this time" ]
"2022-09-27T15:29:08"
"2022-10-04T21:33:33"
null
CONTRIBUTOR
null
### Environment * KFP version: <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> We run KFP v1.8.1 via KF v1.5.0. * KFP SDK version: <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> We run a forked version of KFP SDK which is at ~v1.9.0, mostly has a couple of patches for problems we've contributed back but haven't fully reincorporated. * All dependencies version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> From my investigation of the current code on `master` in the open source repo, this bug still exists and should be pretty easy to reproduce with a sufficiently long KFP run such that the API needs to be refreshed. ### Steps to reproduce 1. Instantiate a `Client` using a local Kube config, outside of a Kubernetes cluster. 2. Use a long running method like `wait_for_run_completion` (code [here](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/client/client.py#L1302)). 3. Wait for long enough for the API key to expire. 4. You get a `403` error 😒 It gets a 403 because the URL it is hitting on the Kubernetes API service doesn't exist as we lose the "proxy path" that gets injected, more details below. **PS. This has the potential to impact ANY KFP SDK call to the KFP API service from what I can tell when operating outside of a cluster.** ### Expected result <!-- What should the correct behavior be? --> It simply shouldn't error, it should refresh the `api_key` without needing to reload the `host`. ### Materials and Reference The root cause of the 403 is that in these scenarios instead of making calls to: `<Kube API Server Host>/api/v1/namespaces/<namespace>/services/ml-pipeline:http/proxy//apis/v1beta1/runs/<run id>` Instead it calls (which doesn't exist, thus the 403): `<Kube API Server Host>/apis/v1beta1/runs/<run id>` Effectively it loses the `Client._KUBE_PROXY_PATH` that is injected [here](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/client/client.py#L302). Now why this is happens because: 1. During `Client.__init__` the `load_kube_config` method is (see [here](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/client/client.py#L295)) called in the upstream `kubernetes` library. - Under the hood eventually it sets the `refresh_api_key_hook` (see [here](https://github.com/kubernetes-client/python/blob/ada96faca164f5d5c018fb21b8ef2ecafbdf5e43/kubernetes/base/config/kube_config.py#L576)). - In that `_refresh_api_key` method it calls `_set_config`, which will re-pull the `host` from the `~/.kube/config`. - **Normally this would be fine but [here](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/client/client.py#L302) KFP mutates that value to inject `Client._KUBE_PROXY_PATH` 😒** 2. Now for `wait_for_run_completion`, that under the hood calls the `kfp_server_api.api.run_service_api.RunService. get_run_with_http_info`. 3. The `get_run_with_http_info` method calls the `auth_settings` method, which in scenarios when the credentials expire it'll call the `refresh_api_key_hook`. **How to potentially fix this?** I guess my preference would be to work out some way to not mutate the `config.host` to append the `Client._KUBE_PROXY_PATH` as that "hacky" approach clearly has its shortcomings. Unclear though what the alternative is as this is quite a convenient solution most of the time! --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8313/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8313/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8312
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8312/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8312/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8312/events
https://github.com/kubeflow/pipelines/issues/8312
1,387,804,595
I_kwDOB-71UM5SuDez
8,312
[sdk] <Bug Name>
{ "login": "riyaj8888", "id": 29457825, "node_id": "MDQ6VXNlcjI5NDU3ODI1", "avatar_url": "https://avatars.githubusercontent.com/u/29457825?v=4", "gravatar_id": "", "url": "https://api.github.com/users/riyaj8888", "html_url": "https://github.com/riyaj8888", "followers_url": "https://api.github.com/users/riyaj8888/followers", "following_url": "https://api.github.com/users/riyaj8888/following{/other_user}", "gists_url": "https://api.github.com/users/riyaj8888/gists{/gist_id}", "starred_url": "https://api.github.com/users/riyaj8888/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riyaj8888/subscriptions", "organizations_url": "https://api.github.com/users/riyaj8888/orgs", "repos_url": "https://api.github.com/users/riyaj8888/repos", "events_url": "https://api.github.com/users/riyaj8888/events{/privacy}", "received_events_url": "https://api.github.com/users/riyaj8888/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[]
"2022-09-27T13:49:34"
"2022-09-29T22:41:28"
"2022-09-29T22:41:28"
NONE
null
null
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8312/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8312/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8308
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8308/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8308/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8308/events
https://github.com/kubeflow/pipelines/issues/8308
1,386,500,014
I_kwDOB-71UM5SpE-u
8,308
[feature] Container start and end times associated with a Pod
{ "login": "ameya-parab", "id": 75458630, "node_id": "MDQ6VXNlcjc1NDU4NjMw", "avatar_url": "https://avatars.githubusercontent.com/u/75458630?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ameya-parab", "html_url": "https://github.com/ameya-parab", "followers_url": "https://api.github.com/users/ameya-parab/followers", "following_url": "https://api.github.com/users/ameya-parab/following{/other_user}", "gists_url": "https://api.github.com/users/ameya-parab/gists{/gist_id}", "starred_url": "https://api.github.com/users/ameya-parab/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ameya-parab/subscriptions", "organizations_url": "https://api.github.com/users/ameya-parab/orgs", "repos_url": "https://api.github.com/users/ameya-parab/repos", "events_url": "https://api.github.com/users/ameya-parab/events{/privacy}", "received_events_url": "https://api.github.com/users/ameya-parab/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Hi @ameya-parab, thank you for bringing up this feature request. We recommend using the API from k8s to collect pod info now, and we don't have plans to implement this functionality. Feel free to open this thread if you have more thoughts!" ]
"2022-09-26T18:05:01"
"2022-09-29T22:52:10"
"2022-09-29T22:52:09"
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? We want to track the start and end times of containers within a pod (Kubeflow Pipeline/Run task). In other words, for better observability, we would like to log the start and end times of the `init`, `wait` and `main` container which are triggered as part of a KFP pod. The `mlpipeline` metabase currently tracks total run and task duration but does not have information on container runtimes. ### What is the use case or pain point? To improve the observability experience, we would like to track the initialization, wait, and execution time for a Kubeflow pipeline task (preferably in the metabase). ### Is there a workaround currently? We are currently polling the Kubernetes API server periodically to retrieve container runtimes for a specific Pod (KFP task), but there is a major issue with short lived tasks (<10 seconds), where the Pod's container details are lost if the node on which the Pod was running is scaled down. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8308/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8308/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8307
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8307/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8307/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8307/events
https://github.com/kubeflow/pipelines/issues/8307
1,386,037,128
I_kwDOB-71UM5SnT-I
8,307
[bug] Unable to produce pipeline with basic Component I/O
{ "login": "MartinRogmann", "id": 93972266, "node_id": "U_kgDOBZnnKg", "avatar_url": "https://avatars.githubusercontent.com/u/93972266?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MartinRogmann", "html_url": "https://github.com/MartinRogmann", "followers_url": "https://api.github.com/users/MartinRogmann/followers", "following_url": "https://api.github.com/users/MartinRogmann/following{/other_user}", "gists_url": "https://api.github.com/users/MartinRogmann/gists{/gist_id}", "starred_url": "https://api.github.com/users/MartinRogmann/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MartinRogmann/subscriptions", "organizations_url": "https://api.github.com/users/MartinRogmann/orgs", "repos_url": "https://api.github.com/users/MartinRogmann/repos", "events_url": "https://api.github.com/users/MartinRogmann/events{/privacy}", "received_events_url": "https://api.github.com/users/MartinRogmann/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1260031624, "node_id": "MDU6TGFiZWwxMjYwMDMxNjI0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/samples", "name": "area/samples", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Hi @MartinRogmann, maybe consider upgrading the kfp SDK to 1.8.14 and see if the error persists? ", "Hi @Linchin, that solved the problem. Thank you" ]
"2022-09-26T12:56:54"
"2022-09-30T08:19:53"
"2022-09-30T08:19:53"
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> Installed kubeflow manifest **v1.6.0-rc.1** with **minikube v1.26.0** and **kubectl v1.21.14** For detailed output: minikube version: v1.26.0 commit: f4b412861bb746be73053c9f6d2895f12cf78565 kubectl version Client Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.14", GitCommit:"0f77da5bd4809927e15d1658fb4aa8f13ad890a5", GitTreeState:"clean", BuildDate:"2022-06-15T14:17:29Z", GoVersion:"go1.16.15", Compiler:"gc", Platform:"linux/amd64"} Server Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.14", GitCommit:"0f77da5bd4809927e15d1658fb4aa8f13ad890a5", GitTreeState:"clean", BuildDate:"2022-06-15T14:11:36Z", GoVersion:"go1.16.15", Compiler:"gc", Platform:"linux/amd64"} * KFP version: <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> Inside notebook: pip list | grep kfp kfp 1.6.3 kfp-pipeline-spec 0.1.16 kfp-server-api 1.6.0 ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> We tried to produce a basic pipeline that saves pandas dataframes in one component and uses it in the next component using artifacts. We experimented with both Output[Dataset] and OutputPath(str) and tried various samples from v1/v2 SDK without success. To reproduce, create the following options: **Imports** ``` import kfp import kfp.dsl as dsl from kfp.v2.dsl import component, Input, Output, InputPath, OutputPath, Dataset, Metrics, Model, Artifact ``` **Option A: Using csv** ``` @component( packages_to_install = ["pandas", "sklearn"], ) def load(data: Output[Dataset]): import pandas as pd from sklearn import datasets dataset = datasets.load_iris() df = pd.DataFrame(data=dataset.data, columns= ["Petal Length", "Petal Width", "Sepal Length", "Sepal Width"]) df.to_csv(data.path) @component( packages_to_install = ["pandas"], ) def print_head(data: Input[Dataset]): import pandas as pd df = pd.read_csv(data.path) print(df.head()) @dsl.pipeline( name='Iris', description='iris' ) def pipeline(): load_task = load() print_task = print_head(data=load_task.outputs["data"]) kfp.compiler.Compiler(mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE).compile( pipeline_func=pipeline, package_path='iris_csv.yaml') ``` **Option B: Using pickle** ``` @component( packages_to_install = ["pandas", "sklearn"], ) def load(data: Output[Dataset]): import pickle import pandas as pd from sklearn import datasets dataset = datasets.load_iris() df = pd.DataFrame(data=dataset.data, columns= ["Petal Length", "Petal Width", "Sepal Length", "Sepal Width"]) with open(data.path, "wb") as f: pickle.dump(df, f) @component( packages_to_install = ["pandas"], ) def print_head(data: Input[Dataset]): import pickle import pandas as pd with open(data.path, 'rb') as f: df = pickle.load(f) print(df.head()) @dsl.pipeline( name='Iris', description='iris' ) def pipeline(): load_task = load() print_task = print_head(data=load_task.outputs["data"]) kfp.compiler.Compiler(mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE).compile( pipeline_func=pipeline, package_path='iris_pickle.yaml') ``` **Option C: Using json** ``` @component( packages_to_install = ["pandas", "sklearn"], ) def load(data: Output[Dataset]): import pandas as pd from sklearn import datasets dataset = datasets.load_iris() df = pd.DataFrame(data=dataset.data, columns= ["Petal Length", "Petal Width", "Sepal Length", "Sepal Width"]) json_df = {"data": df} with open(data.path, "wb") as f: f.write(json.dumps(result, default=lambda df: json.loads(df.to_json()))) @component( packages_to_install = ["pandas"], ) def print_head(data: Input[Dataset]): import pandas as pd with open(data.path, 'rb') as f: df = pd.DataFrame(json.loads(f.read())["data"]) print(df.head()) @dsl.pipeline( name='Iris', description='iris' ) def pipeline(): load_task = load() print_task = print_head(data=load_task.outputs["data"]) kfp.compiler.Compiler(mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE).compile( pipeline_func=pipeline, package_path='iris_json.yaml') ``` All three options result in the following error: ``` time="2022-09-26T09:50:42.264Z" level=info msg="capturing logs" argo=true WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv Traceback (most recent call last): File "/tmp/tmp.CjkMbFoRTh", line 826, in <module> executor_main() File "/tmp/tmp.CjkMbFoRTh", line 820, in executor_main function_to_execute=function_to_execute) File "/tmp/tmp.CjkMbFoRTh", line 549, in __init__ artifacts_list[0]) File "/tmp/tmp.CjkMbFoRTh", line 562, in _make_output_artifact os.makedirs(os.path.dirname(artifact.path), exist_ok=True) File "/usr/local/lib/python3.7/posixpath.py", line 156, in dirname p = os.fspath(p) TypeError: expected str, bytes or os.PathLike object, not NoneType F0926 09:51:03.011047 26 main.go:56] Failed to execute component: exit status 1 time="2022-09-26T09:51:03.028Z" level=error msg="cannot save artifact /tmp/outputs/data/data" argo=true error="stat /tmp/outputs/data/data: no such file or directory" Error: exit status 1 ``` ### Expected result <!-- What should the correct behavior be? --> Clearer examples in https://github.com/kubeflow/pipelines/tree/master/samples. As it is hard to distinguish which sample are actually using the v1 or v2 syntax and are compatible with the latest version. Can you provide a minimal working example that ideally also includes the use of multiple inputs and outputs and the logging of metrics and models. The samples we found in the docs or on github either use the deprecated container_ops instead of components or could not be run by us. ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area testing --> /area sdk /area samples /area components --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8307/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8307/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8302
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8302/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8302/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8302/events
https://github.com/kubeflow/pipelines/issues/8302
1,385,145,397
I_kwDOB-71UM5Sj6Q1
8,302
[sdk] KFPv2: Custom Container component set bool default value `False` to `0` which caused runtime failure
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "Linchin", "id": 12806577, "node_id": "MDQ6VXNlcjEyODA2NTc3", "avatar_url": "https://avatars.githubusercontent.com/u/12806577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Linchin", "html_url": "https://github.com/Linchin", "followers_url": "https://api.github.com/users/Linchin/followers", "following_url": "https://api.github.com/users/Linchin/following{/other_user}", "gists_url": "https://api.github.com/users/Linchin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Linchin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Linchin/subscriptions", "organizations_url": "https://api.github.com/users/Linchin/orgs", "repos_url": "https://api.github.com/users/Linchin/repos", "events_url": "https://api.github.com/users/Linchin/events{/privacy}", "received_events_url": "https://api.github.com/users/Linchin/received_events", "type": "User", "site_admin": false }
[ { "login": "Linchin", "id": 12806577, "node_id": "MDQ6VXNlcjEyODA2NTc3", "avatar_url": "https://avatars.githubusercontent.com/u/12806577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Linchin", "html_url": "https://github.com/Linchin", "followers_url": "https://api.github.com/users/Linchin/followers", "following_url": "https://api.github.com/users/Linchin/following{/other_user}", "gists_url": "https://api.github.com/users/Linchin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Linchin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Linchin/subscriptions", "organizations_url": "https://api.github.com/users/Linchin/orgs", "repos_url": "https://api.github.com/users/Linchin/repos", "events_url": "https://api.github.com/users/Linchin/events{/privacy}", "received_events_url": "https://api.github.com/users/Linchin/received_events", "type": "User", "site_admin": false }, { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-09-25T19:56:04"
"2023-02-23T19:36:36"
"2023-02-23T19:36:36"
COLLABORATOR
null
### Environment * KFP version: KFP 2.0.0-alpha.4 * KFP SDK version: SDK 2.0.0-beta4 ### Steps to reproduce Create the following custom container component: ``` from kfp.dsl import Input, Output, Artifact, container_component, ContainerSpec @dsl.container_component def create_dataset(experiment_name: str, experiment_namespace: str, experiment_timeout_minutes: int, experiment_spec_json: Input[Artifact], parameter_set: Output[Artifact], delete_finished_experiment: bool = False): return ContainerSpec( image='docker.io/kubeflowkatib/kubeflow-pipelines-launcher', command=['python', 'src/launch_experiment.py'], args=[ '--experiment-name', experiment_name, '--experiment-namespace', experiment_namespace, '--experiment-spec', experiment_spec_json.path, '--experiment-timeout-minutes',experiment_timeout_minutes, '--delete-after-done', delete_finished_experiment, '--output-file', parameter_set.path, ]) ``` Note that `delete_finished_experiment` has a default value `False`. When compiling the pipeline, this component is converted to IR with the following format: ``` tasks: create-dataset: cachingOptions: enableCache: true componentRef: name: comp-create-dataset dependentTasks: - create-katib-experiment-task inputs: artifacts: experiment_spec_json: taskOutputArtifact: outputArtifactKey: experiment_spec_json producerTask: create-katib-experiment-task parameters: delete_finished_experiment: runtimeValue: constant: 0 experiment_name: componentInputParameter: name experiment_namespace: componentInputParameter: namespace experiment_timeout_minutes: runtimeValue: constant: 60 taskInfo: name: create-dataset ``` Note that `delete_finished_experiment` has a runtime value as 0. ### Expected result What should the type of boolean default value be? I am expecting the value to be either of followings: bool `false`, bool `False` or string `False`. I didn't expect the value to be number 0. ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8302/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8302/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8300
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8300/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8300/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8300/events
https://github.com/kubeflow/pipelines/issues/8300
1,384,498,692
I_kwDOB-71UM5ShcYE
8,300
[sdk] Unable to import ServiceAccountTokenVolumeCredentials in KFPv2 SDK
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Thank you, @zijianjoy!" ]
"2022-09-24T03:09:25"
"2022-09-26T15:08:13"
null
COLLABORATOR
null
### Environment * KFP version: KFP 2.0.0-alpha4 * KFP SDK version: 2.0.0-beta.4 ### Steps to reproduce Follow https://www.kubeflow.org/docs/components/pipelines/v1/sdk/connect-api/#full-kubeflow-subfrom-inside-clustersub to set the default SA token path as empty. But `ServiceAccountTokenVolumeCredentials` cannot be imported. From the `__init__.py` file, ServiceAccountTokenVolumeCredentials is not exported: https://github.com/kubeflow/pipelines/blob/e14a784327c83e2d7f3e66dc09f4b3af3323cbc7/sdk/python/kfp/client/__init__.py#L17 If we don't import `ServiceAccountTokenVolumeCredentials`, that means by default it should use default SA token path when creating KFP client: `kfp.Client()`. However, this command has failed due to: ``` File /opt/conda/lib/python3.8/site-packages/kfp/client/client.py:372, in Client._get_config_with_default_credentials(self, config) 360 """Apply default credentials to the configuration object. 361 362 This method accepts a Configuration object and extends it with 363 some default credentials interface. 364 """ 365 # XXX: The default credentials are audience-based service account tokens 366 # projected by the kubelet (ServiceAccountTokenVolumeCredentials). As we 367 # implement more and more credentials, we can have some heuristic and (...) 370 371 # TODO: auth.ServiceAccountCredentials does not exist... dead code path? --> 372 credentials = auth.ServiceAccountTokenVolumeCredentials() 373 config_copy = copy.deepcopy(config) 375 try: AttributeError: module 'kfp.client.auth' has no attribute 'ServiceAccountTokenVolumeCredentials' ``` ### Expected result User can create KFP client using the default SA token path. User can also configure SA token path to be custom value so KFP SDK can read from a different location. User can configure SA token path to be None, in this case, KFP SDK should be able to use environment variable KF_PIPELINES_SA_TOKEN_PATH. cc @chensun @connor-mccarthy --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8300/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8300/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8294
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8294/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8294/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8294/events
https://github.com/kubeflow/pipelines/issues/8294
1,382,977,963
I_kwDOB-71UM5SbpGr
8,294
[feature] Stop recurring run when failed
{ "login": "casassg", "id": 6912589, "node_id": "MDQ6VXNlcjY5MTI1ODk=", "avatar_url": "https://avatars.githubusercontent.com/u/6912589?v=4", "gravatar_id": "", "url": "https://api.github.com/users/casassg", "html_url": "https://github.com/casassg", "followers_url": "https://api.github.com/users/casassg/followers", "following_url": "https://api.github.com/users/casassg/following{/other_user}", "gists_url": "https://api.github.com/users/casassg/gists{/gist_id}", "starred_url": "https://api.github.com/users/casassg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/casassg/subscriptions", "organizations_url": "https://api.github.com/users/casassg/orgs", "repos_url": "https://api.github.com/users/casassg/repos", "events_url": "https://api.github.com/users/casassg/events{/privacy}", "received_events_url": "https://api.github.com/users/casassg/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "This feels like a custom logic that probably shouldn't be part of a scheduling system. Alternatives could be implementing the custom logic yourself either from client side or in component level like using exit handler.", "@chensun I wonder wdym to implement this from client side? Scheduled runs after they are created as a resource are only checked by the controller itself so not sure how could client perform retries. Unless you mean reimplementing recurring runs from client manually\r\n\r\nAlso exithandler is an option as I mentioned above, however it requires the pipeline component to access the server and modify its state. In addition to the fact that this means you cant do any other exit handler (aka you end up having to add all code within 1 component if you need any other clean up).\r\n\r\nalso if we use Airflow as reference this are all implement in the scheduler there btw, so it doesnt seem logic that shouldnt be part of a scheduling system \r\n" ]
"2022-09-22T20:28:32"
"2022-09-23T00:51:46"
"2022-09-22T22:54:31"
CONTRIBUTOR
null
### Feature Area /area backend ### What feature would you like to see? Support for stopping recurring runs (or retry N times) if a run fails. ### What is the use case or pain point? At the moment, when a recurring run has a failed run, it will continue scheduling new runs independently of previous state. Ideally, we would like to define a custom behaviour were a scheduled run gets retried N times or it stops scheduling new runs after trying to schedule N runs. This is mostly for cases where running for date X+1 is counterproducing when X is missing. For example for data processing where you want to make sure that no gaps are missing between segments. ### Is there a workaround currently? Currently there is some sort of way which is to use an ExitHandler to stop the run by instrospecting into itself and finding the recurring_run id associated with current run. However that makes it complicated to support. Ideally we make the handling more transparent by doing this on the controller and configuring it from UI. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8294/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8294/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8292
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8292/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8292/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8292/events
https://github.com/kubeflow/pipelines/issues/8292
1,382,464,256
I_kwDOB-71UM5SZrsA
8,292
[feature] Add a progress bar in the DAG UI for long-running tasks
{ "login": "MainRo", "id": 814804, "node_id": "MDQ6VXNlcjgxNDgwNA==", "avatar_url": "https://avatars.githubusercontent.com/u/814804?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MainRo", "html_url": "https://github.com/MainRo", "followers_url": "https://api.github.com/users/MainRo/followers", "following_url": "https://api.github.com/users/MainRo/following{/other_user}", "gists_url": "https://api.github.com/users/MainRo/gists{/gist_id}", "starred_url": "https://api.github.com/users/MainRo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MainRo/subscriptions", "organizations_url": "https://api.github.com/users/MainRo/orgs", "repos_url": "https://api.github.com/users/MainRo/repos", "events_url": "https://api.github.com/users/MainRo/events{/privacy}", "received_events_url": "https://api.github.com/users/MainRo/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "This is infeasible, as each task is a containerized app. KFP system doesn't have visibility into the progress of a task. \r\nOur suggestion would be use the log workaround as you described.", "ok, thanks for answering." ]
"2022-09-22T13:33:29"
"2022-09-23T08:06:10"
"2022-09-22T22:47:37"
NONE
null
### Feature Area /area frontend /area backend /area sdk ### What feature would you like to see? In the UI displaying the DAG of the pipeline, it would be great to see a progress bar on each task. This would allow to visually see how far the task is to completion. In the code of the component, we could provide information on the progress with a dedicated API in the SDK. ### What is the use case or pain point? It is difficult to estimate how long a task will run. The only option I know, as of today, is to log the progress information. ### Is there a workaround currently? I add some traces/logs in my task, but this implies looking regularly to the logs of each running task. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8292/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8292/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8291
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8291/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8291/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8291/events
https://github.com/kubeflow/pipelines/issues/8291
1,382,180,542
I_kwDOB-71UM5SYma-
8,291
[sdk] Package version conflict in kfp and google-cloud-pipeline-components
{ "login": "wardVD", "id": 2136274, "node_id": "MDQ6VXNlcjIxMzYyNzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/2136274?v=4", "gravatar_id": "", "url": "https://api.github.com/users/wardVD", "html_url": "https://github.com/wardVD", "followers_url": "https://api.github.com/users/wardVD/followers", "following_url": "https://api.github.com/users/wardVD/following{/other_user}", "gists_url": "https://api.github.com/users/wardVD/gists{/gist_id}", "starred_url": "https://api.github.com/users/wardVD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wardVD/subscriptions", "organizations_url": "https://api.github.com/users/wardVD/orgs", "repos_url": "https://api.github.com/users/wardVD/repos", "events_url": "https://api.github.com/users/wardVD/events{/privacy}", "received_events_url": "https://api.github.com/users/wardVD/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "chongyouquan", "id": 48691403, "node_id": "MDQ6VXNlcjQ4NjkxNDAz", "avatar_url": "https://avatars.githubusercontent.com/u/48691403?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chongyouquan", "html_url": "https://github.com/chongyouquan", "followers_url": "https://api.github.com/users/chongyouquan/followers", "following_url": "https://api.github.com/users/chongyouquan/following{/other_user}", "gists_url": "https://api.github.com/users/chongyouquan/gists{/gist_id}", "starred_url": "https://api.github.com/users/chongyouquan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chongyouquan/subscriptions", "organizations_url": "https://api.github.com/users/chongyouquan/orgs", "repos_url": "https://api.github.com/users/chongyouquan/repos", "events_url": "https://api.github.com/users/chongyouquan/events{/privacy}", "received_events_url": "https://api.github.com/users/chongyouquan/received_events", "type": "User", "site_admin": false }
[ { "login": "chongyouquan", "id": 48691403, "node_id": "MDQ6VXNlcjQ4NjkxNDAz", "avatar_url": "https://avatars.githubusercontent.com/u/48691403?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chongyouquan", "html_url": "https://github.com/chongyouquan", "followers_url": "https://api.github.com/users/chongyouquan/followers", "following_url": "https://api.github.com/users/chongyouquan/following{/other_user}", "gists_url": "https://api.github.com/users/chongyouquan/gists{/gist_id}", "starred_url": "https://api.github.com/users/chongyouquan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chongyouquan/subscriptions", "organizations_url": "https://api.github.com/users/chongyouquan/orgs", "repos_url": "https://api.github.com/users/chongyouquan/repos", "events_url": "https://api.github.com/users/chongyouquan/events{/privacy}", "received_events_url": "https://api.github.com/users/chongyouquan/received_events", "type": "User", "site_admin": false } ]
null
[ "`google-cloud-pipeline-components` is currently not fully compatible with kfp 2.0*. We're working on the migration, and @chongyouquan may help provide a rough timeline.\r\n", "Any update here?", "Hi, the following `requirements.in` works:\r\n```\r\nkfp==2.0.0b10\r\nproto-plus==1.19.6\r\ngoogleapis-common-protos==1.56.4\r\ngoogle-api-core==2.8.1\r\ngoogle-cloud-notebooks==1.3.2\r\ngoogle-cloud-aiplatform==1.16.1\r\ngoogle-cloud-resource-manager==1.6.0\r\ngoogle-cloud-pipeline-components\r\ngrpcio-status==1.47.0\r\n```" ]
"2022-09-22T10:03:22"
"2023-01-11T08:51:00"
null
NONE
null
### Environment * KFP version: 2.0.0b1 ### Steps to reproduce Create a `requirements.in` file: ``` kfp==2.0.0b1 google-cloud-pipeline-components ``` Then run `pip-compile -v requirements.in` ### Result ```Using indexes: https://pypi.org/simple ROUND 1 Current constraints: google-cloud-pipeline-components (from -r requirements.in (line 7)) kfp==2.0.0b1 (from -r requirements.in (line 6)) Finding the best candidates: found candidate google-cloud-pipeline-components==1.0.22 (constraint was <any>) found candidate kfp==2.0.0b1 (constraint was ==2.0.0b1) Finding secondary dependencies: kfp==2.0.0b1 requires absl-py<2,>=0.9, click<9,>=7.1.2, cloudpickle<3,>=2.0.0, Deprecated<2,>=1.2.7, docstring-parser<1,>=0.7.3, fire<1,>=0.3.1, google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5, google-auth<3,>=1.6.1, google-cloud-storage<3,>=2.2.1, jsonschema<4,>=3.0.1, kfp-pipeline-spec<0.2.0,>=0.1.14, kfp-server-api<3.0.0,>=2.0.0a0, kubernetes<19,>=8.0.0, protobuf<4,>=3.13.0, PyYAML<6,>=5.3, requests-toolbelt<1,>=0.8.0, strip-hints<1,>=0.1.8, tabulate<1,>=0.8.6, typer<1.0,>=0.3.2, uritemplate<4,>=3.0.1 google-cloud-pipeline-components==1.0.22 requires google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5, google-cloud-aiplatform<2,>=1.11.0, google-cloud-notebooks>=0.4.0, google-cloud-storage<2,>=1.20.0, kfp<2.0.0,>=1.8.9 New dependencies found in this round: adding ('absl-py', '<2,>=0.9', []) adding ('click', '<9,>=7.1.2', []) adding ('cloudpickle', '<3,>=2.0.0', []) adding ('deprecated', '<2,>=1.2.7', []) adding ('docstring-parser', '<1,>=0.7.3', []) adding ('fire', '<1,>=0.3.1', []) adding ('google-api-core', '!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5', []) adding ('google-auth', '<3,>=1.6.1', []) adding ('google-cloud-aiplatform', '<2,>=1.11.0', []) adding ('google-cloud-notebooks', '>=0.4.0', []) adding ('google-cloud-storage', '<2,<3,>=1.20.0,>=2.2.1', []) adding ('jsonschema', '<4,>=3.0.1', []) adding ('kfp', '<2.0.0,>=1.8.9', []) adding ('kfp-pipeline-spec', '<0.2.0,>=0.1.14', []) adding ('kfp-server-api', '<3.0.0,>=2.0.0a0', []) adding ('kubernetes', '<19,>=8.0.0', []) adding ('protobuf', '<4,>=3.13.0', []) adding ('pyyaml', '<6,>=5.3', []) adding ('requests-toolbelt', '<1,>=0.8.0', []) adding ('strip-hints', '<1,>=0.1.8', []) adding ('tabulate', '<1,>=0.8.6', []) adding ('typer', '<1.0,>=0.3.2', []) adding ('uritemplate', '<4,>=3.0.1', []) Removed dependencies in this round: ------------------------------------------------------------ Result of round 1: not stable ROUND 2 Current constraints: absl-py<2,>=0.9 (from kfp==2.0.0b1->-r requirements.in (line 6)) click<9,>=7.1.2 (from kfp==2.0.0b1->-r requirements.in (line 6)) cloudpickle<3,>=2.0.0 (from kfp==2.0.0b1->-r requirements.in (line 6)) Deprecated<2,>=1.2.7 (from kfp==2.0.0b1->-r requirements.in (line 6)) docstring-parser<1,>=0.7.3 (from kfp==2.0.0b1->-r requirements.in (line 6)) fire<1,>=0.3.1 (from kfp==2.0.0b1->-r requirements.in (line 6)) google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5 (from kfp==2.0.0b1->-r requirements.in (line 6)) google-auth<3,>=1.6.1 (from kfp==2.0.0b1->-r requirements.in (line 6)) google-cloud-aiplatform<2,>=1.11.0 (from google-cloud-pipeline-components==1.0.22->-r requirements.in (line 7)) google-cloud-notebooks>=0.4.0 (from google-cloud-pipeline-components==1.0.22->-r requirements.in (line 7)) google-cloud-pipeline-components (from -r requirements.in (line 7)) google-cloud-storage<2,<3,>=1.20.0,>=2.2.1 (from kfp==2.0.0b1->-r requirements.in (line 6)) jsonschema<4,>=3.0.1 (from kfp==2.0.0b1->-r requirements.in (line 6)) kfp<2.0.0,==2.0.0b1,>=1.8.9 (from -r requirements.in (line 6)) kfp-pipeline-spec<0.2.0,>=0.1.14 (from kfp==2.0.0b1->-r requirements.in (line 6)) kfp-server-api<3.0.0,>=2.0.0a0 (from kfp==2.0.0b1->-r requirements.in (line 6)) kubernetes<19,>=8.0.0 (from kfp==2.0.0b1->-r requirements.in (line 6)) protobuf<4,>=3.13.0 (from kfp==2.0.0b1->-r requirements.in (line 6)) PyYAML<6,>=5.3 (from kfp==2.0.0b1->-r requirements.in (line 6)) requests-toolbelt<1,>=0.8.0 (from kfp==2.0.0b1->-r requirements.in (line 6)) strip-hints<1,>=0.1.8 (from kfp==2.0.0b1->-r requirements.in (line 6)) tabulate<1,>=0.8.6 (from kfp==2.0.0b1->-r requirements.in (line 6)) typer<1.0,>=0.3.2 (from kfp==2.0.0b1->-r requirements.in (line 6)) uritemplate<4,>=3.0.1 (from kfp==2.0.0b1->-r requirements.in (line 6)) Finding the best candidates: found candidate absl-py==1.2.0 (constraint was >=0.9,<2) found candidate click==8.1.3 (constraint was >=7.1.2,<9) found candidate cloudpickle==2.2.0 (constraint was >=2.0.0,<3) found candidate deprecated==1.2.13 (constraint was >=1.2.7,<2) found candidate docstring-parser==0.15 (constraint was >=0.7.3,<1) found candidate fire==0.4.0 (constraint was >=0.3.1,<1) found candidate google-api-core==2.10.1 (constraint was >=1.31.5,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev) found candidate google-auth==1.35.0 (constraint was >=1.6.1,<3) found candidate google-cloud-aiplatform==1.17.1 (constraint was >=1.11.0,<2) found candidate google-cloud-notebooks==1.4.2 (constraint was >=0.4.0) found candidate google-cloud-pipeline-components==1.0.22 (constraint was <any>) Could not find a version that matches google-cloud-storage<2,<3,>=1.20.0,>=2.2.1 (from kfp==2.0.0b1->-r requirements.in (line 6)) Tried: 0.20.0, 0.20.0, 0.21.0, 0.21.0, 0.22.0, 0.22.0, 0.23.0, 0.23.0, 0.23.1, 0.23.1, 1.0.0, 1.0.0, 1.1.0, 1.1.0, 1.1.1, 1.1.1, 1.2.0, 1.2.0, 1.3.0, 1.3.0, 1.3.1, 1.3.1, 1.3.2, 1.3.2, 1.4.0, 1.4.0, 1.5.0, 1.5.0, 1.6.0, 1.6.0, 1.7.0, 1.7.0, 1.8.0, 1.8.0, 1.9.0, 1.9.0, 1.10.0, 1.10.0, 1.11.0, 1.11.0, 1.11.1, 1.11.1, 1.12.0, 1.12.0, 1.12.1, 1.12.1, 1.13.0, 1.13.0, 1.13.1, 1.13.1, 1.13.2, 1.13.2, 1.13.3, 1.13.3, 1.14.0, 1.14.0, 1.14.1, 1.14.1, 1.15.0, 1.15.0, 1.15.1, 1.15.1, 1.15.2, 1.15.2, 1.16.0, 1.16.0, 1.16.1, 1.16.1, 1.16.2, 1.16.2, 1.17.0, 1.17.0, 1.17.1, 1.17.1, 1.18.0, 1.18.0, 1.18.1, 1.18.1, 1.19.0, 1.19.0, 1.19.1, 1.19.1, 1.20.0, 1.20.0, 1.21.0, 1.21.0, 1.22.0, 1.22.0, 1.23.0, 1.23.0, 1.24.0, 1.24.0, 1.24.1, 1.24.1, 1.25.0, 1.25.0, 1.26.0, 1.26.0, 1.27.0, 1.27.0, 1.28.0, 1.28.0, 1.28.1, 1.28.1, 1.29.0, 1.29.0, 1.30.0, 1.30.0, 1.31.0, 1.31.0, 1.31.1, 1.31.1, 1.31.2, 1.31.2, 1.32.0, 1.32.0, 1.33.0, 1.33.0, 1.34.0, 1.34.0, 1.35.0, 1.35.0, 1.35.1, 1.35.1, 1.36.0, 1.36.0, 1.36.1, 1.36.1, 1.36.2, 1.36.2, 1.37.0, 1.37.0, 1.37.1, 1.37.1, 1.38.0, 1.38.0, 1.39.0, 1.39.0, 1.40.0, 1.40.0, 1.41.0, 1.41.0, 1.41.1, 1.41.1, 1.42.0, 1.42.0, 1.42.1, 1.42.1, 1.42.2, 1.42.2, 1.42.3, 1.42.3, 1.43.0, 1.43.0, 1.44.0, 1.44.0, 2.0.0, 2.0.0, 2.1.0, 2.1.0, 2.2.0, 2.2.0, 2.2.1, 2.2.1, 2.3.0, 2.3.0, 2.4.0, 2.4.0, 2.5.0, 2.5.0 There are incompatible versions in the resolved dependencies: google-cloud-storage<3,>=2.2.1 (from kfp==2.0.0b1->-r requirements.in (line 6)) google-cloud-storage<2,>=1.20.0 (from google-cloud-pipeline-components==1.0.22->-r requirements.in (line 7)) ``` ### Expected result An output with compatible package versions. ### Possible solution Bump the google-cloud-storage version to include `<3,>=2.2.1` in https://github.com/kubeflow/pipelines/blob/2.0.0b4/components/google-cloud/dependencies.py <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8291/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8291/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8283
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8283/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8283/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8283/events
https://github.com/kubeflow/pipelines/issues/8283
1,378,737,179
I_kwDOB-71UM5SLdwb
8,283
[backend] metadata-writer cannot save metadata of S3 artifacts with argo v3.1+
{ "login": "tktest1234", "id": 113957092, "node_id": "U_kgDOBsrY5A", "avatar_url": "https://avatars.githubusercontent.com/u/113957092?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tktest1234", "html_url": "https://github.com/tktest1234", "followers_url": "https://api.github.com/users/tktest1234/followers", "following_url": "https://api.github.com/users/tktest1234/following{/other_user}", "gists_url": "https://api.github.com/users/tktest1234/gists{/gist_id}", "starred_url": "https://api.github.com/users/tktest1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tktest1234/subscriptions", "organizations_url": "https://api.github.com/users/tktest1234/orgs", "repos_url": "https://api.github.com/users/tktest1234/repos", "events_url": "https://api.github.com/users/tktest1234/events{/privacy}", "received_events_url": "https://api.github.com/users/tktest1234/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "@tktest1234 Are you running on AWS and want to use KFP with S3 as storage backend?\r\n\r\n", "Yes, I install kubeflow1.4 on AWS(EKS) and want to use KFP with S3 as storage backend.\r\nIsn't it expected?", "Hey @tktest1234 please follow the instructions on AWS distribution of Kubeflow to install KFP with S3 as artifact storage https://awslabs.github.io/kubeflow-manifests/docs/deployment/rds-s3/\r\n\r\nPlease create an issue on the awslabs repository if you face any issues", "Thank you, but awslabs's installation doesn't solve the problem.\r\n\r\n#5829 is exactly the PR for this issue. I hope it to progress." ]
"2022-09-20T02:39:36"
"2022-12-28T03:18:38"
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? kubeflow 1.4 manifest (git::https://github.com/kubeflow/manifests.git?ref=v1.4.0 ) and manually change config for S3 * KFP version: 1.7.0 (kubeflow 1.4) * KFP SDK version: 1.7.1 ### Steps to reproduce 1. setting to save artifacts to S3 (change configmap for workflow-controller) 1. Run pipeline: [Tutorial] Data passing in python components metadata-writer output log ``` Kubernetes Pod event: ADDED file-passing-pipelines-txmqj-2254123803 14598940 Traceback (most recent call last): File "/kfp/metadata_writer/metadata_writer.py", line 238, in <module> artifact_uri = argo_artifact_to_uri(argo_artifact) File "/kfp/metadata_writer/metadata_writer.py", line 106, in argo_artifact_to_uri provider=get_object_store_provider(s3_artifact['endpoint']), KeyError: 'endpoint' ``` result: - Artifact in metadb is empty ### Expected result metadata for the artifacts is logged into metadb ### Materials and Reference I think it is caused by changes for argo Key-Only Artifacts. In https://github.com/kubeflow/pipelines/blob/1.7.0/backend/metadata_writer/src/metadata_writer.py#L318, metadata-writer read "workflows.argoproj.io/outputs", but endpoint and bucket are not passed by argo3.1+. So https://github.com/kubeflow/pipelines/blob/1.7.0/backend/metadata_writer/src/metadata_writer.py#L105 cannot set the correct URL. Related frontend issue: https://github.com/kubeflow/pipelines/issues/5930 --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8283/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8283/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8269
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8269/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8269/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8269/events
https://github.com/kubeflow/pipelines/issues/8269
1,374,042,311
I_kwDOB-71UM5R5jjH
8,269
Cannot get key for artifact location
{ "login": "SeibertronSS", "id": 69496864, "node_id": "MDQ6VXNlcjY5NDk2ODY0", "avatar_url": "https://avatars.githubusercontent.com/u/69496864?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SeibertronSS", "html_url": "https://github.com/SeibertronSS", "followers_url": "https://api.github.com/users/SeibertronSS/followers", "following_url": "https://api.github.com/users/SeibertronSS/following{/other_user}", "gists_url": "https://api.github.com/users/SeibertronSS/gists{/gist_id}", "starred_url": "https://api.github.com/users/SeibertronSS/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SeibertronSS/subscriptions", "organizations_url": "https://api.github.com/users/SeibertronSS/orgs", "repos_url": "https://api.github.com/users/SeibertronSS/repos", "events_url": "https://api.github.com/users/SeibertronSS/events{/privacy}", "received_events_url": "https://api.github.com/users/SeibertronSS/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[ "@SeibertronSS, do other pipelines have this error? Have you successfully run a pipeline on this deployment?\r\n\r\nIt seems like there may be an issue with the deployment related to the [minio artifact secret](https://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/manifests/kustomize/third-party/minio/base/mlpipeline-minio-artifact-secret.yaml)." ]
"2022-09-15T07:11:35"
"2022-09-15T22:52:43"
null
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? I deploy the Kubeflow Pipelines (standalone) by Kubeflow Manifests (https://github.com/kubeflow/manifests) * KFP version: 1.7.0 * KFP SDK version: 1.8.2 When I submit a pipeline, I will get the error `This step is in Error state with this message: Error (exit code 1): key unsupported: cannot get key for artifact location, because it is invalid` ![image](https://user-images.githubusercontent.com/69496864/190337738-fdab8af7-f0af-4ba1-9fa2-a7529bc90c7a.png) The code is as follows ``` import kfp from kfp.components import create_component_from_func client = kfp.Client() def add(a: float, b: float) -> float: '''Calculates sum of two arguments''' return a + b add_op = create_component_from_func( add, output_component_file='add_component.yaml') import kfp.dsl as dsl @dsl.pipeline( name='Addition pipeline', description='An example pipeline that performs addition calculations.' ) def add_pipeline( a='1', b='7', ): # Passes a pipeline parameter and a constant value to the `add_op` factory # function. first_add_task = add_op(a, 4) # Passes an output reference from `first_add_task` and a pipeline parameter # to the `add_op` factory function. For operations with a single return # value, the output reference can be accessed as `task.output` or # `task.outputs['output_name']`. second_add_task = add_op(first_add_task.output, b) # Specify argument values for your pipeline run. arguments = {'a': '7', 'b': '8'} # Create a pipeline run, using the client you initialized in a prior step. client.create_run_from_pipeline_func(add_pipeline, arguments=arguments, run_name="add-8") print("submit pipeline") ``` This problem seems to be related to Minio. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8269/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8269/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8267
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8267/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8267/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8267/events
https://github.com/kubeflow/pipelines/issues/8267
1,373,732,443
I_kwDOB-71UM5R4X5b
8,267
[sdk] ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number
{ "login": "pablofiumara", "id": 4154361, "node_id": "MDQ6VXNlcjQxNTQzNjE=", "avatar_url": "https://avatars.githubusercontent.com/u/4154361?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pablofiumara", "html_url": "https://github.com/pablofiumara", "followers_url": "https://api.github.com/users/pablofiumara/followers", "following_url": "https://api.github.com/users/pablofiumara/following{/other_user}", "gists_url": "https://api.github.com/users/pablofiumara/gists{/gist_id}", "starred_url": "https://api.github.com/users/pablofiumara/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pablofiumara/subscriptions", "organizations_url": "https://api.github.com/users/pablofiumara/orgs", "repos_url": "https://api.github.com/users/pablofiumara/repos", "events_url": "https://api.github.com/users/pablofiumara/events{/privacy}", "received_events_url": "https://api.github.com/users/pablofiumara/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Feels like something is wrong with your kubernetes/kubeflow setup.\r\nCan you see the pipelines UI or does it also show this error?", "@BroderPeters Thank you very much for your answer. I can see pipelines UI. \r\n\r\nIt's a weird error. The error does not happen to me. It happens to my work colleague sometimes. Sometimes they are able to publish pipelines from their local environments, sometimes they are not.", "This sounds like it may be an issue related to a specific network setup. It will be hard to help without some more information about what the variance is attributed to.\r\n\r\nCan you see if you are able to reproduce the issue with the KFP v2 SDK `kfp==1.8.13`? If this does not help, consider upgrading the `urllib3` library manually.", "@connor-mccarthy Thank you very much for your answer. I will try that" ]
"2022-09-15T00:20:34"
"2022-09-15T23:52:59"
null
NONE
null
### Environment * KFP version: 1.8.1 * KFP SDK version: 1.6.2 * All dependencies version: kfp 1.6.2 kfp-pipeline-spec 0.1.16 kfp-server-api 1.8.5 ### Steps to reproduce It happens on GCP, on a Kubeflow 1.5 cluster To reproduce you should execute the following code: ``` from kfp import Client kfp_client = Client( host="", client_id="", other_client_id="", other_client_secret="", namespace="aNamespace" ) kfp_client.set_user_namespace(namespace="aNamespace") pipeline_id = kfp_client.get_pipeline_id(name="aPipelineName") print(pipeline_id) ``` ### Expected result I should get a pipeline id Instead I get Traceback (most recent call last): File "/Users/user/.pyenv/versions/lib/python3.9/site-packages/urllib3/connectionpool.py", line 703, in urlopen httplib_response = self._make_request( File "/Users/user/.pyenv/versions/lib/python3.9/site-packages/urllib3/connectionpool.py", line 386, in _make_request self._validate_conn(conn) File "/Users/user/.pyenv/versions/lib/python3.9/site-packages/urllib3/connectionpool.py", line 1040, in _validate_conn conn.connect() File "/Users/user/.pyenv/versions/lib/python3.9/site-packages/urllib3/connection.py", line 416, in connect self.sock = ssl_wrap_socket( File "/Users/user/.pyenv/versions/lib/python3.9/site-packages/urllib3/util/ssl_.py", line 449, in ssl_wrap_socket ssl_sock = _ssl_wrap_socket_impl( File "/Users/user/.pyenv/versions/lib/python3.9/site-packages/urllib3/util/ssl_.py", line 493, in _ssl_wrap_socket_impl return ssl_context.wrap_socket(sock, server_hostname=server_hostname) File "/Users/user/.pyenv/versions/3.9.9/lib/python3.9/ssl.py", line 500, in wrap_socket return self.sslsocket_class._create( File "/Users/user/.pyenv/versions/3.9.9/lib/python3.9/ssl.py", line 1040, in _create self.do_handshake() File "/Users/user/.pyenv/versions/3.9.9/lib/python3.9/ssl.py", line 1309, in do_handshake self._sslobj.do_handshake() ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:1129) --------------------- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8267/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8267/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8257
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8257/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8257/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8257/events
https://github.com/kubeflow/pipelines/issues/8257
1,370,642,028
I_kwDOB-71UM5RslZs
8,257
[feature] Allow GCP ModelUploadOp to upload a model version
{ "login": "parthmishra", "id": 3813311, "node_id": "MDQ6VXNlcjM4MTMzMTE=", "avatar_url": "https://avatars.githubusercontent.com/u/3813311?v=4", "gravatar_id": "", "url": "https://api.github.com/users/parthmishra", "html_url": "https://github.com/parthmishra", "followers_url": "https://api.github.com/users/parthmishra/followers", "following_url": "https://api.github.com/users/parthmishra/following{/other_user}", "gists_url": "https://api.github.com/users/parthmishra/gists{/gist_id}", "starred_url": "https://api.github.com/users/parthmishra/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/parthmishra/subscriptions", "organizations_url": "https://api.github.com/users/parthmishra/orgs", "repos_url": "https://api.github.com/users/parthmishra/repos", "events_url": "https://api.github.com/users/parthmishra/events{/privacy}", "received_events_url": "https://api.github.com/users/parthmishra/received_events", "type": "User", "site_admin": false }
[ { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "/cc @chongyouquan ", "Similarly other Ops that can generate a model (Custom*TrainingJobRunOp, AutoML*TrainingJobRunOp etc) should support adding new versions of a model as well.", "> \r\n\r\n+1", "This should now be supported by the latest release, via the parent_model parameter. Please reopen if you see it's not working.\r\nhttps://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-1.0.34/google_cloud_pipeline_components.v1.model.html", "Was this functionality also added to components that use the uploaded models? For example how would one reference a specific model version (with a version alias) in BatchPredictOp in python? Maybe I missed it but can't seem to find documentation on this." ]
"2022-09-12T23:21:34"
"2023-05-31T09:55:18"
"2023-01-26T17:50:51"
CONTRIBUTOR
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area samples --> /area components ### What feature would you like to see? The ability to upload a Vertex Model version to the model registry ### What is the use case or pain point? In the REST API for[ model upload](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/upload#request-body), the request body has several fields including `parentModel` which can be supplied if uploading a new model version. The current version of [`ModelUploadOp`](https://github.com/kubeflow/pipelines/blob/1739434f4ad2000105e0a8feff41b587e3c9de8c/components/google-cloud/google_cloud_pipeline_components/container/v1/gcp_launcher/upload_model_remote_runner.py#L55) only supplies the `model` field in the request: ```python upload_model_request = { # TODO(IronPan) temporarily remove the empty fields from the spec 'model': json_util.recursive_remove_empty( append_unmanaged_model_artifact_into_payload( executor_input, model_spec)) } ``` ### Is there a workaround currently? I believe this can be done by creating a custom component that uses the `gcloud` sdk and then `gcloud ai model upload ... --parent-model ... --version-aliases ` but I haven't tried it yet. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8257/reactions", "total_count": 13, "+1": 13, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8257/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8256
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8256/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8256/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8256/events
https://github.com/kubeflow/pipelines/issues/8256
1,369,572,385
I_kwDOB-71UM5RogQh
8,256
[bug] Pipeline metrics Invalid input error: Unknown execution spec
{ "login": "dbg-raghulkrishna", "id": 106546936, "node_id": "U_kgDOBlnG-A", "avatar_url": "https://avatars.githubusercontent.com/u/106546936?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dbg-raghulkrishna", "html_url": "https://github.com/dbg-raghulkrishna", "followers_url": "https://api.github.com/users/dbg-raghulkrishna/followers", "following_url": "https://api.github.com/users/dbg-raghulkrishna/following{/other_user}", "gists_url": "https://api.github.com/users/dbg-raghulkrishna/gists{/gist_id}", "starred_url": "https://api.github.com/users/dbg-raghulkrishna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dbg-raghulkrishna/subscriptions", "organizations_url": "https://api.github.com/users/dbg-raghulkrishna/orgs", "repos_url": "https://api.github.com/users/dbg-raghulkrishna/repos", "events_url": "https://api.github.com/users/dbg-raghulkrishna/events{/privacy}", "received_events_url": "https://api.github.com/users/dbg-raghulkrishna/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "I'm experiencing the same issue. It seems that the issue was introduced in the alpha.3 release. Metrics are successfully picked up when downgrading the ml-pipeline deployment to the previous image `gcr.io/ml-pipeline/api-server:2.0.0-alpha.2`", "@Tobiasgoerke and myself can reproduce this too. @kimwnasptd @annajung this is another serious regresseion and candidate for Kubeflow 1.6.1\r\n@Linchin @james-jwu Is this fixed in a newer release that we can use for 1.6.1?", "I'll take a look. \r\n\r\nSlightly off the topic, per https://github.com/kubeflow/pipelines/issues/8256#issuecomment-1247757386, this is a regression in KFP 2.0.0a3. While last time we communicated, KFP 2.0.0a2 should be the version to go with Kubeflow 1.6 release (https://github.com/kubeflow/manifests/issues/2198#issuecomment-1171703073).\r\n\r\nGiven the natural of alpha phase release, I think we probably shouldn't automatically chase the latest alpha release available, they may not be very well tested. So while I investigate this issue, can we pin back to KFP 2.0.0a2 in KF 1.6? I assume that version is possibly tested more thoroughly with KF 1.6 RC releases. WDTY? @annajung @zijianjoy @gkcalat ", "> can we pin back to KFP 2.0.0a2 in KF 1.6? I assume that version is possibly tested more thoroughly with KF 1.6 RC releases. \r\n\r\nI take this back, checked the change history and it appears we never included KFP 2.0.0a2 in any KF 1.6 RC release. \r\nLet me focus on this specific issue first.", "> > can we pin back to KFP 2.0.0a2 in KF 1.6? I assume that version is possibly tested more thoroughly with KF 1.6 RC releases.\r\n> \r\n> I take this back, checked the change history and it appears we never included KFP 2.0.0a2 in any KF 1.6 RC release. Let me focus on this specific issue first.\r\n\r\nDiscussed with @gkcalat offline. We'll release with 2.0.0a5 with the fix, and tentatively use this version for KF 1.6 patch release. \r\nThat being said, I still think in future KF release, we shouldn't automatically pick the latest KFP alpha release available.", "Thanks @chensun, do you know when we can expect 2.0.0.a5?", "> Thanks @chensun, do you know when we can expect 2.0.0.a5?\r\n\r\n@annajung 2.0.0a5 release is done: https://github.com/kubeflow/pipelines/releases/tag/2.0.0-alpha.5" ]
"2022-09-12T09:36:20"
"2022-09-26T19:27:53"
"2022-09-21T17:31:09"
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? Kubeflow/manifest v1.6 AKS v1.22 <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: ml-pipeline/api-server:2.0.0-alpha.3 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: '1.8.13' <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ``` from kfp.components import InputPath, OutputPath, create_component_from_func from kfp import dsl def produce_metrics( # Note when the `create_component_from_func` method converts the function to a component, the function parameter "mlpipeline_metrics_path" becomes an output with name "mlpipeline_metrics" which is the correct name for metrics output. mlpipeline_metrics_path: OutputPath('Metrics'), ): import json accuracy = 0.1 metrics = { 'metrics': [{ 'name': 'accuracy-score', # The name of the metric. Visualized as the column name in the runs table. 'numberValue': accuracy, # The value of the metric. Must be a numeric value. 'format': "PERCENTAGE", # The optional format of the metric. Supported values are "RAW" (displayed in raw format) and "PERCENTAGE" (displayed in percentage format). }] } with open(mlpipeline_metrics_path, 'w') as f: json.dump(metrics, f) produce_metrics_op = create_component_from_func( produce_metrics, base_image='python:3.7', packages_to_install=[], output_component_file='component.yaml', ) from kfp import dsl import os, kfp @dsl.pipeline( name='KF iris PoC', description='Simple TF' ) def test(): task=produce_metrics_op() import os,kfp with open(os.environ['KF_PIPELINES_SA_TOKEN_PATH'], "r") as f: TOKEN = f.read() client = kfp.Client(host='http://ml-pipeline.kubeflow.svc.cluster.local:8888',existing_token=TOKEN) client.create_run_from_pipeline_func( test, mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY, arguments={} ) ``` ### Expected result ![image](https://user-images.githubusercontent.com/106546936/189620869-f40b0cca-d25e-483d-84f4-5a16c12565c0.png) <!-- What should the correct behavior be? --> ### Materials and reference https://www.kubeflow.org/docs/components/pipelines/sdk/pipelines-metrics/ <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> I cannot see the pipeline metrics in the UI but the pipeline artifact is logged i am getting this error in ml-pipeline ``` I0912 09:12:02.846997 8 error.go:259] Invalid input error: Unknown execution spec InternalServerError: failed to unmarshal workflow ``` ![image](https://user-images.githubusercontent.com/106546936/189621689-ded04c6a-01b0-46d7-bfaa-bd55bde8f24a.png) ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8256/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8256/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8255
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8255/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8255/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8255/events
https://github.com/kubeflow/pipelines/issues/8255
1,369,054,485
I_kwDOB-71UM5Rmh0V
8,255
[frontend] Can't see log field in UI (SDK v2)
{ "login": "sergeykuprikov", "id": 85128586, "node_id": "MDQ6VXNlcjg1MTI4NTg2", "avatar_url": "https://avatars.githubusercontent.com/u/85128586?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sergeykuprikov", "html_url": "https://github.com/sergeykuprikov", "followers_url": "https://api.github.com/users/sergeykuprikov/followers", "following_url": "https://api.github.com/users/sergeykuprikov/following{/other_user}", "gists_url": "https://api.github.com/users/sergeykuprikov/gists{/gist_id}", "starred_url": "https://api.github.com/users/sergeykuprikov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sergeykuprikov/subscriptions", "organizations_url": "https://api.github.com/users/sergeykuprikov/orgs", "repos_url": "https://api.github.com/users/sergeykuprikov/repos", "events_url": "https://api.github.com/users/sergeykuprikov/events{/privacy}", "received_events_url": "https://api.github.com/users/sergeykuprikov/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "We do not support main.logs in KFP v2 compatible mode. We are no longer supporting KFP v2 compatible mode.\r\n\r\nWe support main.logs in KFP v1 and are working toward support in KFP v2." ]
"2022-09-11T20:24:44"
"2022-09-15T22:49:39"
"2022-09-15T22:49:39"
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? from kubeflow/manifests v1.4 * KFP version: 1.7.0 ### Steps to reproduce QQ When I try to run a simple pipeline on SDK 2, I don't see the main.log fields in the output artifact. Although I can see the log saved in minio. ![image](https://user-images.githubusercontent.com/85128586/189547248-264e5617-6669-4a25-8ac7-fc58ef6223ae.png) UI: ![image](https://user-images.githubusercontent.com/85128586/189547303-fed1e165-c70e-4cec-9093-d4d0651f6fd6.png) Minio: ![image](https://user-images.githubusercontent.com/85128586/189547416-d75dd922-6bf4-486a-bbd1-d66f58991152.png) Log: ![image](https://user-images.githubusercontent.com/85128586/189547475-be9b13f9-f422-49a0-b496-af8f1c1857de.png) ### Expected result Like this: ![image](https://user-images.githubusercontent.com/85128586/189547501-83a3e5d5-a92c-4acf-a2ca-bf037099668b.png) ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8255/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8255/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8252
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8252/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8252/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8252/events
https://github.com/kubeflow/pipelines/issues/8252
1,367,373,279
I_kwDOB-71UM5RgHXf
8,252
[bug] GCP ModelBatchPredictOp is never cached
{ "login": "MainRo", "id": 814804, "node_id": "MDQ6VXNlcjgxNDgwNA==", "avatar_url": "https://avatars.githubusercontent.com/u/814804?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MainRo", "html_url": "https://github.com/MainRo", "followers_url": "https://api.github.com/users/MainRo/followers", "following_url": "https://api.github.com/users/MainRo/following{/other_user}", "gists_url": "https://api.github.com/users/MainRo/gists{/gist_id}", "starred_url": "https://api.github.com/users/MainRo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MainRo/subscriptions", "organizations_url": "https://api.github.com/users/MainRo/orgs", "repos_url": "https://api.github.com/users/MainRo/repos", "events_url": "https://api.github.com/users/MainRo/events{/privacy}", "received_events_url": "https://api.github.com/users/MainRo/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[]
"2022-09-09T06:56:51"
"2022-09-09T06:56:55"
null
NONE
null
### Environment * How do you deploy Kubeflow Pipelines (KFP)? Vertex-AI pipelines * KFP version: I don't know (vertex ai) * KFP SDK version: 1.8.12 ### Steps to reproduce Create a pipeline that uses the ModelBatchPredictOp, and run it several times. The task is never cached, but systematically executed. ### Expected result The task should be cached when the input parameters do not change (dataset, model, gcs source uri, gcs destination prefix, and machine type). ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> According to the documentation, the gcs output uri is created with the following pattern: prediction-model-display-name-job-create-time. The usage of the job creation time here prevents any caching of this operator. https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-1.0.16/google_cloud_pipeline_components.aiplatform.html?highlight=ModelUploadOp#google_cloud_pipeline_components.aiplatform.ModelUploadOp ### Labels /area sdk /area components --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8252/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8252/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8242
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8242/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8242/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8242/events
https://github.com/kubeflow/pipelines/issues/8242
1,363,758,041
I_kwDOB-71UM5RSUvZ
8,242
[feature] Allow Filtering On Pipeline ID for List Runs Api Endpoint
{ "login": "tarat44", "id": 32471142, "node_id": "MDQ6VXNlcjMyNDcxMTQy", "avatar_url": "https://avatars.githubusercontent.com/u/32471142?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tarat44", "html_url": "https://github.com/tarat44", "followers_url": "https://api.github.com/users/tarat44/followers", "following_url": "https://api.github.com/users/tarat44/following{/other_user}", "gists_url": "https://api.github.com/users/tarat44/gists{/gist_id}", "starred_url": "https://api.github.com/users/tarat44/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tarat44/subscriptions", "organizations_url": "https://api.github.com/users/tarat44/orgs", "repos_url": "https://api.github.com/users/tarat44/repos", "events_url": "https://api.github.com/users/tarat44/events{/privacy}", "received_events_url": "https://api.github.com/users/tarat44/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
null
[]
null
[ "/cc @Linchin\r\n\r\n" ]
"2022-09-06T20:05:43"
"2022-09-08T22:46:11"
null
CONTRIBUTOR
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? Extend the api endpoint for listing runs to allow selecting or filtering runs by pipeline ID. ### What is the use case or pain point? This feature will be helpful when it is necessary to easily gather all runs associated with a given pipeline ### Is there a workaround currently? No --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8242/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8242/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8240
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8240/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8240/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8240/events
https://github.com/kubeflow/pipelines/issues/8240
1,361,857,532
I_kwDOB-71UM5RLEv8
8,240
when using the emissary executor you must either explicitly specify the command, or list the image's command in the index: https://argoproj.github.io/argo-workflows/workflow-executors/#emissary-emissary[bug] <Bug Name>
{ "login": "kotalakshman", "id": 81070473, "node_id": "MDQ6VXNlcjgxMDcwNDcz", "avatar_url": "https://avatars.githubusercontent.com/u/81070473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kotalakshman", "html_url": "https://github.com/kotalakshman", "followers_url": "https://api.github.com/users/kotalakshman/followers", "following_url": "https://api.github.com/users/kotalakshman/following{/other_user}", "gists_url": "https://api.github.com/users/kotalakshman/gists{/gist_id}", "starred_url": "https://api.github.com/users/kotalakshman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kotalakshman/subscriptions", "organizations_url": "https://api.github.com/users/kotalakshman/orgs", "repos_url": "https://api.github.com/users/kotalakshman/repos", "events_url": "https://api.github.com/users/kotalakshman/events{/privacy}", "received_events_url": "https://api.github.com/users/kotalakshman/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "using kubeflow , machine learning model deployed in to kubernetes,then i'm getting the attached error message", "Hello @kotalakshman ,\r\n\r\nThere are multiple information in here, I am assuming you are using container component in your pipeline:\r\n\r\n1. In the issue title, this is by design because you need to explicitly specify the command in your container spec. Example: https://github.com/kubeflow/pipelines/pull/6143/files\r\n2. In the issue description, you need to start using Python 3 because Python 2 is no longer supported. \r\n\r\nI am closing this issue for now with solutions provided. If you have more questions, feel free to reopen." ]
"2022-09-05T12:20:37"
"2022-09-08T23:04:39"
"2022-09-08T23:04:38"
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.7.1 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version:1.7.1 <!-- Specify the output of the following shell command: $pip list | grep kfp -->DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality. ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ### Expected result <!-- What should the correct behavior be? --> ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8240/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8240/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8236
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8236/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8236/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8236/events
https://github.com/kubeflow/pipelines/issues/8236
1,359,557,825
I_kwDOB-71UM5RCTTB
8,236
[bug] Disabling caching for a step by set_caching_options lead to disabling caching for subsequent steps
{ "login": "ysk24ok", "id": 3449164, "node_id": "MDQ6VXNlcjM0NDkxNjQ=", "avatar_url": "https://avatars.githubusercontent.com/u/3449164?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ysk24ok", "html_url": "https://github.com/ysk24ok", "followers_url": "https://api.github.com/users/ysk24ok/followers", "following_url": "https://api.github.com/users/ysk24ok/following{/other_user}", "gists_url": "https://api.github.com/users/ysk24ok/gists{/gist_id}", "starred_url": "https://api.github.com/users/ysk24ok/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ysk24ok/subscriptions", "organizations_url": "https://api.github.com/users/ysk24ok/orgs", "repos_url": "https://api.github.com/users/ysk24ok/repos", "events_url": "https://api.github.com/users/ysk24ok/events{/privacy}", "received_events_url": "https://api.github.com/users/ysk24ok/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Hello @ysk24ok \r\n\r\n1. It is likely that `download_task` generates new output every time, even if the content is the same. As a result, if the upstream is not cached, then the downstream task will not be able to use cache.\r\n2. We have deprecated V2_COMPATIBLE mode, please use 2.0.0-XXX and remove the use of `V2_COMPATIBLE`.\r\n\r\nFor now, the workaround is to pass the string value instead of Artifact to `print_file_content_op` to bypass the caching. We will think more about how to properly design the cache system. \r\n\r\ncc @chensun " ]
"2022-09-02T00:02:18"
"2022-09-08T23:00:28"
null
CONTRIBUTOR
null
I'm not sure if it's a bug or an expected behavior, but let me mark as a bug for now. ### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? full Kubeflow deployment <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.8.1 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: 1.8.13 <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ```py from kfp import components, dsl from kfp.v2.dsl import Dataset, Input, component download_op = components.load_component_from_url( "https://raw.githubusercontent.com/kubeflow/pipelines/master/components/contrib/google-cloud/storage/download_blob/component.yaml" ) @component def print_file_content_op(file: Input[Dataset]) -> None: with open(file.path) as f: print(f.read()) @dsl.pipeline(name="caching-test") def pipeline(path: str) -> None: download_task = download_op(path) download_task.set_caching_options(False) print_file_content_task = print_file_content_op(download_task.outputs["data"]) ``` Submit the pipeline using `create_run_from_pipeline_func` with `V2_COMPATIBLE` mode two times. We'll see `print_file_content_task` is not cached and executed on the second run. The behavior is counterintuitive as I don't call `set_caching_options(False)` for `print_file_content_task`. Why I call `download_task.set_caching_options(False)` because the file at the GCS path might be replaced at some point. I know it would be better to upload a file to a different path instead of replacing the current file and make the step deterministic, but to do so we need to put some efforts to update our existing system. ### Expected result <!-- What should the correct behavior be? --> `print_file_content_task` is cached and skipped if the file content is the same as before. ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8236/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8236/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8230
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8230/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8230/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8230/events
https://github.com/kubeflow/pipelines/issues/8230
1,358,783,253
I_kwDOB-71UM5Q_WMV
8,230
[bug] Kubeflow Pipeline UI not rendering Pipeline Metrics (again?)
{ "login": "harshvladha", "id": 7391816, "node_id": "MDQ6VXNlcjczOTE4MTY=", "avatar_url": "https://avatars.githubusercontent.com/u/7391816?v=4", "gravatar_id": "", "url": "https://api.github.com/users/harshvladha", "html_url": "https://github.com/harshvladha", "followers_url": "https://api.github.com/users/harshvladha/followers", "following_url": "https://api.github.com/users/harshvladha/following{/other_user}", "gists_url": "https://api.github.com/users/harshvladha/gists{/gist_id}", "starred_url": "https://api.github.com/users/harshvladha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/harshvladha/subscriptions", "organizations_url": "https://api.github.com/users/harshvladha/orgs", "repos_url": "https://api.github.com/users/harshvladha/repos", "events_url": "https://api.github.com/users/harshvladha/events{/privacy}", "received_events_url": "https://api.github.com/users/harshvladha/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Ignore. I found that the value inside the metrics json was screwed up.. I was passing a dict unintentionally in one of the metric where I had to pass the number.\r\n\r\nI think KFP Compiler can show this as warning when ml-pipeline metrics doesn't have an expected return output schema (or it can be shown in UI so that debugging becomes easier)\r\n" ]
"2022-09-01T12:39:16"
"2022-09-01T22:55:36"
"2022-09-01T22:55:36"
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? Ans: Full Kubeflow Deployment * KFP version: For some reason the sidebar shows : **build version dev_local** but was installed using YAMLs at: https://github.com/kubeflow/manifests Docker Image and Tag info: `ml-pipeline-api-server`: `gcr.io/ml-pipeline/api-server:1.8.1` `ml-pipeline-ui`: `gcr.io/ml-pipeline/frontend:1.8.1` * KFP SDK version: KFP SDK version: 1.8.13 ### Steps to reproduce Built component (part of the pipeline and is the last component as well) using KFP SDK v1 API My Component looks like following: ```py def train_model(customer_id: str, start_time: str, end_time: str, take_target_log: bool, log_addition: float, model: OutputPath('Model'), x_test: OutputPath('CSV'), y_test: OutputPath('CSV'), mlpipeline_metrics_path: OutputPath('Metrics')): metrics = { 'metrics': [ { 'name': 'best_iteration', 'numberValue': 101, 'format': 'RAW' }, { 'name': 'best_score', 'numberValue': 1.2, 'format': 'RAW' } ] } with open(mlpipeline_metrics_path, 'w') as f: json.dump(metrics, f) ``` Above component is added in Pipeline using `kfp.components.create_component_from_func` function. 1. Changed metrics argument in above `train_model` signature to - `mlpipeline_metrics_path: OutputPath('Metrics')` - `mlpipeline_metrics: OutputPath('Metrics')` - `mlpipeline_metrics: OutputPath()` All of them yield YAML with ```yaml outputs: artifacts: - {name: mlpipeline-metrics, path: /tmp/outputs/mlpipeline_metrics/data} ``` `train_model` component in UI shows the Artifacts as : ![Screenshot 2022-09-01 at 6 03 09 PM](https://user-images.githubusercontent.com/7391816/187914569-0ddb5cdd-5209-47d5-a9d3-24f06c1d5f0a.png) Weird thing to note here is the `Type` of the artifact. 2. I tried returning `NamedTuple` as : ```py def train_model(customer_id: str, start_time: str, end_time: str, take_target_log: bool, log_addition: float, model: OutputPath('Model'), x_test: OutputPath('CSV'), y_test: OutputPath('CSV')) -> NamedTuple('Outputs', [('mlpipeline_metrics', 'Metrics')]): metrics = { 'metrics': [ { 'name': 'best_iteration', 'numberValue': 101, 'format': 'RAW' }, { 'name': 'best_score', 'numberValue': 1.2, 'format': 'RAW' } ] } return [json.dumps(metrics)] ``` Above generates YAML with ```yaml outputs: artifacts: - {name: mlpipeline-metrics, path: /tmp/outputs/mlpipeline_metrics/data} ``` No diff in component spec, but container args had `----output-paths` extra arguments in the yaml ```yaml '----output-paths', /tmp/outputs/mlpipeline_metrics/data ``` ### Expected result Expected to see ML Pipeline Metrics (like `best_score` and `best_iterations` ) in following screenshot ![Screenshot 2022-09-01 at 6 08 07 PM](https://user-images.githubusercontent.com/7391816/187915505-c2e2d45a-33f6-4475-a905-07da7f66f0e1.png) <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8230/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8230/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8224
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8224/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8224/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8224/events
https://github.com/kubeflow/pipelines/issues/8224
1,357,755,047
I_kwDOB-71UM5Q7bKn
8,224
[backend] metadata-grpc-deployment cannot connect to mysql
{ "login": "jielou", "id": 64111800, "node_id": "MDQ6VXNlcjY0MTExODAw", "avatar_url": "https://avatars.githubusercontent.com/u/64111800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jielou", "html_url": "https://github.com/jielou", "followers_url": "https://api.github.com/users/jielou/followers", "following_url": "https://api.github.com/users/jielou/following{/other_user}", "gists_url": "https://api.github.com/users/jielou/gists{/gist_id}", "starred_url": "https://api.github.com/users/jielou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jielou/subscriptions", "organizations_url": "https://api.github.com/users/jielou/orgs", "repos_url": "https://api.github.com/users/jielou/repos", "events_url": "https://api.github.com/users/jielou/events{/privacy}", "received_events_url": "https://api.github.com/users/jielou/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
open
false
{ "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false }
[ { "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false }, { "login": "gkcalat", "id": 35157096, "node_id": "MDQ6VXNlcjM1MTU3MDk2", "avatar_url": "https://avatars.githubusercontent.com/u/35157096?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gkcalat", "html_url": "https://github.com/gkcalat", "followers_url": "https://api.github.com/users/gkcalat/followers", "following_url": "https://api.github.com/users/gkcalat/following{/other_user}", "gists_url": "https://api.github.com/users/gkcalat/gists{/gist_id}", "starred_url": "https://api.github.com/users/gkcalat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gkcalat/subscriptions", "organizations_url": "https://api.github.com/users/gkcalat/orgs", "repos_url": "https://api.github.com/users/gkcalat/repos", "events_url": "https://api.github.com/users/gkcalat/events{/privacy}", "received_events_url": "https://api.github.com/users/gkcalat/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @jielou !\r\nCould you please try a newer version of KFP standalone deployment (e.g. 1.8.5) and let us know if you still encounter the problem.", "@gkcalat Hi. I tried to install 1.8.5, but `metadata-grpc-deployment` still failed. Logs showed `MetadataStore cannot be created with the given connection config`. I installed with the platform agnostic in AWS EKS. Can someone help? thanks.\r\n\r\nMore details:\r\n- Pipelines UI showed `failed to retrieve list of pipelines`", "Hi @jielou\r\n\r\nDid you follow [these instruction](https://github.com/kubeflow/pipelines/blob/master/manifests/kustomize/README.md)? I was able to deploy KFP 1.8.5 on GCP. There are also [AWS instructions](https://github.com/kubeflow/pipelines/blob/master/manifests/kustomize/env/aws/README.md). \r\n\r\n/CC @surajkota, as this might be AWS-specific issue?\r\n\r\nFYI, here are instructions for [standalone installation using kustomize](https://www.kubeflow.org/docs/components/pipelines/v1/installation/standalone-deployment/).", "@gkcalat thanks for the reply. I followed the first link of instruction in `platform-agnostic` in an existing EKS cluster. I haven't tried AWS instructions because I don't want to use S3 and RDS for now.", "As we are waiting for @surajkota or someone else from AWS, can you provide what exactly you run to deploy KFP?\r\n\r\nBesides, you can try deploying KFP 1.8.5:\r\n```bash\r\nexport PIPELINE_VERSION=1.8.5\r\nkubectl apply -k \"github.com/kubeflow/pipelines/manifests/kustomize/cluster-scoped-resources?ref=$PIPELINE_VERSION\"\r\nkubectl wait --for condition=established --timeout=60s crd/applications.app.k8s.io\r\nkubectl apply -k \"github.com/kubeflow/pipelines/manifests/kustomize/env/platform-agnostic?ref=$PIPELINE_VERSION\"\r\nkubectl wait pods -l application-crd-id=kubeflow-pipelines -n kubeflow --for condition=Ready --timeout=1800s\r\nkubectl port-forward -n kubeflow svc/ml-pipeline-ui 8080:80\r\n```\r\nThen try accessing Kubeflow Pipelines UI in your browser: http://localhost:8080/. If you are using ssh, you have to connect with port forwarding: `ssh -R 9902:localhost:8080 <remote hostname>` and then access it locally via http://localhost:9902/.", "sure. I cloned the pipelines repo and then checkout 1.8.5 tag. and then followed the instructions in [readme](https://github.com/kubeflow/pipelines/tree/1.8.5/manifests/kustomize#envplatform-agnostic-install-on-any-kubernetes-cluster):\r\n```\r\nKFP_ENV=platform-agnostic\r\nkubectl apply -k cluster-scoped-resources/\r\nkubectl wait crd/applications.app.k8s.io --for condition=established --timeout=60s\r\nkubectl apply -k \"env/${KFP_ENV}/\"\r\nkubectl wait pods -l application-crd-id=kubeflow-pipelines -n kubeflow --for condition=Ready --timeout=1800s\r\nkubectl port-forward -n kubeflow svc/ml-pipeline-ui 8080:80\r\n```\r\nI will try the instructions you sent me, but I think they did the same job. thanks.", "I got a new error in the deployment (happened in both using the instructions you shared and my old deployment method). The cache server failed to launch now. \r\n```\r\nWarning FailedMount 3m41s kubelet Unable to attach or mount volumes: unmounted volumes=[webhook-tls-certs], unattached volumes=[kubeflow-pipelines-cache-token-88wc7 webhook-tls-certs]: timed out waiting for the condition\r\n Warning FailedMount 83s (x3 over 8m17s) kubelet Unable to attach or mount volumes: unmounted volumes=[webhook-tls-certs], unattached volumes=[webhook-tls-certs kubeflow-pipelines-cache-token-88wc7]: timed out waiting for the condition\r\n Warning FailedMount 4s (x13 over 10m) kubelet MountVolume.SetUp failed for volume β€œwebhook-tls-certs” : secret β€œwebhook-server-tls” not found\r\n```\r\nit worked before probably because I did not clean up resources after installing 1.5. This time, I cleaned up the cluster before installing 1.8.5 and it failed.", "Hi @jielou, do you want to install Kubeflow pipelines standalone? or are you interested in trying out the full Kubeflow?\r\n\r\nPlease clean up your existing installation and follow one of these options according to your choice:\r\n- If you want to try full kubeflow but without RDS/S3, you can follow the instructions for **Vanilla** deployment option.\r\n - https://awslabs.github.io/kubeflow-manifests/docs/deployment/. These instructions are for Kubeflow 1.6.0 which installs Kubeflow pipelines version 2.0.0-alpha3. This release is in preview/RC and we will soon be updating with Kubeflow 1.6.1 support. You will need an EKS version 1.22 or above\r\n - If you want to install Kubeflow 1.5, you can follow instructions here: https://awslabs.github.io/kubeflow-manifests/release-v1.5.1-aws-b1.0.2/docs/deployment/. EKS compatibility is mentioned [here](https://awslabs.github.io/kubeflow-manifests/release-v1.5.1-aws-b1.0.2/docs/about/eks-compatibility/)\r\n\r\nIf you are looking to install Kubeflow pipelines 1.8.5 standalone on AWS, you would need to:\r\n1. Install [cert-manager](https://cert-manager.io/docs/installation/)\r\n2. Use the cert-manager overlays under https://github.com/kubeflow/pipelines/tree/master/manifests/kustomize/env/cert-manager, so \r\n```\r\nexport PIPELINE_VERSION=1.8.5\r\nkubectl apply -k \"github.com/kubeflow/pipelines/manifests/kustomize/env/cert-manager/cluster-scoped-resources?ref=$PIPELINE_VERSION\"\r\nkubectl wait --for condition=established --timeout=60s crd/applications.app.k8s.io\r\nkubectl apply -k \"github.com/kubeflow/pipelines/manifests/kustomize/env/cert-manager/dev?ref=$PIPELINE_VERSION\"\r\nkubectl wait pods -l application-crd-id=kubeflow-pipelines -n kubeflow --for condition=Ready --timeout=1800s\r\nkubectl port-forward -n kubeflow svc/ml-pipeline-ui 8080:80\r\n```\r\n\r\nWe need update the documentation here: https://www.kubeflow.org/docs/components/pipelines/v1/installation/standalone-deployment/ and remove outdated README content", "@surajkota thanks for the instructions. I want to install Kubeflow pipelines 1.8.5 standalone on AWS. Which version of cert-manager would you recommend? I used the latest one but saw the error when installing kubeflow pipelines:\r\n```\r\nunable to recognize β€œenv/cert-manager/dev”: no matches for kind β€œCertificate” in version β€œhttp://cert-manager.io/v1”\r\nunable to recognize β€œenv/cert-manager/dev”: no matches for kind β€œIssuer” in version β€œhttp://cert-manager.io/v1”\r\n```", "@jielou What EKS version are you on?" ]
"2022-08-31T19:15:15"
"2022-10-07T15:45:38"
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? follow the standalone deployment doc, but run `kubectl create -k platform-agnostic' instead. * KFP version: 1.5 * KFP SDK version: NA * EKS cluster k8s version: 1.19 ### Steps to reproduce I follow the guideline the deploy the standalone kubeflow pipelines. However, the metadata-grpc-deployment pod always crush, and logs shows ``` F ml_metadata/metadata_store/metadata_store_server_main.cc:220] Non-OK-status: status status: Internal: mysql_real_connect failed: errno: 2005, error: Unknown MySQL server host β€˜mysql’ (-3)MetadataStore cannot be created with the given connection config. ``` mysql pod is running fine. ``` mysql: ready for connections. ``` also meta-writer, ml-pipeline,ml-pipeline-persistenceagent also has crashloopbackoff or 0/1 ready status. ### Expected result pods are running ok. ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8224/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8224/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8205
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8205/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8205/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8205/events
https://github.com/kubeflow/pipelines/issues/8205
1,354,090,761
I_kwDOB-71UM5QtckJ
8,205
[feature] import python function in component
{ "login": "TranThanh96", "id": 26323599, "node_id": "MDQ6VXNlcjI2MzIzNTk5", "avatar_url": "https://avatars.githubusercontent.com/u/26323599?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TranThanh96", "html_url": "https://github.com/TranThanh96", "followers_url": "https://api.github.com/users/TranThanh96/followers", "following_url": "https://api.github.com/users/TranThanh96/following{/other_user}", "gists_url": "https://api.github.com/users/TranThanh96/gists{/gist_id}", "starred_url": "https://api.github.com/users/TranThanh96/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TranThanh96/subscriptions", "organizations_url": "https://api.github.com/users/TranThanh96/orgs", "repos_url": "https://api.github.com/users/TranThanh96/repos", "events_url": "https://api.github.com/users/TranThanh96/events{/privacy}", "received_events_url": "https://api.github.com/users/TranThanh96/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
{ "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false }
[ { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[ "Which SDK version are you using?\r\n\r\nIn both KFP v1 (v2 namespace) and KFP v2, this is provided by the containerized component functionality. Try `kfp component build --help` on the command line and view the reference docs for `kfp.v2.dsl.component` and `kfp.dsl.component` in v1 and v2, respectively. Reference docs: https://kubeflow-pipelines.readthedocs.io/en/1.8.13/", "I'm struggling with the same issue. Do I understand correctly that it is generally possible?\r\n@connor-mccarthy do you have an example implementation you could reference too?", "@JonasTischer\r\n\r\nHere is the most up to date documentation on this feature: https://www.kubeflow.org/docs/components/pipelines/v2/author-a-pipeline/components/#2-containerized-python-components\r\n\r\nPair this with `kfp component build --help` for more information.\r\n\r\nIf that doesn't help, please let me know and I will update the documentation to fill in any gaps." ]
"2022-08-29T10:51:27"
"2022-12-27T17:05:26"
"2022-12-27T17:05:05"
NONE
null
for example: I have a file utils.py which defined some functions. How can I import those functions in my components? Any ideas?
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8205/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8205/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8203
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8203/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8203/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8203/events
https://github.com/kubeflow/pipelines/issues/8203
1,353,271,685
I_kwDOB-71UM5QqUmF
8,203
[sdk] Can't use pipeline's input object in dsl.Condition
{ "login": "ittus", "id": 5120965, "node_id": "MDQ6VXNlcjUxMjA5NjU=", "avatar_url": "https://avatars.githubusercontent.com/u/5120965?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ittus", "html_url": "https://github.com/ittus", "followers_url": "https://api.github.com/users/ittus/followers", "following_url": "https://api.github.com/users/ittus/following{/other_user}", "gists_url": "https://api.github.com/users/ittus/gists{/gist_id}", "starred_url": "https://api.github.com/users/ittus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ittus/subscriptions", "organizations_url": "https://api.github.com/users/ittus/orgs", "repos_url": "https://api.github.com/users/ittus/repos", "events_url": "https://api.github.com/users/ittus/events{/privacy}", "received_events_url": "https://api.github.com/users/ittus/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
{ "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false }
[ { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting this @ittus. This is a known bug.", "Any update on this? We'd like to use the dsl.Condition to trigger some alert ops and can't whilst this is open.", "This has not yet been fixed in KFP v1.\r\n\r\nThis was fixed at least as early as `kfp==2.0.0b1`. The following runs without issue:\r\n\r\n```\r\nimport kfp\r\n\r\nassert kfp.__version__ == '2.0.0b1'\r\n\r\nfrom kfp import compiler\r\nfrom kfp import dsl\r\n\r\n\r\n@dsl.component\r\ndef identity(string: str) -> str:\r\n return string\r\n\r\n\r\n@dsl.pipeline()\r\ndef my_pipeline(string: str = 'string'):\r\n with dsl.Condition(string == 'string'):\r\n op1 = identity(string=string)\r\n\r\n\r\nir_file = __file__.replace('.py', '.yaml')\r\ncompiler.Compiler().compile(pipeline_func=my_pipeline, package_path=ir_file)\r\n```", "That may run, however anything beyond a trivial equality (like checking if a file needs to be decompressed by checking a file extension) fails due to PipelineParameterChannel attribute issues. This seems related to #8626 and essentially prevents me from using kubeflow pipelines in a production setting.", "@MatthewRalston, thanks for your response. If I understand what you're describing correctly, that's independent from the use of pipeline input in `dsl.Condition`.\r\n\r\nBut yes, only a [limited number of operators](https://github.com/kubeflow/pipelines/blob/1d8272b4cd3de767c538dce1d3f69a92a9c7e877/sdk/python/kfp/components/pipeline_channel.py#L143-L159) are permitted in `dsl.Condition`. Arbitrary user code is not permitted in `dsl.Condition` (or the pipeline body more generally) since the pipeline body defines the orchestration and will not actually be run at pipeline runtime.", "I am also getting the same error message as long as the condition is not based on the PipelineParam, e.g. \r\n```\r\na_local_variable=5\r\nwith dsl.Condition(a_local_variable == 5):\r\n pass\r\n```\r\n\r\n\r\nAttributeError: 'bool' object has no attribute 'operand1'" ]
"2022-08-28T07:22:29"
"2023-02-01T02:50:31"
null
CONTRIBUTOR
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? Full Kubeflow deployment * KFP version: 1.8.1 * KFP SDK version: 1.8.12 ### Steps to reproduce ```python @dsl.pipeline(name="Test pipeline", description=__doc__) def test_pipeline( output_config: Optional[dict] = { "s3_bucket": "my-s3-bucket", "s3_prefix": "my-s3-prefix" } ) -> None: with dsl.Condition(output_config is not None): ( # run a container op ) ``` Then there is an error when create the pipeline ```bash if isinstance(group.condition.operand1, dsl.PipelineParam): AttributeError: 'bool' object has no attribute 'operand1' ``` If I change the condition to ``` with dsl.Condition(output_config != None) ``` then when running, there is another error ``` This step is in Error state with this message: Invalid 'when' expression '"{"s3_bucket": "my-s3-bucket", "s3_prefix": "my-s3-prefix}" != "None"': Cannot transition token types from STRING [{] to VARIABLE [s3_bucket] ``` ### Expected result - Able to use dsl.Condition with pipeline's input ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> /area sdk <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8203/reactions", "total_count": 11, "+1": 11, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8203/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8201
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8201/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8201/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8201/events
https://github.com/kubeflow/pipelines/issues/8201
1,352,355,575
I_kwDOB-71UM5Qm073
8,201
[sdk] sdk documentation of kfp.v2
{ "login": "verhoek", "id": 30193551, "node_id": "MDQ6VXNlcjMwMTkzNTUx", "avatar_url": "https://avatars.githubusercontent.com/u/30193551?v=4", "gravatar_id": "", "url": "https://api.github.com/users/verhoek", "html_url": "https://github.com/verhoek", "followers_url": "https://api.github.com/users/verhoek/followers", "following_url": "https://api.github.com/users/verhoek/following{/other_user}", "gists_url": "https://api.github.com/users/verhoek/gists{/gist_id}", "starred_url": "https://api.github.com/users/verhoek/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/verhoek/subscriptions", "organizations_url": "https://api.github.com/users/verhoek/orgs", "repos_url": "https://api.github.com/users/verhoek/repos", "events_url": "https://api.github.com/users/verhoek/events{/privacy}", "received_events_url": "https://api.github.com/users/verhoek/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false }
[ { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[ "@verhoek, thanks for raising this. We are working on a significant documentation refresh that should help with this. In the meantime, maybe I can help here.\r\n\r\nWhich version of the SDK are you using?\r\n\r\n> Some functionality from the package kfp.v2.google seems to be referenced on the web/from the vertex ai docu\r\n\r\nCan you point me in the direction of this? It will be easier to speak in specifics based on the functionality you're trying to use from the Vertex docs.\r\n\r\n> PS: Also, what was the original intent of the v2 package? Already making available v2 sdk functionality from 1.8.x ?\r\n\r\nThe v2 subpackage/namespace within 1.8.x was intended to 1) provide the some of the KFP SDK v2 authoring experience within the 1.8.x and 2) enable compilation to the new IR pipeline definition protocol. Both should make for an easier migration to 2.0.0.\r\n\r\n> In the 2.x versions the kfp.v2 seems to point back to the main package kfp itself, which probably makes sense.\r\n\r\nYes, this is for backward compatibility of existing user import code.\r\n\r\n> would expect this documentation, even now deprecated, is still available in the SDK documentation.\r\n\r\nReference documentation on Read The Docs has not historically been pinned for all released versions, unfortunately. This is changing beginning with the 2.x.x stable release (and some 2.x.x beta releases). Reference docs are generated based on docstrings, so that is one way to find historical reference docs. Please let me know if I'm missing something about this comment.", "\r\n> \r\n> Which version of the SDK are you using?\r\n> \r\n\r\nIt was 1.8.13. \r\n\r\n> > Some functionality from the package kfp.v2.google seems to be referenced on the web/from the vertex ai docu\r\n> \r\n> Can you point me in the direction of this? It will be easier to speak in specifics based on the functionality you're trying to use from the Vertex docs.\r\n\r\nWe tried to use the deprecated package kfp.v2.google for the aiplatformclient, which seems to have been moved to the vertex ai sdk in the google.cloud package. When I check the vertex ai documentation though, a hint has been added that one should use the google.cloud package. I don't know if that was there before or we didn't check properly - I retract the statement that vertex ai docu references to the kfp.v2.google package, though it does refer to the kfp.v2 \r\n\r\nWe tried to submit/schedule a job and came across https://cloud.google.com/vertex-ai/docs/pipelines/schedule-cloud-scheduler where kfp.v2 is used but undocumented for 1.8.x .\r\n\r\n> \r\n> > PS: Also, what was the original intent of the v2 package? Already making available v2 sdk functionality from 1.8.x ?\r\n> \r\n> The v2 subpackage/namespace within 1.8.x was intended to 1) provide the some of the KFP SDK v2 authoring experience within the 1.8.x and 2) enable compilation to the new IR pipeline definition protocol. Both should make for an easier migration to 2.0.0.\r\n> \r\n> > In the 2.x versions the kfp.v2 seems to point back to the main package kfp itself, which probably makes sense.\r\n> \r\n> Yes, this is for backward compatibility of existing user import code.\r\n> \r\n> > would expect this documentation, even now deprecated, is still available in the SDK documentation.\r\n> \r\n> Reference documentation on Read The Docs has not historically been pinned for all released versions, unfortunately. This is changing beginning with the 2.x.x stable release (and some 2.x.x beta releases). Reference docs are generated based on docstrings, so that is one way to find historical reference docs. Please let me know if I'm missing something about this comment.\r\n\r\nOkay, sounds reasonable, thanks for clarification!", "> We tried to use the deprecated package kfp.v2.google for the aiplatformclient, which seems to have been moved to the vertex ai sdk in the google.cloud package.\r\n\r\nThat is correct. For submitting jobs to Vertex, use `google.cloud.aiplatform.PipelineJob(...).submit()` from the Vertex SDK.\r\n\r\n> We tried to submit/schedule a job and came across https://cloud.google.com/vertex-ai/docs/pipelines/schedule-cloud-scheduler where kfp.v2 is used but undocumented for 1.8.x .\r\n\r\nI see. This may already be clear, but just to be sure: there is a v2 namespace in `kfp==1.8.x`. There is also `kfp==2.x.x` which is currently in beta stage. The docs you are referring to are about the v2 namespace in v1, which applies to `kfp==1.8.x`.\r\n\r\nThe v2 namespace in `kfp==1.8.x` will compile to IR YAML, a pipeline protocol format that Vertex AI can read and execute. The main namespace in `kfp==1.8.x` will compile to Argo workflow, which Vertex cannot read and execute. If you wish to use Vertex as your BE, use the v2 namespace.", "@verhoek, the following docs will likely be helpful for you: https://www.kubeflow.org/docs/components/pipelines/v2/migration/" ]
"2022-08-26T14:35:40"
"2023-04-04T17:52:25"
"2023-04-04T17:52:25"
NONE
null
I am having difficulties regarding the documentation of the python SDK of pipelines, in particular the kfp.v2 package of the 1.8.x releases. Some functionality from the package kfp.v2.google seems to be referenced on the web/from the vertex ai docu, yet I cannot find any official referenced documentation of it from the project itself, for example on https://kubeflow-pipelines.readthedocs.io . I understand by browsing through the issues that apparently kfp.v2.google contains deprecated code such as the AIPlatformClient. The effect is that for older versions of kfp there is no SDK documentation available of all that was once/is in kfp.v2 release 1.8.x that is later actually removed in kfp release 2.x . I think this is very confusing for people new to kfp, for which the stable release is still 1.8.x, but from tutorials from vertex ai use code from kfp.v2. I would expect this documentation, even now deprecated, is still available in the SDK documentation. PS: Also, what was the original intent of the v2 package? Already making available v2 sdk functionality from 1.8.x ? In the 2.x versions the kfp.v2 seems to point back to the main package kfp itself, which probably makes sense.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8201/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8201/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8200
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8200/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8200/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8200/events
https://github.com/kubeflow/pipelines/issues/8200
1,351,897,426
I_kwDOB-71UM5QlFFS
8,200
[backend] Metadata writer pod always restarting
{ "login": "andre-lx", "id": 44682155, "node_id": "MDQ6VXNlcjQ0NjgyMTU1", "avatar_url": "https://avatars.githubusercontent.com/u/44682155?v=4", "gravatar_id": "", "url": "https://api.github.com/users/andre-lx", "html_url": "https://github.com/andre-lx", "followers_url": "https://api.github.com/users/andre-lx/followers", "following_url": "https://api.github.com/users/andre-lx/following{/other_user}", "gists_url": "https://api.github.com/users/andre-lx/gists{/gist_id}", "starred_url": "https://api.github.com/users/andre-lx/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/andre-lx/subscriptions", "organizations_url": "https://api.github.com/users/andre-lx/orgs", "repos_url": "https://api.github.com/users/andre-lx/repos", "events_url": "https://api.github.com/users/andre-lx/events{/privacy}", "received_events_url": "https://api.github.com/users/andre-lx/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "Hello @andre-lx , does this issue happen during start-up time, or does it happen when you are running specific pipeline? If there is more information about how to reproduce this issue, it will help us to investigate the problem. \r\n\r\n/assign @chensun ", "> Hello @andre-lx , does this issue happen during start-up time, or does it happen when you are running specific pipeline? If there is more information about how to reproduce this issue, it will help us to investigate the problem.\r\n> \r\n> /assign @chensun\r\n\r\nHey @zijianjoy .\r\n\r\nCompletely loss this message. sorry. \r\n\r\nThis is happens wih all our clusters, as soon we start the kubeflow pipelines the metadata-writes starts restarting with this issue. \r\n\r\nthis happens until today, with k8s 1.24.\r\n\r\nnot sure I can give you more information.\r\n\r\nbut I have some more logs:\r\n\r\n```logs\r\nbash-5.1# kubectl logs metadata-writer-76675f9f9-tjr7j -n kubeflow\r\nConnected to the metadata store\r\nStart watching Kubernetes Pods created by Argo\r\nbash-5.1# kubectl logs metadata-writer-76675f9f9-tjr7j -n kubeflow --previous\r\nConnected to the metadata store\r\nStart watching Kubernetes Pods created by Argo\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/urllib3/response.py\", line 697, in _update_chunk_length\r\n self.chunk_left = int(line, 16)\r\nValueError: invalid literal for int() with base 16: b''\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/urllib3/response.py\", line 438, in _error_catcher\r\n yield\r\n File \"/usr/local/lib/python3.7/site-packages/urllib3/response.py\", line 764, in read_chunked\r\n self._update_chunk_length()\r\n File \"/usr/local/lib/python3.7/site-packages/urllib3/response.py\", line 701, in _update_chunk_length\r\n raise InvalidChunkLength(self, line)\r\nurllib3.exceptions.InvalidChunkLength: InvalidChunkLength(got length b'', 0 bytes read)\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/kfp/metadata_writer/metadata_writer.py\", line 157, in <module>\r\n for event in pod_stream:\r\n File \"/usr/local/lib/python3.7/site-packages/kubernetes/watch/watch.py\", line 144, in stream\r\n for line in iter_resp_lines(resp):\r\n File \"/usr/local/lib/python3.7/site-packages/kubernetes/watch/watch.py\", line 48, in iter_resp_lines\r\n for seg in resp.read_chunked(decode_content=False):\r\n File \"/usr/local/lib/python3.7/site-packages/urllib3/response.py\", line 793, in read_chunked\r\n self._original_response.close()\r\n File \"/usr/local/lib/python3.7/contextlib.py\", line 130, in __exit__\r\n self.gen.throw(type, value, traceback)\r\n File \"/usr/local/lib/python3.7/site-packages/urllib3/response.py\", line 455, in _error_catcher\r\n raise ProtocolError(\"Connection broken: %r\" % e, e)\r\nurllib3.exceptions.ProtocolError: (\"Connection broken: InvalidChunkLength(got length b'', 0 bytes read)\", InvalidChunkLength(got length b'', 0 bytes read))\r\n```" ]
"2022-08-26T07:53:41"
"2023-05-16T20:08:33"
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Manifests in k8s * K8S Version: 1.21 * KFP version: 1.8.1/1.8.2/1.8.3/1.8.4 ### Steps to reproduce Hi. Since release 1.8.1 (can't be sure about older versions) our metadata-writer pod is always restarting infinitely with the following message error: ``` metadata-writer-78fc7d5bb8-ph9kj 2/2 Running 299 78d ``` ``` Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/urllib3/response.py", line 697, in _update_chunk_length self.chunk_left = int(line, 16) ValueError: invalid literal for int() with base 16: b'' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/urllib3/response.py", line 438, in _error_catcher yield File "/usr/local/lib/python3.7/site-packages/urllib3/response.py", line 764, in read_chunked self._update_chunk_length() File "/usr/local/lib/python3.7/site-packages/urllib3/response.py", line 701, in _update_chunk_length raise InvalidChunkLength(self, line) urllib3.exceptions.InvalidChunkLength: InvalidChunkLength(got length b'', 0 bytes read) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/kfp/metadata_writer/metadata_writer.py", line 157, in <module> for event in pod_stream: File "/usr/local/lib/python3.7/site-packages/kubernetes/watch/watch.py", line 144, in stream for line in iter_resp_lines(resp): File "/usr/local/lib/python3.7/site-packages/kubernetes/watch/watch.py", line 48, in iter_resp_lines for seg in resp.read_chunked(decode_content=False): File "/usr/local/lib/python3.7/site-packages/urllib3/response.py", line 793, in read_chunked self._original_response.close() File "/usr/local/lib/python3.7/contextlib.py", line 130, in __exit__ self.gen.throw(type, value, traceback) File "/usr/local/lib/python3.7/site-packages/urllib3/response.py", line 455, in _error_catcher raise ProtocolError("Connection broken: %r" % e, e) urllib3.exceptions.ProtocolError: ("Connection broken: InvalidChunkLength(got length b'', 0 bytes read)", InvalidChunkLength(got length b'', 0 bytes read)) ``` We already try the most recent versions of version 1.8 (we did not try version 2.0.0). The pipelines are working very well, and we don't have any problems till now because of this, but this only happens with this pod. This happens in our multiple clusters with multiple installations, so don't look like an issue of a specific cluster. ### Expected result The pod should stop restarting. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8200/reactions", "total_count": 6, "+1": 6, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8200/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8189
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8189/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8189/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8189/events
https://github.com/kubeflow/pipelines/issues/8189
1,349,923,063
I_kwDOB-71UM5QdjD3
8,189
[backend] Cannot list artifacts
{ "login": "pablofiumara", "id": 4154361, "node_id": "MDQ6VXNlcjQxNTQzNjE=", "avatar_url": "https://avatars.githubusercontent.com/u/4154361?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pablofiumara", "html_url": "https://github.com/pablofiumara", "followers_url": "https://api.github.com/users/pablofiumara/followers", "following_url": "https://api.github.com/users/pablofiumara/following{/other_user}", "gists_url": "https://api.github.com/users/pablofiumara/gists{/gist_id}", "starred_url": "https://api.github.com/users/pablofiumara/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pablofiumara/subscriptions", "organizations_url": "https://api.github.com/users/pablofiumara/orgs", "repos_url": "https://api.github.com/users/pablofiumara/repos", "events_url": "https://api.github.com/users/pablofiumara/events{/privacy}", "received_events_url": "https://api.github.com/users/pablofiumara/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 2710158147, "node_id": "MDU6TGFiZWwyNzEwMTU4MTQ3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/needs%20more%20info", "name": "needs more info", "color": "DBEF12", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[ "Have you checked that both `ml-pipeline-ui` deployment in `kubeflow` namespace and the `ml-pipeline-ui-artifact` deployment in user namespaces are all using `ml-pipeline/frontend:1.8.1`?", "@zijianjoy Yes, I have\r\n\r\n```\r\nName: ml-pipeline-ui\r\nNamespace: kubeflow\r\nCreationTimestamp: Wed, 23 Jun 2021 21:52:54 -0300\r\nLabels: app=ml-pipeline-ui\r\n app.kubernetes.io/component=ml-pipeline\r\n app.kubernetes.io/name=kubeflow-pipelines\r\nAnnotations: deployment.kubernetes.io/revision: 21\r\nSelector: app=ml-pipeline-ui,app.kubernetes.io/component=ml-pipeline,app.kubernetes.io/name=kubeflow-pipelines\r\nReplicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable\r\nStrategyType: RollingUpdate\r\nMinReadySeconds: 0\r\nRollingUpdateStrategy: 25% max unavailable, 25% max surge\r\nPod Template:\r\n Labels: app=ml-pipeline-ui\r\n app.kubernetes.io/component=ml-pipeline\r\n app.kubernetes.io/name=kubeflow-pipelines\r\n Annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: true\r\n kubectl.kubernetes.io/restartedAt: 2022-08-25T18:19:01-03:00\r\n Service Account: ml-pipeline-ui\r\n Containers:\r\n ml-pipeline-ui:\r\n Image: gcr.io/ml-pipeline/frontend:1.8.1\r\n Port: 3000/TCP\r\n Host Port: 0/TCP\r\n Requests:\r\n cpu: 10m\r\n memory: 70Mi\r\n Liveness: exec [wget -q -S -O - http://localhost:3000/apis/v1beta1/healthz] delay=3s timeout=2s period=5s #success=1 #failure=3\r\n Readiness: exec [wget -q -S -O - http://localhost:3000/apis/v1beta1/healthz] delay=3s timeout=2s period=5s #success=1 #failure=3\r\n Environment:\r\n KUBEFLOW_USERID_HEADER: <set to the key 'userid-header' of config map 'kubeflow-config'> Optional: false\r\n KUBEFLOW_USERID_PREFIX: <set to the key 'userid-prefix' of config map 'kubeflow-config'> Optional: false\r\n VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH: /etc/config/viewer-pod-template.json\r\n DEPLOYMENT: KUBEFLOW\r\n ARTIFACTS_SERVICE_PROXY_NAME: ml-pipeline-ui-artifact\r\n ARTIFACTS_SERVICE_PROXY_PORT: 80\r\n ARTIFACTS_SERVICE_PROXY_ENABLED: true\r\n ENABLE_AUTHZ: true\r\n MINIO_NAMESPACE: (v1:metadata.namespace)\r\n MINIO_ACCESS_KEY: <set to the key 'accesskey' in secret 'mlpipeline-minio-artifact'> Optional: false\r\n MINIO_SECRET_KEY: <set to the key 'secretkey' in secret 'mlpipeline-minio-artifact'> Optional: false\r\n ALLOW_CUSTOM_VISUALIZATIONS: true\r\n Mounts:\r\n /etc/config from config-volume (ro)\r\n Volumes:\r\n config-volume:\r\n Type: ConfigMap (a volume populated by a ConfigMap)\r\n Name: ml-pipeline-ui-configmap\r\n Optional: false\r\nConditions:\r\n Type Status Reason\r\n ---- ------ ------\r\n Available True MinimumReplicasAvailable\r\n Progressing True NewReplicaSetAvailable\r\nOldReplicaSets: <none>\r\nNewReplicaSet: ml-pipeline-ui-oneId (1/1 replicas created)\r\nEvents: <none>\r\n```\r\n```\r\n\r\nName: ml-pipeline-ui-artifact\r\nNamespace: myNamespace\r\nCreationTimestamp: Mon, 13 Jun 2022 17:20:27 -0300\r\nLabels: app=ml-pipeline-ui-artifact\r\n controller-uid=34641e66-4d49-4025-b235-fc433a8e2049\r\nAnnotations: deployment.kubernetes.io/revision: 4\r\n metacontroller.k8s.io/last-applied-configuration:\r\n {\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"labels\":{\"app\":\"ml-pipeline-ui-artifact\",\"controller-uid\":\"34641e66-4d49-4025-b23...\r\nSelector: app=ml-pipeline-ui-artifact\r\nReplicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable\r\nStrategyType: RollingUpdate\r\nMinReadySeconds: 0\r\nRollingUpdateStrategy: 25% max unavailable, 25% max surge\r\nPod Template:\r\n Labels: app=ml-pipeline-ui-artifact\r\n Annotations: kubectl.kubernetes.io/restartedAt: 2022-08-23T18:23:11-03:00\r\n Service Account: default-editor\r\n Containers:\r\n ml-pipeline-ui-artifact:\r\n Image: gcr.io/ml-pipeline/frontend:1.8.1\r\n Port: 3000/TCP\r\n Host Port: 0/TCP\r\n Limits:\r\n cpu: 100m\r\n memory: 500Mi\r\n Requests:\r\n cpu: 10m\r\n memory: 70Mi\r\n Environment:\r\n MINIO_ACCESS_KEY: <set to the key 'accesskey' in secret 'mlpipeline-minio-artifact'> Optional: false\r\n MINIO_SECRET_KEY: <set to the key 'secretkey' in secret 'mlpipeline-minio-artifact'> Optional: false\r\n Mounts: <none>\r\n Volumes: <none>\r\nConditions:\r\n Type Status Reason\r\n ---- ------ ------\r\n Available True MinimumReplicasAvailable\r\n Progressing True NewReplicaSetAvailable\r\nOldReplicaSets: <none>\r\nNewReplicaSet: ml-pipeline-ui-artifact-bb5bc4b57 (1/1 replicas created)\r\nEvents: <none>\r\n\r\n```\r\n\r\nWhat else can I check?\r\n\r\n\r\n", "If I go to myCluster/ml_metadata.MetadataStoreService/GetEventsByArtifactIDs, I get the message\r\n\r\n`upstream connect error or disconnect/reset before headers. reset reason: remote reset`\r\n\r\n\r\nUsing asm-1143-0", "ml-metadata has been upgraded from 1.0.0 to 1.5.0 when Kubeflow is upgraded from 1.3 to 1.5. https://github.com/kubeflow/pipelines/commits/master/third_party/ml-metadata\r\n\r\nAs a result, MLMD schema version has been changed. So you need to follow the instruction to upgrade MLMD dependency: https://github.com/google/ml-metadata/blob/master/g3doc/get_started.md#upgrade-the-mlmd-library", "@zijianjoy Thank you very much for your answer. If I execute\r\n\r\n`kubectl describe deployment metadata-grpc-deployment -n kubeflow`\r\n\r\nI get\r\n```\r\n\r\nName: metadata-grpc-deployment\r\nNamespace: kubeflow\r\nCreationTimestamp: Wed, 23 Jun 2021 21:52:53 -0300\r\nLabels: component=metadata-grpc-server\r\nAnnotations: deployment.kubernetes.io/revision: 27\r\nSelector: component=metadata-grpc-server\r\nReplicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable\r\nStrategyType: RollingUpdate\r\nMinReadySeconds: 0\r\nRollingUpdateStrategy: 25% max unavailable, 25% max surge\r\nPod Template:\r\n Labels: component=metadata-grpc-server\r\n Annotations: kubectl.kubernetes.io/restartedAt: 2022-08-26T16:44:45-03:00\r\n Service Account: metadata-grpc-server\r\n Containers:\r\n container:\r\n Image: gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0\r\n Port: 8080/TCP\r\n Host Port: 0/TCP\r\n Command:\r\n /bin/metadata_store_server\r\n Args:\r\n --grpc_port=8080\r\n --mysql_config_database=$(MYSQL_DATABASE)\r\n --mysql_config_host=$(MYSQL_HOST)\r\n --mysql_config_port=$(MYSQL_PORT)\r\n --mysql_config_user=$(DBCONFIG_USER)\r\n --mysql_config_password=$(DBCONFIG_PASSWORD)\r\n --enable_database_upgrade=true\r\n Liveness: tcp-socket :grpc-api delay=3s timeout=2s period=5s #success=1 #failure=3\r\n Readiness: tcp-socket :grpc-api delay=3s timeout=2s period=5s #success=1 #failure=3\r\n Environment:\r\n DBCONFIG_USER: <set to the key 'username' in secret 'mysql-secret'> Optional: false\r\n DBCONFIG_PASSWORD: <set to the key 'password' in secret 'mysql-secret'> Optional: false\r\n MYSQL_DATABASE: <set to the key 'mlmdDb' of config map 'pipeline-install-config'> Optional: false\r\n MYSQL_HOST: <set to the key 'dbHost' of config map 'pipeline-install-config'> Optional: false\r\n MYSQL_PORT: <set to the key 'dbPort' of config map 'pipeline-install-config'> Optional: false\r\n Mounts: <none>\r\n Volumes: <none>\r\nConditions:\r\n Type Status Reason\r\n ---- ------ ------\r\n Available True MinimumReplicasAvailable\r\n Progressing True NewReplicaSetAvailable\r\nOldReplicaSets: <none>\r\nNewReplicaSet: metadata-grpc-deployment-56779cf65 (1/1 replicas created)\r\nEvents:\r\n Type Reason Age From Message\r\n ---- ------ ---- ---- -------\r\n Normal ScalingReplicaSet 50m deployment-controller Scaled up replica set metadata-grpc-deployment-bb6856f48 to 1\r\n Normal ScalingReplicaSet 48m deployment-controller Scaled down replica set metadata-grpc-deployment-58c7dbcd8b to 0\r\n Normal ScalingReplicaSet 39m deployment-controller Scaled up replica set metadata-grpc-deployment-6cc4b76c8d to 1\r\n Normal ScalingReplicaSet 38m deployment-controller Scaled down replica set metadata-grpc-deployment-bb6856f48 to 0\r\n Normal ScalingReplicaSet 36m deployment-controller Scaled up replica set metadata-grpc-deployment-8c74d44b5 to 1\r\n Normal ScalingReplicaSet 35m deployment-controller Scaled down replica set metadata-grpc-deployment-6cc4b76c8d to 0\r\n Normal ScalingReplicaSet 2m53s deployment-controller Scaled up replica set metadata-grpc-deployment-56779cf65 to 1\r\n Normal ScalingReplicaSet 2m19s deployment-controller Scaled down replica set metadata-grpc-deployment-8c74d44b5 to 0\r\n\r\n```\r\n\r\n\r\nDoes this mean MLMD dependency version is correct? What am I missing?", "You need to upgrade the MLMD database schema: https://github.com/google/ml-metadata/blob/master/g3doc/get_started.md#upgrade-the-database-schema", "There is a tool for MLMD upgrade: https://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/tools/metadatastore-upgrade/README.md", "Hi @zijianjoy, Our cluster is a freshly installed 1.5.0 kubeflow cluster.\r\n\r\nWe also see the below error page when accessing myClusterURL/pipeline/artifacts.\r\n![image](https://user-images.githubusercontent.com/31303808/199405635-78917d45-2809-4946-8eee-812202ff087d.png)\r\n\r\nIn the beginning, the artifacts page can be loaded successfully, but after we ran about 600 recurring runs, the artifacts page failed to load with the above message.\r\n\r\nEven we removed all the content under mlpipeline/artifacts/ path in minio. The artifacts page still failed to load with the error.\r\n\r\nIs there any way to recover? Thanks!\r\n", "@celiawa Currently it is listing all artifacts from MLMD store. Even if you deleted the content in MinIO, the MLMD store doesn't delete the corresponding MLMD object. It is likely a timeout trying to list all the artifacts. There is a plan to improve this page https://github.com/kubeflow/pipelines/issues/3226", "Thanks @zijianjoy. I checked the mysql got MLMD store, there're many tables in it. Which tables we shall delete to recover our artifacts page back. We don't want to reinstall.", "Hi @zijianjoy @celiawa I am also facing the same issue, unable to see the Artifacts in Kubeflow, let me know solution to fix the same", "Upgrading KFP to the latest version should allow you to see paginated artifact list now. ", "Thanks @zijianjoy, we upgraded to kfp version 2.01 and can see artifact list pagination now." ]
"2022-08-24T19:28:06"
"2023-09-13T08:24:47"
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Using https://www.kubeflow.org/docs/distributions/gke/deploy/upgrade/ * KFP version: ml-pipeline/frontend:1.8.1 ml-pipeline/api-server:1.8.1 ### Steps to reproduce Upgrade from Kubeflow 1.3 to Kubeflow 1.5 allows to replicate the problem ### Expected result I expect to be able to see a list of artifacts when I access myClusterURL/pipeline/artifacts. Instead I get this https://user-images.githubusercontent.com/74205824/186285977-cba538c2-e496-416e-8f27-67fa4950b4cc.png ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8189/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8189/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8185
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8185/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8185/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8185/events
https://github.com/kubeflow/pipelines/issues/8185
1,347,536,775
I_kwDOB-71UM5QUceH
8,185
[feature] add delete button for runs to KFP UI
{ "login": "tingshua-yts", "id": 57979478, "node_id": "MDQ6VXNlcjU3OTc5NDc4", "avatar_url": "https://avatars.githubusercontent.com/u/57979478?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tingshua-yts", "html_url": "https://github.com/tingshua-yts", "followers_url": "https://api.github.com/users/tingshua-yts/followers", "following_url": "https://api.github.com/users/tingshua-yts/following{/other_user}", "gists_url": "https://api.github.com/users/tingshua-yts/gists{/gist_id}", "starred_url": "https://api.github.com/users/tingshua-yts/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tingshua-yts/subscriptions", "organizations_url": "https://api.github.com/users/tingshua-yts/orgs", "repos_url": "https://api.github.com/users/tingshua-yts/repos", "events_url": "https://api.github.com/users/tingshua-yts/events{/privacy}", "received_events_url": "https://api.github.com/users/tingshua-yts/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Hi @tingshua-yts , you can archive the runs first and then delete them in the archived list." ]
"2022-08-23T08:19:39"
"2022-08-25T22:47:17"
"2022-08-25T22:47:17"
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> /area frontend <!-- /area backend --> <!-- /area sdk --> <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? ![image](https://user-images.githubusercontent.com/57979478/186108318-99aa686b-6b9d-4247-ab98-c68b7d00d796.png) can add delete button for runs ? Currently, there is no delete button. Is there any design concern? <!-- Provide a description of this feature and the user experience. --> ### What is the use case or pain point? I submitted a batch of tasks and found that they did not meet expectations. I hope to delete them <!-- It helps us understand the benefit of this feature for your use case. --> ### Is there a workaround currently? <!-- Without this feature, how do you accomplish your task today? --> I use kubectl delete workflow xxxx --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8185/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8185/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8184
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8184/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8184/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8184/events
https://github.com/kubeflow/pipelines/issues/8184
1,347,443,703
I_kwDOB-71UM5QUFv3
8,184
[backend] disabling task caching doesn't appear to work
{ "login": "ptitzler", "id": 13068832, "node_id": "MDQ6VXNlcjEzMDY4ODMy", "avatar_url": "https://avatars.githubusercontent.com/u/13068832?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ptitzler", "html_url": "https://github.com/ptitzler", "followers_url": "https://api.github.com/users/ptitzler/followers", "following_url": "https://api.github.com/users/ptitzler/following{/other_user}", "gists_url": "https://api.github.com/users/ptitzler/gists{/gist_id}", "starred_url": "https://api.github.com/users/ptitzler/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ptitzler/subscriptions", "organizations_url": "https://api.github.com/users/ptitzler/orgs", "repos_url": "https://api.github.com/users/ptitzler/repos", "events_url": "https://api.github.com/users/ptitzler/events{/privacy}", "received_events_url": "https://api.github.com/users/ptitzler/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "I think this is because this function only works when using SDK V2.\r\n\r\nTo disable the cache for a component in V1, use:\r\n\r\n```python\r\ncontainer_op.execution_options.caching_strategy.max_cache_staleness = \"P0D\"\r\n```", "Thanks! That appears to work.", "> Thanks! That appears to work.\r\n\r\nPlease close your issue if it's resolved." ]
"2022-08-23T07:05:07"
"2022-09-06T10:20:22"
"2022-09-06T10:20:22"
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? ``` $ kubectl version --short Client Version: v1.24.4 Kustomize Version: v4.5.4 Server Version: v1.20.2 ``` - minikube v1.18.1 - kubernetes v1.20.2 <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: Deployed using [kubeflow/manifests v1.5.0](https://github.com/kubeflow/manifests/archive/refs/tags/v1.5.0.tar.gz) <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> ``` $ pip list | grep kfp kfp 1.8.13 kfp-pipeline-spec 0.1.16 kfp-server-api 1.8.4 kfp-tekton 1.3.1 ``` ### Steps to reproduce Create a ContainerOp and use [`set_caching_options(False)`](https://kubeflow-pipelines.readthedocs.io/en/1.8.13/source/kfp.dsl.html#kfp.dsl.ContainerOp.set_caching_options) to disable task caching ``` from kfp import dsl from kfp.compiler import Compiler @dsl.pipeline(name='ContainerOp caching test', description='') def pipeline(): co = dsl.ContainerOp( name='sample ContainerOp', image='busybox', command=['echo', 'Hello KFP!'], ) co.set_caching_options(False) if __name__ == '__main__': Compiler().compile(pipeline, __file__ + '.yaml') ``` Compile the pipeline. The compiled pipeline looks as follows: ``` apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: generateName: containerop-caching-test- annotations: {pipelines.kubeflow.org/kfp_sdk_version: 1.8.13, pipelines.kubeflow.org/pipeline_compilation_time: '2022-08-23T08:20:01.620863', pipelines.kubeflow.org/pipeline_spec: '{"name": "ContainerOp caching test"}'} labels: {pipelines.kubeflow.org/kfp_sdk_version: 1.8.13} spec: entrypoint: containerop-caching-test templates: - name: containerop-caching-test dag: tasks: - {name: sample-containerop, template: sample-containerop} - name: sample-containerop container: command: [echo, Hello KFP!] image: busybox metadata: labels: pipelines.kubeflow.org/kfp_sdk_version: 1.8.13 pipelines.kubeflow.org/pipeline-sdk-type: kfp pipelines.kubeflow.org/enable_caching: "false" arguments: parameters: [] serviceAccountName: pipeline-runner ``` Run the pipeline multiple times and review the run info in the Central Dashboard. ![image](https://user-images.githubusercontent.com/13068832/186090182-846f6837-af23-4e2b-a1c8-3cc743ebf0db.png) fwiw the following labels are defined on the pod: ``` Labels: pipeline/runid=40313ed7-32c0-4d01-9588-7c1f033b1b14 pipelines.kubeflow.org/cache_enabled=true pipelines.kubeflow.org/cache_id=813 pipelines.kubeflow.org/enable_caching=false pipelines.kubeflow.org/kfp_sdk_version=1.8.13 pipelines.kubeflow.org/metadata_execution_id=28 pipelines.kubeflow.org/metadata_written=true pipelines.kubeflow.org/pipeline-sdk-type=kfp pipelines.kubeflow.org/reused_from_cache=true workflows.argoproj.io/completed=true workflows.argoproj.io/workflow=lambda-4mk5p ``` Note `pipelines.kubeflow.org/cache_enabled=true` and `pipelines.kubeflow.org/enable_caching=false` <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ### Expected result <!-- What should the correct behavior be? --> Task results are not re-used across multiple pipeline runs. Not sure what (if anything) I am missing. Disabling caching for entire pipelines or the entire KFP deployment is not a viable option since the caching requirements vary. Please advise. Thank you! ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8184/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8184/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8176
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8176/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8176/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8176/events
https://github.com/kubeflow/pipelines/issues/8176
1,345,406,619
I_kwDOB-71UM5QMUab
8,176
[sdk] Missing outputs field in generated component yaml
{ "login": "capoolebugchat", "id": 83812596, "node_id": "MDQ6VXNlcjgzODEyNTk2", "avatar_url": "https://avatars.githubusercontent.com/u/83812596?v=4", "gravatar_id": "", "url": "https://api.github.com/users/capoolebugchat", "html_url": "https://github.com/capoolebugchat", "followers_url": "https://api.github.com/users/capoolebugchat/followers", "following_url": "https://api.github.com/users/capoolebugchat/following{/other_user}", "gists_url": "https://api.github.com/users/capoolebugchat/gists{/gist_id}", "starred_url": "https://api.github.com/users/capoolebugchat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/capoolebugchat/subscriptions", "organizations_url": "https://api.github.com/users/capoolebugchat/orgs", "repos_url": "https://api.github.com/users/capoolebugchat/repos", "events_url": "https://api.github.com/users/capoolebugchat/events{/privacy}", "received_events_url": "https://api.github.com/users/capoolebugchat/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Close as this is resolved by updating the kfp SDK to v1.8.13 and above. Seems like a minor bug in the v1 to v2 migration that got fixed." ]
"2022-08-21T07:09:12"
"2022-08-22T15:22:17"
"2022-08-22T15:21:00"
NONE
null
### Environment * KFP version: <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP SDK version: 1.8.3 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce I'm writing a component for the KFP v2 Pipeline backend, I wrote it in Python with the following definition: @component def train( dataset: Input[Dataset], config: Input[Artifact], model: Output[Model], bucket_name: str) then, I tried to package it for later usages, using from kfp.v2.components.component_factory import create_component_from_func create_component_from_func( func=train, base_image="[docker.io/](http://docker.io/)...", output_component_file="component_v2.yaml" ) A file showed up without any error, but weirdly it's missing my outputs artifacts, only showing inputs and implementations instead. I dont khow whether this is expected or I did it wrong somehow? ### Expected result <!-- What should the correct behavior be? --> ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8176/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8176/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8175
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8175/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8175/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8175/events
https://github.com/kubeflow/pipelines/issues/8175
1,345,405,364
I_kwDOB-71UM5QMUG0
8,175
[backend] <Bug Name>TypeError: compile() got an unexpected keyword argument 'pipeline_parameters'
{ "login": "r-matsuzaka", "id": 76238346, "node_id": "MDQ6VXNlcjc2MjM4MzQ2", "avatar_url": "https://avatars.githubusercontent.com/u/76238346?v=4", "gravatar_id": "", "url": "https://api.github.com/users/r-matsuzaka", "html_url": "https://github.com/r-matsuzaka", "followers_url": "https://api.github.com/users/r-matsuzaka/followers", "following_url": "https://api.github.com/users/r-matsuzaka/following{/other_user}", "gists_url": "https://api.github.com/users/r-matsuzaka/gists{/gist_id}", "starred_url": "https://api.github.com/users/r-matsuzaka/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/r-matsuzaka/subscriptions", "organizations_url": "https://api.github.com/users/r-matsuzaka/orgs", "repos_url": "https://api.github.com/users/r-matsuzaka/repos", "events_url": "https://api.github.com/users/r-matsuzaka/events{/privacy}", "received_events_url": "https://api.github.com/users/r-matsuzaka/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @r-matsuzaka, \r\n\r\nSorry about the confusion here. The doc you referred to is about KFP v1, and the master branch code is about KFP v2. The code for v1 can be found in the `sdk/release-1.8` branch: https://github.com/kubeflow/pipelines/blob/0ad4cdfaabebbb566dad2b9523267d886942f2ad/sdk/python/kfp/compiler/compiler.py#L1129-L1144\r\n\r\nand `pipeline_parameters` was not there.\r\n\r\nWe're working on adding the v2 doc, and you may find some recent changes in: https://github.com/kubeflow/website/commits/pipelines-v2-docs" ]
"2022-08-21T07:02:29"
"2022-08-25T23:05:40"
"2022-08-25T23:05:39"
NONE
null
### Environment #### How did you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> - minikube latest (kubernets 1.21.14) - kustomize 3.2.0 - kubectl 1.21.14 - kubeflow/manifests v1.6-branch ```shell git clone https://github.com/kubeflow/manifests.git -b v1.6-branch cd manifests bash hack/setup-kubeflow.sh ``` * KFP version: ``` $ print(kfp.__version__) 1.8.13 ``` I tried both: ``` pip install kfp==1.8.13 pip install --pre kfp ``` ### Steps to reproduce https://www.kubeflow.org/docs/components/pipelines/sdk/build-pipeline/#getting-started-building-a-pipeline ### Expected result I cannot get kubeflow pipeline yaml file by following method. And I got `TypeError: compile() got an unexpected keyword argument 'pipeline_parameters'` ``` kfp.compiler.Compiler().compile( pipeline_func=my_pipeline, package_path='pipeline.yaml', pipeline_parameters={'url': 'https://storage.googleapis.com/ml-pipeline-playground/iris-csv-files.tar.gz'}) ``` From this implementation, I should use `pipeline_parameters` argument for `kfp.compiler.Compiler().compile` https://github.com/kubeflow/pipelines/blob/3b1bd08ce0188605cff90e3f14563d55436aac96/sdk/python/kfp/compiler/compiler.py#L62 ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8175/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8175/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8171
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8171/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8171/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8171/events
https://github.com/kubeflow/pipelines/issues/8171
1,344,856,124
I_kwDOB-71UM5QKOA8
8,171
[backend] Use the taskInfo name to find artifact
{ "login": "zichuan-scott-xu", "id": 34961455, "node_id": "MDQ6VXNlcjM0OTYxNDU1", "avatar_url": "https://avatars.githubusercontent.com/u/34961455?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zichuan-scott-xu", "html_url": "https://github.com/zichuan-scott-xu", "followers_url": "https://api.github.com/users/zichuan-scott-xu/followers", "following_url": "https://api.github.com/users/zichuan-scott-xu/following{/other_user}", "gists_url": "https://api.github.com/users/zichuan-scott-xu/gists{/gist_id}", "starred_url": "https://api.github.com/users/zichuan-scott-xu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zichuan-scott-xu/subscriptions", "organizations_url": "https://api.github.com/users/zichuan-scott-xu/orgs", "repos_url": "https://api.github.com/users/zichuan-scott-xu/repos", "events_url": "https://api.github.com/users/zichuan-scott-xu/events{/privacy}", "received_events_url": "https://api.github.com/users/zichuan-scott-xu/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[]
"2022-08-19T20:10:55"
"2022-08-19T20:10:55"
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines Standalone * KFP version: 2.0.0-alpha.3 * KFP SDK version: 2.0.0b2 <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce 1. Open https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/compiler/test_data/pipelines/two_step_pipeline_containerized.py 2. Call `.set_display_name('name1')` on `component1` and `.set_display_name('name2')` on `component2` when defining `my_pipeline` 3. Run the file to compile a yaml IR 4. Submit the IR to KFP standalone ### Expected result The pipeline is not able to run as the second component couldn't find the component `component1`, which is renamed. ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/compiler/test_data/pipelines/two_step_pipeline_containerized.py https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/compiler/test_data/pipelines/two_step_pipeline_containerized.yaml --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8171/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8171/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8166
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8166/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8166/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8166/events
https://github.com/kubeflow/pipelines/issues/8166
1,343,011,553
I_kwDOB-71UM5QDLrh
8,166
[sdk] Not able to parse `JsonObject` type input correctly
{ "login": "KarolisKont", "id": 56597152, "node_id": "MDQ6VXNlcjU2NTk3MTUy", "avatar_url": "https://avatars.githubusercontent.com/u/56597152?v=4", "gravatar_id": "", "url": "https://api.github.com/users/KarolisKont", "html_url": "https://github.com/KarolisKont", "followers_url": "https://api.github.com/users/KarolisKont/followers", "following_url": "https://api.github.com/users/KarolisKont/following{/other_user}", "gists_url": "https://api.github.com/users/KarolisKont/gists{/gist_id}", "starred_url": "https://api.github.com/users/KarolisKont/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/KarolisKont/subscriptions", "organizations_url": "https://api.github.com/users/KarolisKont/orgs", "repos_url": "https://api.github.com/users/KarolisKont/repos", "events_url": "https://api.github.com/users/KarolisKont/events{/privacy}", "received_events_url": "https://api.github.com/users/KarolisKont/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "chongyouquan", "id": 48691403, "node_id": "MDQ6VXNlcjQ4NjkxNDAz", "avatar_url": "https://avatars.githubusercontent.com/u/48691403?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chongyouquan", "html_url": "https://github.com/chongyouquan", "followers_url": "https://api.github.com/users/chongyouquan/followers", "following_url": "https://api.github.com/users/chongyouquan/following{/other_user}", "gists_url": "https://api.github.com/users/chongyouquan/gists{/gist_id}", "starred_url": "https://api.github.com/users/chongyouquan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chongyouquan/subscriptions", "organizations_url": "https://api.github.com/users/chongyouquan/orgs", "repos_url": "https://api.github.com/users/chongyouquan/repos", "events_url": "https://api.github.com/users/chongyouquan/events{/privacy}", "received_events_url": "https://api.github.com/users/chongyouquan/received_events", "type": "User", "site_admin": false }
[ { "login": "chongyouquan", "id": 48691403, "node_id": "MDQ6VXNlcjQ4NjkxNDAz", "avatar_url": "https://avatars.githubusercontent.com/u/48691403?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chongyouquan", "html_url": "https://github.com/chongyouquan", "followers_url": "https://api.github.com/users/chongyouquan/followers", "following_url": "https://api.github.com/users/chongyouquan/following{/other_user}", "gists_url": "https://api.github.com/users/chongyouquan/gists{/gist_id}", "starred_url": "https://api.github.com/users/chongyouquan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chongyouquan/subscriptions", "organizations_url": "https://api.github.com/users/chongyouquan/orgs", "repos_url": "https://api.github.com/users/chongyouquan/repos", "events_url": "https://api.github.com/users/chongyouquan/events{/privacy}", "received_events_url": "https://api.github.com/users/chongyouquan/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-08-18T12:13:47"
"2022-08-18T22:42:52"
null
NONE
null
### Environment * KFP version: 1.8.12 GCP Vertex AI pipelines. * KFP SDK version: GCP Vertex AI pipelines. * All dependencies version: ```shell kfp 1.8.12 kfp-pipeline-spec 0.1.16 kfp-server-api 1.8.4 ``` ```shell google-cloud-pipeline-components 1.0.16 ``` ### Steps to reproduce Using yaml configuration for components, one of the inputs is `JsonObject`type i.e.: ```yaml inputs: - {name: Server resources, type: JsonObject, description: 'A dict describing the amount of resources required to run the server', default: '{"cpu": "1", "memory": "3Gi"}'} ``` Also, it can be `String` type: ```yaml inputs: - {name: Server resources, type: String, description: 'A dict describing the amount of resources required to run the server', default: '{"cpu": "1", "memory": "3Gi"}'} ``` To build a pipeline run we are going threw these stages: 1. Load definition using KFP. 2. Update definition using `create_custom_training_job_from_component` from `google-cloud-pipeline-components` package. 3. Submitting run threw Vertex AI SDK. The component fails due to an error: `Failed to parse the container spec json payload to requested prototype.`. In the Payload the invalid part is this: `"{"cpu": "1", "memory": "3Gi"}"`. PS.: If building pipelines run remove the second step (2. Update definition using `create_custom_training_job_from_component` from `google-cloud-pipeline-components` package.), the component doesn't get incompatible JSON format error and it starts. ### Expected result Component job is provisioned/starts. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8166/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8166/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8165
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8165/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8165/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8165/events
https://github.com/kubeflow/pipelines/issues/8165
1,342,431,886
I_kwDOB-71UM5QA-KO
8,165
[bug] Recurring runs not working after re-installing kubeflow-pipelines modules
{ "login": "saemaromoon", "id": 97422066, "node_id": "U_kgDOBc6K8g", "avatar_url": "https://avatars.githubusercontent.com/u/97422066?v=4", "gravatar_id": "", "url": "https://api.github.com/users/saemaromoon", "html_url": "https://github.com/saemaromoon", "followers_url": "https://api.github.com/users/saemaromoon/followers", "following_url": "https://api.github.com/users/saemaromoon/following{/other_user}", "gists_url": "https://api.github.com/users/saemaromoon/gists{/gist_id}", "starred_url": "https://api.github.com/users/saemaromoon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/saemaromoon/subscriptions", "organizations_url": "https://api.github.com/users/saemaromoon/orgs", "repos_url": "https://api.github.com/users/saemaromoon/repos", "events_url": "https://api.github.com/users/saemaromoon/events{/privacy}", "received_events_url": "https://api.github.com/users/saemaromoon/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
{ "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false }
[ { "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false } ]
null
[ "I figured out that registering a recurring run creates a scheduled workflow crd instance. Removing the pipeline modules from the k8s cluster, also destroyed those crd instances, so the remaining metadata is meaningless. Is there a safer and more efficient way to upgrade the kubeflow-pipeline module without losing those crd objects?", "@saemaromoon you are doing a clean install of the new version which should ideally behave as you told(CRD getting removed). But in order to upgrade the existing installation, you can just run `kustomize build apps/pipeline | kubectl apply -f -` with updated manifests n the directory and it should work without any problems from what I have seen. I hope this helps. πŸ™‚ ", "Hi @saemaromoon, it looks like you can fresh install KFP to resolve this problem. Feel free to re-open this issue if you still have any question." ]
"2022-08-18T01:17:17"
"2022-08-25T22:44:07"
"2022-08-25T22:44:07"
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? Using Kubeflow 1.5.1 on EKS and it is updated from version 1.3.1 I did `kustomize build apps/pipeline | kubectl delete -f -` and `kustomize build apps/pipeline | kubectl apply -f -` registering a pipeline and calling a run works fine but registered recurring runs not working <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: gcr.io/ml-pipeline/api-server:1.8.2 gcr.io/ml-pipeline/metadata-writer:1.8.2 gcr.io/ml-pipeline/persistenceagent:1.8.2 gcr.io/ml-pipeline/scheduledworkflow:1.8.2 gcr.io/ml-pipeline/frontend:1.8.2 gcr.io/ml-pipeline/viewer-crd-controller:1.8.2 gcr.io/ml-pipeline/visualization-server:1.8.2 gcr.io/ml-pipeline/workflow-controller:v3.2.3-license-compliance gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 gcr.io/ml-pipeline/cache-server:1.8.2 gcr.io/ml-pipeline/metadata-envoy:1.8.2 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce Hi, is there anyone experienced registered recurring runs not working after updating and installing kubeflow-pipeline? I re-deployed kubeflow-pipeline to update the version from 1.3.1 to 1.5.1 Recurring runs registered previously are not being updated but I see new recurring runs are being updated normally Tested query: SELECT uuid, displayname, name, namespace, serviceaccount, description, from_unixtime(CreatedAtInSec) CreatedAtInSec, from_unixtime(UpdatedAtInSec) UpdatedAtInSec, from_unixtime(CronScheduleStartTimeInSec) CronScheduleStartTimeInSec, from_unixtime(CronScheduleEndTimeInSec) CronScheduleEndTimeInSec FROM mlpipeline.jobs; It seems like a certain demon in pipeline module misses records to trace. <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ### Expected result registered recurring runs updated and triggered normally after upgrading kubeflow-pipeline module <!-- What should the correct behavior be? --> ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8165/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8165/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8164
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8164/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8164/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8164/events
https://github.com/kubeflow/pipelines/issues/8164
1,342,377,594
I_kwDOB-71UM5QAw56
8,164
[bug] <kubeflow/pipelines mysql && workflow-controller pods failing 'Running' stuck in 'Pending' State with several other pods crashing consistently on Apple M1 architecture Deployed in Kind>
{ "login": "HilbertSpecs", "id": 43871228, "node_id": "MDQ6VXNlcjQzODcxMjI4", "avatar_url": "https://avatars.githubusercontent.com/u/43871228?v=4", "gravatar_id": "", "url": "https://api.github.com/users/HilbertSpecs", "html_url": "https://github.com/HilbertSpecs", "followers_url": "https://api.github.com/users/HilbertSpecs/followers", "following_url": "https://api.github.com/users/HilbertSpecs/following{/other_user}", "gists_url": "https://api.github.com/users/HilbertSpecs/gists{/gist_id}", "starred_url": "https://api.github.com/users/HilbertSpecs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/HilbertSpecs/subscriptions", "organizations_url": "https://api.github.com/users/HilbertSpecs/orgs", "repos_url": "https://api.github.com/users/HilbertSpecs/repos", "events_url": "https://api.github.com/users/HilbertSpecs/events{/privacy}", "received_events_url": "https://api.github.com/users/HilbertSpecs/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[]
"2022-08-17T23:17:15"
"2022-08-17T23:17:15"
null
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? Deploy in Kind --image kindest/node:v1.21.2 (Apple M1 Silicon Architecture BigSur) <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.7.0 (Also Failed with 1.8.13 i.e. replace with in command sequence -> % export PIPELINE_VERSION=1.8.13) <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: 1.8.13 <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce Activate Python Virtual Environment and run following commands: % kind create cluster --image kindest/node:v1.21.2 % export PIPELINE_VERSION=1.7.0 % kubectl apply -k "github.com/kubeflow/pipelines/manifests/kustomize/cluster-scoped-resources?ref=$PIPELINE_VERSION&timeout=300" % kubectl wait --for condition=established --timeout=60s crd/applications.app.k8s.io % kubectl apply -k "github.com/kubeflow/pipelines/manifests/kustomize/env/platform-agnostic-pns?ref=$PIPELINE_VERSION&timeout=300" <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ![kfppodfail2](https://user-images.githubusercontent.com/43871228/185258610-3595068f-a934-4997-8abf-d2379cffeee6.png) ### Expected result <!-- What should the correct behavior be? --> All pods/deployments should be Successfully Running and Stable. ### Materials and reference https://community.deeplearning.ai/t/c4w3-lab1-kubeflowpipelines-kubectl-f-apply-fail/176728/19 <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> area backend <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> area components --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8164/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8164/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8159
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8159/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8159/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8159/events
https://github.com/kubeflow/pipelines/issues/8159
1,341,662,790
I_kwDOB-71UM5P-CZG
8,159
[sdk] Incompatibility between conditionals in component definitions and google-cloud-pipeline-components
{ "login": "GytisBraz", "id": 106960916, "node_id": "U_kgDOBmAYFA", "avatar_url": "https://avatars.githubusercontent.com/u/106960916?v=4", "gravatar_id": "", "url": "https://api.github.com/users/GytisBraz", "html_url": "https://github.com/GytisBraz", "followers_url": "https://api.github.com/users/GytisBraz/followers", "following_url": "https://api.github.com/users/GytisBraz/following{/other_user}", "gists_url": "https://api.github.com/users/GytisBraz/gists{/gist_id}", "starred_url": "https://api.github.com/users/GytisBraz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/GytisBraz/subscriptions", "organizations_url": "https://api.github.com/users/GytisBraz/orgs", "repos_url": "https://api.github.com/users/GytisBraz/repos", "events_url": "https://api.github.com/users/GytisBraz/events{/privacy}", "received_events_url": "https://api.github.com/users/GytisBraz/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-08-17T12:05:54"
"2022-08-18T22:34:14"
null
NONE
null
### Environment * KFP version: 1.8.12 * All dependencies version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> kfp 1.8.12 kfp-pipeline-spec 0.1.16 kfp-server-api 1.8.4 ### Steps to reproduce Run the following code snippet (not necessarily fully-working, but enough to demonstrate this issue) ``` import kfp from google_cloud_pipeline_components.v1.custom_job import ( create_custom_training_job_from_component, ) component = kfp.components.load_component_from_text( """ name: my-component-name inputs: - { name: Columns, type: String } implementation: container: image: my-custom-image command: [ echo ] args: - if: cond: { isPresent: Columns } then: - --columns - { inputValue: Columns } """ ) job = create_custom_training_job_from_component(component) ``` This is the result I get: ` TypeError: Got unexpected placeholder type for IfPlaceholder(if_structure=IfPlaceholderStructure(condition=IsPresentPlaceholder(input_name='Columns'), then_value=['--columns', InputValuePlaceholder(input_name='Columns')], else_value=None)) ` ### Expected result Expected the conditional argument to be parsed without errors. Seems like the issue comes from here: https://github.com/kubeflow/pipelines/blob/1.8.13/sdk/python/kfp/dsl/dsl_utils.py#L132 - the `IfPlaceholder` type is not handled --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8159/reactions", "total_count": 10, "+1": 10, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8159/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8155
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8155/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8155/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8155/events
https://github.com/kubeflow/pipelines/issues/8155
1,340,854,745
I_kwDOB-71UM5P69HZ
8,155
Critical CVEs found in gcr.io/ml-pipeline/visualization-server:1.8.4
{ "login": "andresmascl", "id": 26497935, "node_id": "MDQ6VXNlcjI2NDk3OTM1", "avatar_url": "https://avatars.githubusercontent.com/u/26497935?v=4", "gravatar_id": "", "url": "https://api.github.com/users/andresmascl", "html_url": "https://github.com/andresmascl", "followers_url": "https://api.github.com/users/andresmascl/followers", "following_url": "https://api.github.com/users/andresmascl/following{/other_user}", "gists_url": "https://api.github.com/users/andresmascl/gists{/gist_id}", "starred_url": "https://api.github.com/users/andresmascl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/andresmascl/subscriptions", "organizations_url": "https://api.github.com/users/andresmascl/orgs", "repos_url": "https://api.github.com/users/andresmascl/repos", "events_url": "https://api.github.com/users/andresmascl/events{/privacy}", "received_events_url": "https://api.github.com/users/andresmascl/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1682717385, "node_id": "MDU6TGFiZWwxNjgyNzE3Mzg1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/status/backlog", "name": "status/backlog", "color": "bc9090", "default": false, "description": "" } ]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }, { "login": "gkcalat", "id": 35157096, "node_id": "MDQ6VXNlcjM1MTU3MDk2", "avatar_url": "https://avatars.githubusercontent.com/u/35157096?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gkcalat", "html_url": "https://github.com/gkcalat", "followers_url": "https://api.github.com/users/gkcalat/followers", "following_url": "https://api.github.com/users/gkcalat/following{/other_user}", "gists_url": "https://api.github.com/users/gkcalat/gists{/gist_id}", "starred_url": "https://api.github.com/users/gkcalat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gkcalat/subscriptions", "organizations_url": "https://api.github.com/users/gkcalat/orgs", "repos_url": "https://api.github.com/users/gkcalat/repos", "events_url": "https://api.github.com/users/gkcalat/events{/privacy}", "received_events_url": "https://api.github.com/users/gkcalat/received_events", "type": "User", "site_admin": false } ]
null
[ "@andresmascl, thank you for your contribution!\r\n\r\nAs these vulnerabilities are not marked as critical on GCR, we will keep this in backlog for now.", "Closing this as we have released KFP v2.0.0" ]
"2022-08-16T20:37:31"
"2023-08-04T23:01:52"
"2023-08-04T23:01:52"
NONE
null
### Environment Critical CVEs reported by the `anchore` scanning service: CVE-2020-36242, GHSA-57wx-m983-2f88, GHSA-8vj2-vxx3-667w, GHSA-9j59-75qj-795w , GHSA-cq27-v7xp-c356, GHSA-h6gw-r52c-724r , GHSA-pw3c-h7wp-cvhx * KFP version: `1.8.4` ### Materials and Reference ![image](https://user-images.githubusercontent.com/26497935/184980842-4648cc62-5bb6-426d-a483-9b898377f89c.png) --- Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8155/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8155/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8154
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8154/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8154/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8154/events
https://github.com/kubeflow/pipelines/issues/8154
1,340,852,010
I_kwDOB-71UM5P68cq
8,154
Critical CVEs found in gcr.io/ml-pipeline/frontend:1.8.4
{ "login": "andresmascl", "id": 26497935, "node_id": "MDQ6VXNlcjI2NDk3OTM1", "avatar_url": "https://avatars.githubusercontent.com/u/26497935?v=4", "gravatar_id": "", "url": "https://api.github.com/users/andresmascl", "html_url": "https://github.com/andresmascl", "followers_url": "https://api.github.com/users/andresmascl/followers", "following_url": "https://api.github.com/users/andresmascl/following{/other_user}", "gists_url": "https://api.github.com/users/andresmascl/gists{/gist_id}", "starred_url": "https://api.github.com/users/andresmascl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/andresmascl/subscriptions", "organizations_url": "https://api.github.com/users/andresmascl/orgs", "repos_url": "https://api.github.com/users/andresmascl/repos", "events_url": "https://api.github.com/users/andresmascl/events{/privacy}", "received_events_url": "https://api.github.com/users/andresmascl/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1682717385, "node_id": "MDU6TGFiZWwxNjgyNzE3Mzg1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/status/backlog", "name": "status/backlog", "color": "bc9090", "default": false, "description": "" } ]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }, { "login": "gkcalat", "id": 35157096, "node_id": "MDQ6VXNlcjM1MTU3MDk2", "avatar_url": "https://avatars.githubusercontent.com/u/35157096?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gkcalat", "html_url": "https://github.com/gkcalat", "followers_url": "https://api.github.com/users/gkcalat/followers", "following_url": "https://api.github.com/users/gkcalat/following{/other_user}", "gists_url": "https://api.github.com/users/gkcalat/gists{/gist_id}", "starred_url": "https://api.github.com/users/gkcalat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gkcalat/subscriptions", "organizations_url": "https://api.github.com/users/gkcalat/orgs", "repos_url": "https://api.github.com/users/gkcalat/repos", "events_url": "https://api.github.com/users/gkcalat/events{/privacy}", "received_events_url": "https://api.github.com/users/gkcalat/received_events", "type": "User", "site_admin": false } ]
null
[ "@andresmascl, thank you for your contribution!\r\n\r\nAs these vulnerabilities are not marked as critical on GCR, we will keep this in backlog for now.", "Closing this as we have released KFP v2.0.0" ]
"2022-08-16T20:34:35"
"2023-08-04T23:01:27"
"2023-08-04T23:01:27"
NONE
null
### Environment Critical CVEs found by the `anchore` scanning service: CVE-2020-3681, CVE-2022-28391, GHSA-896r-f27r-55mw, GHSA-xvch-5gv4-984h, * KFP version: `1.8.4` ### Materials and Reference ![image](https://user-images.githubusercontent.com/26497935/184979777-70e25b06-a681-4654-84a6-7fb5973711f8.png) ![image](https://user-images.githubusercontent.com/26497935/184979832-dfc28a0d-2dcc-4ec8-af0f-658598f9f27d.png) ![image](https://user-images.githubusercontent.com/26497935/184979908-d793d8dc-d878-443f-82da-bf6f5cb75954.png) --- Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8154/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8154/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8153
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8153/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8153/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8153/events
https://github.com/kubeflow/pipelines/issues/8153
1,340,844,134
I_kwDOB-71UM5P66hm
8,153
Critical CVEs found in gcr.io/ml-pipeline/metadata-writer:1.8.4
{ "login": "andresmascl", "id": 26497935, "node_id": "MDQ6VXNlcjI2NDk3OTM1", "avatar_url": "https://avatars.githubusercontent.com/u/26497935?v=4", "gravatar_id": "", "url": "https://api.github.com/users/andresmascl", "html_url": "https://github.com/andresmascl", "followers_url": "https://api.github.com/users/andresmascl/followers", "following_url": "https://api.github.com/users/andresmascl/following{/other_user}", "gists_url": "https://api.github.com/users/andresmascl/gists{/gist_id}", "starred_url": "https://api.github.com/users/andresmascl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/andresmascl/subscriptions", "organizations_url": "https://api.github.com/users/andresmascl/orgs", "repos_url": "https://api.github.com/users/andresmascl/repos", "events_url": "https://api.github.com/users/andresmascl/events{/privacy}", "received_events_url": "https://api.github.com/users/andresmascl/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1682717385, "node_id": "MDU6TGFiZWwxNjgyNzE3Mzg1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/status/backlog", "name": "status/backlog", "color": "bc9090", "default": false, "description": "" } ]
closed
false
{ "login": "gkcalat", "id": 35157096, "node_id": "MDQ6VXNlcjM1MTU3MDk2", "avatar_url": "https://avatars.githubusercontent.com/u/35157096?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gkcalat", "html_url": "https://github.com/gkcalat", "followers_url": "https://api.github.com/users/gkcalat/followers", "following_url": "https://api.github.com/users/gkcalat/following{/other_user}", "gists_url": "https://api.github.com/users/gkcalat/gists{/gist_id}", "starred_url": "https://api.github.com/users/gkcalat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gkcalat/subscriptions", "organizations_url": "https://api.github.com/users/gkcalat/orgs", "repos_url": "https://api.github.com/users/gkcalat/repos", "events_url": "https://api.github.com/users/gkcalat/events{/privacy}", "received_events_url": "https://api.github.com/users/gkcalat/received_events", "type": "User", "site_admin": false }
[ { "login": "gkcalat", "id": 35157096, "node_id": "MDQ6VXNlcjM1MTU3MDk2", "avatar_url": "https://avatars.githubusercontent.com/u/35157096?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gkcalat", "html_url": "https://github.com/gkcalat", "followers_url": "https://api.github.com/users/gkcalat/followers", "following_url": "https://api.github.com/users/gkcalat/following{/other_user}", "gists_url": "https://api.github.com/users/gkcalat/gists{/gist_id}", "starred_url": "https://api.github.com/users/gkcalat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gkcalat/subscriptions", "organizations_url": "https://api.github.com/users/gkcalat/orgs", "repos_url": "https://api.github.com/users/gkcalat/repos", "events_url": "https://api.github.com/users/gkcalat/events{/privacy}", "received_events_url": "https://api.github.com/users/gkcalat/received_events", "type": "User", "site_admin": false } ]
null
[ "@andresmascl, thank you for your contribution!\r\n\r\nAs these vulnerabilities are not marked as critical on GCR, we will keep this in backlog for now.", "Resolving as we have released v2.0.0" ]
"2022-08-16T20:26:18"
"2023-08-04T22:09:46"
"2023-08-04T22:09:46"
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Critical Vulnerabilities reported by `anchore` : CVE-2015-20107, CVE-2017-18342, CVE-2021-30473, CVE-2021-30474, CVE-2021-30475, GHSA-8q59-q68h-6hv4 * KFP version: `1.8.4` ### Materials and Reference ![image](https://user-images.githubusercontent.com/26497935/184978497-52f715b1-8715-4543-a58a-adec4030cf58.png) --- Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8153/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8153/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8141
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8141/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8141/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8141/events
https://github.com/kubeflow/pipelines/issues/8141
1,337,255,332
I_kwDOB-71UM5PtOWk
8,141
[feature] Print deep links outside of Jupyter notebooks
{ "login": "droctothorpe", "id": 24783969, "node_id": "MDQ6VXNlcjI0NzgzOTY5", "avatar_url": "https://avatars.githubusercontent.com/u/24783969?v=4", "gravatar_id": "", "url": "https://api.github.com/users/droctothorpe", "html_url": "https://github.com/droctothorpe", "followers_url": "https://api.github.com/users/droctothorpe/followers", "following_url": "https://api.github.com/users/droctothorpe/following{/other_user}", "gists_url": "https://api.github.com/users/droctothorpe/gists{/gist_id}", "starred_url": "https://api.github.com/users/droctothorpe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/droctothorpe/subscriptions", "organizations_url": "https://api.github.com/users/droctothorpe/orgs", "repos_url": "https://api.github.com/users/droctothorpe/repos", "events_url": "https://api.github.com/users/droctothorpe/events{/privacy}", "received_events_url": "https://api.github.com/users/droctothorpe/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[]
"2022-08-12T14:08:39"
"2022-09-09T00:00:30"
"2022-09-09T00:00:30"
CONTRIBUTOR
null
### Feature Area /area sdk ### What feature would you like to see? Currently, `client.py` has 5 blocks that look like this: ```python if self._is_ipython(): import IPython html = \ f'<a href="{self._get_url_prefix()}/#/experiments/details/{experiment.id}" target="_blank" >Experiment details</a>.' IPython.display.display(IPython.display.HTML(html)) ``` This is a really helpful feature but it's only available when interacting with the SDK from IPython. It would be nice (and trivial to implement) if links were printed outside of IPython. I'll submit a PR momentarily that implements this. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8141/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8141/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8133
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8133/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8133/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8133/events
https://github.com/kubeflow/pipelines/issues/8133
1,335,040,025
I_kwDOB-71UM5PkxgZ
8,133
[frontend] Inaccurate two-level dropdown selection
{ "login": "zpChris", "id": 7987279, "node_id": "MDQ6VXNlcjc5ODcyNzk=", "avatar_url": "https://avatars.githubusercontent.com/u/7987279?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zpChris", "html_url": "https://github.com/zpChris", "followers_url": "https://api.github.com/users/zpChris/followers", "following_url": "https://api.github.com/users/zpChris/following{/other_user}", "gists_url": "https://api.github.com/users/zpChris/gists{/gist_id}", "starred_url": "https://api.github.com/users/zpChris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zpChris/subscriptions", "organizations_url": "https://api.github.com/users/zpChris/orgs", "repos_url": "https://api.github.com/users/zpChris/repos", "events_url": "https://api.github.com/users/zpChris/events{/privacy}", "received_events_url": "https://api.github.com/users/zpChris/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-08-10T18:47:09"
"2022-08-10T18:48:52"
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines Standalone * KFP version: 2.0.0-alpha.3 ### Steps to reproduce 1. Create a run which has two Confusion Matrix artifacts of the same type on the same execution - One artifact has the name `Artifact ID #<id>`, and the other artifact has no name but an ID of `<id>` - These confusion matrices should be different - _Note: This may be difficult to accomplish, and is not presented here, but the issue is shown by understanding the underlying code and implementation._ 2. Go to the "Runs" tab on KFP 3. Select that run and one other, and click "Compare Runs" 4. Select the "Confusion Matrix" tab under "Metrics" 5. Select both confusion matrices and view the compare results ### Expected result The Confusion Matrices should be displayed as expected, but instead one of the confusion matrices will be shown twice. ### Materials and Reference On the two-panel layout for Confusion Matrices, HTML, and Markdown, we need to improve the differentiation between display name and ID, likely by adding an underlying value to each dropdown item (rather than requiring each name be unique). This is mentioned in [this GitHub discussion](https://github.com/kubeflow/pipelines/pull/7966#discussion_r913346458); essentially, if an Execution has a displayName of exact value β€œExecution ID #1” and another Execution has no displayName but has an ID of 1, we run into mis-identified artifacts and dropdown items due to the logic implemented at [getLinkedArtifactFromSelectedItem](https://github.com/kubeflow/pipelines/blob/e47a784035820c0d322c5e50562df676fd81ce4c/frontend/src/components/viewers/MetricsDropdown.tsx#L335-L357). --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8133/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8133/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8132
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8132/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8132/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8132/events
https://github.com/kubeflow/pipelines/issues/8132
1,335,029,616
I_kwDOB-71UM5Pku9w
8,132
[frontend] Dropdown and HTML / Markdown artifacts are misaligned
{ "login": "zpChris", "id": 7987279, "node_id": "MDQ6VXNlcjc5ODcyNzk=", "avatar_url": "https://avatars.githubusercontent.com/u/7987279?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zpChris", "html_url": "https://github.com/zpChris", "followers_url": "https://api.github.com/users/zpChris/followers", "following_url": "https://api.github.com/users/zpChris/following{/other_user}", "gists_url": "https://api.github.com/users/zpChris/gists{/gist_id}", "starred_url": "https://api.github.com/users/zpChris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zpChris/subscriptions", "organizations_url": "https://api.github.com/users/zpChris/orgs", "repos_url": "https://api.github.com/users/zpChris/repos", "events_url": "https://api.github.com/users/zpChris/events{/privacy}", "received_events_url": "https://api.github.com/users/zpChris/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[]
"2022-08-10T18:38:40"
"2023-01-18T10:29:44"
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines Standalone * KFP version: 2.0.0-alpha.3 ### Steps to reproduce 1. Go to the "Runs" tab on KFP 2. Select multiple runs which have HTML or Markdown artifacts, and click "Compare Runs" 3. Select the "HTML" or "Markdown" tab under "Metrics" 4. Select an artifact which has large width (see "Materials and Reference") ### Expected result The HTML and Markdown artifacts are not left-aligned on the two-panel layout along with the two-level dropdown component. ### Materials and Reference The HTML and Markdown artifacts are not left-aligned alongside the two-level dropdown component. ![HTMLMarkdownArtifactsMisAligned](https://user-images.githubusercontent.com/7987279/183990272-319f3eb5-6167-4837-b934-91acb2e95587.png) The below image shows the artifact is currently aligned in the center of the panel of this two-panel layout. ![HTMLMarkdownArtifactsCenterAligned](https://user-images.githubusercontent.com/7987279/183991015-7bb73563-44a1-4efd-b462-75201b7a7318.png) --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8132/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8132/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8131
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8131/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8131/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8131/events
https://github.com/kubeflow/pipelines/issues/8131
1,335,004,298
I_kwDOB-71UM5PkoyK
8,131
[frontend] Address KFPv2 Run Comparison page scalability
{ "login": "zpChris", "id": 7987279, "node_id": "MDQ6VXNlcjc5ODcyNzk=", "avatar_url": "https://avatars.githubusercontent.com/u/7987279?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zpChris", "html_url": "https://github.com/zpChris", "followers_url": "https://api.github.com/users/zpChris/followers", "following_url": "https://api.github.com/users/zpChris/following{/other_user}", "gists_url": "https://api.github.com/users/zpChris/gists{/gist_id}", "starred_url": "https://api.github.com/users/zpChris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zpChris/subscriptions", "organizations_url": "https://api.github.com/users/zpChris/orgs", "repos_url": "https://api.github.com/users/zpChris/repos", "events_url": "https://api.github.com/users/zpChris/events{/privacy}", "received_events_url": "https://api.github.com/users/zpChris/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-08-10T18:18:40"
"2022-08-11T18:25:26"
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines Standalone * KFP version: 2.0.0-alpha.3 ### Steps to reproduce 1. Go to the "Runs" tab on KFP 2. Select multiple runs which have many artifacts (for instance, some over 100+), and click "Compare Runs" ### Expected result You should see the page load all of the artifacts efficiently and accurately. Instead, the page currently does not load the artifacts past 100 per run, and the loading time will likely be slower in relation to comparing artifacts with few to no runs. ### Materials and Reference The main issue is shown in the `TODO` below: https://github.com/kubeflow/pipelines/blob/a7fddf000d5da254ee872574677a974e4ff4b4db/frontend/src/pages/CompareV2.tsx#L315-L316 However, there is also a broader issue of scalability; though we implemented scalable methods and designed the page to handle a large quantity of artifacts, we have not thoroughly tested this page against runs with 100s of artifacts. Although the design and loading states should be able to provide a nice UX, the scalability of this page is nevertheless an important question to answer. Lastly, this is also related to the issue of fetching very large HTML and Markdown files in an efficient manner, as the KFPv2 Run Comparison page will need to do this repeatedly. The existing implementation and `TODO` for this is available here: https://github.com/kubeflow/pipelines/blob/a7fddf000d5da254ee872574677a974e4ff4b4db/frontend/src/components/viewers/MetricsVisualizations.tsx#L890 --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8131/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8131/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8130
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8130/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8130/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8130/events
https://github.com/kubeflow/pipelines/issues/8130
1,334,994,521
I_kwDOB-71UM5PkmZZ
8,130
[frontend] Clarify the two-level dropdown naming schema
{ "login": "zpChris", "id": 7987279, "node_id": "MDQ6VXNlcjc5ODcyNzk=", "avatar_url": "https://avatars.githubusercontent.com/u/7987279?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zpChris", "html_url": "https://github.com/zpChris", "followers_url": "https://api.github.com/users/zpChris/followers", "following_url": "https://api.github.com/users/zpChris/following{/other_user}", "gists_url": "https://api.github.com/users/zpChris/gists{/gist_id}", "starred_url": "https://api.github.com/users/zpChris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zpChris/subscriptions", "organizations_url": "https://api.github.com/users/zpChris/orgs", "repos_url": "https://api.github.com/users/zpChris/repos", "events_url": "https://api.github.com/users/zpChris/events{/privacy}", "received_events_url": "https://api.github.com/users/zpChris/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-08-10T18:10:12"
"2022-08-10T18:32:09"
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines Standalone * KFP version: 2.0.0-alpha.3 ### Steps to reproduce 1. Go to the "Runs" tab on KFP 2. Select multiple runs which have Confusion Matrices, and click "Compare Runs" 3. Select the "Confusion Matrix" tab under "Metrics" 4. Click the two-level dropdown ### Expected result You should see the two-level dropdown with some explanation as to the naming schema, but no headers or explanation for this component are provided. ### Materials and Reference The below image shows the existing two-level dropdown used for Confusion Matrices, HTML, and Markdown on the two-panel layout of the KFPv2 Run Comparison page. ![TwoLevelDropdownNamingSchema](https://user-images.githubusercontent.com/7987279/183985460-4128058f-7f96-4138-80af-e5e49c9700af.png) This below image shows one proposed solution to clarify the two-level dropdown naming schema. This styling is not finalized, and another solution would be to add a text explanation above the dropdown component itself. ![TwoLevelDropdownHeaders](https://user-images.githubusercontent.com/7987279/183985530-8691dd95-a5c5-4b2f-a61d-98d51a1499f7.png) --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8130/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8130/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8129
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8129/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8129/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8129/events
https://github.com/kubeflow/pipelines/issues/8129
1,334,987,608
I_kwDOB-71UM5PkktY
8,129
[frontend] Confusion matrix unnecessary spacing
{ "login": "zpChris", "id": 7987279, "node_id": "MDQ6VXNlcjc5ODcyNzk=", "avatar_url": "https://avatars.githubusercontent.com/u/7987279?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zpChris", "html_url": "https://github.com/zpChris", "followers_url": "https://api.github.com/users/zpChris/followers", "following_url": "https://api.github.com/users/zpChris/following{/other_user}", "gists_url": "https://api.github.com/users/zpChris/gists{/gist_id}", "starred_url": "https://api.github.com/users/zpChris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zpChris/subscriptions", "organizations_url": "https://api.github.com/users/zpChris/orgs", "repos_url": "https://api.github.com/users/zpChris/repos", "events_url": "https://api.github.com/users/zpChris/events{/privacy}", "received_events_url": "https://api.github.com/users/zpChris/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-08-10T18:03:41"
"2022-08-10T18:31:49"
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines Standalone * KFP version: 2.0.0-alpha.3 ### Steps to reproduce 1. Go to the "Runs" tab on KFP 2. Select multiple runs which have Confusion Matrices, and click "Compare Runs" 3. Select the "Confusion Matrix" tab under "Metrics" 4. Select any confusion matrix ### Expected result You should see the confusion matrix with correct formatting, but it is instead surrounded by some unused space. ### Materials and Reference The below image shows the space in question which is unused, and could be removed so the scroll bar has a more accurate state. ![ConfusionMatrixSpacing](https://user-images.githubusercontent.com/7987279/183982743-2c1cf008-d05d-43fd-b691-c640ad4fda6f.png) This should be achievable by conditionally removing padding on the component here: https://github.com/kubeflow/pipelines/blob/bdff332ac69db0589c8b9eb55e7cdf7e3bee710d/frontend/src/components/viewers/MetricsVisualizations.tsx#L755-L767 --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8129/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8129/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8126
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8126/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8126/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8126/events
https://github.com/kubeflow/pipelines/issues/8126
1,333,948,492
I_kwDOB-71UM5PgnBM
8,126
[feature] Implement comprehensive unit tests for the KFP SDK Client class
{ "login": "droctothorpe", "id": 24783969, "node_id": "MDQ6VXNlcjI0NzgzOTY5", "avatar_url": "https://avatars.githubusercontent.com/u/24783969?v=4", "gravatar_id": "", "url": "https://api.github.com/users/droctothorpe", "html_url": "https://github.com/droctothorpe", "followers_url": "https://api.github.com/users/droctothorpe/followers", "following_url": "https://api.github.com/users/droctothorpe/following{/other_user}", "gists_url": "https://api.github.com/users/droctothorpe/gists{/gist_id}", "starred_url": "https://api.github.com/users/droctothorpe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/droctothorpe/subscriptions", "organizations_url": "https://api.github.com/users/droctothorpe/orgs", "repos_url": "https://api.github.com/users/droctothorpe/repos", "events_url": "https://api.github.com/users/droctothorpe/events{/privacy}", "received_events_url": "https://api.github.com/users/droctothorpe/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Hi @droctothorpe feel free to contribute ? JFYI, here're some example code on how we tested the v2 Vertex client (which was moved out of KFP package in the master branch): https://github.com/kubeflow/pipelines/tree/sdk/release-1.8/sdk/python/kfp/v2/google/client" ]
"2022-08-10T01:28:47"
"2022-08-11T22:54:28"
null
CONTRIBUTOR
null
### Feature Area /area sdk ### What feature would you like to see? We would like to see comprehensive unit tests for the KFP SDK [Client](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/client/client.py#L82) class to improve confidence in code changes. The existing coverage looks [quite sparse](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/client/client_test.py). My team and I would be interested in contributing this to the upstream but want to make sure that we're not stepping on anyone's toes / it makes sense to core maintainers. ### What is the use case or pain point? Reduced confidence in code changes / releases. ### Is there a workaround currently? No. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8126/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8126/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8125
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8125/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8125/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8125/events
https://github.com/kubeflow/pipelines/issues/8125
1,333,944,091
I_kwDOB-71UM5Pgl8b
8,125
[feature]Add a KubeFlow pipeline for TensorFlowJS/JavaScript
{ "login": "shivaylamba", "id": 19529592, "node_id": "MDQ6VXNlcjE5NTI5NTky", "avatar_url": "https://avatars.githubusercontent.com/u/19529592?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shivaylamba", "html_url": "https://github.com/shivaylamba", "followers_url": "https://api.github.com/users/shivaylamba/followers", "following_url": "https://api.github.com/users/shivaylamba/following{/other_user}", "gists_url": "https://api.github.com/users/shivaylamba/gists{/gist_id}", "starred_url": "https://api.github.com/users/shivaylamba/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shivaylamba/subscriptions", "organizations_url": "https://api.github.com/users/shivaylamba/orgs", "repos_url": "https://api.github.com/users/shivaylamba/repos", "events_url": "https://api.github.com/users/shivaylamba/events{/privacy}", "received_events_url": "https://api.github.com/users/shivaylamba/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Hi @shivaylamba, KFP support running containerized app, meaning you can always package your code into a container, and we support running any container as a KFP component. Would that work for your case?", "/cc @zichuan-scott-xu " ]
"2022-08-10T01:20:30"
"2022-08-11T22:50:16"
null
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> /area javascript /area samples /area pipelines ### What feature would you like to see? We would like to see a dedicated pipeline for TensorFlowJS or even a pipeline for JavaScript. Today TensorFlowJS is being used in a lot of production use cases. And it makes sense to have dedicated KubeFlow Pipeline for JavaScript/NodeJS. ### What is the use case or pain point? Today TensorFlowJS is being used in a lot of production use cases. But we don't have any supported KubeFlow pipeline for JavaScript. ### Is there a workaround currently? Today there isn't a KubeFlow pipeline supported for TensorFlowJS / NodeJS. The workaround could be to have a self serve container. But it would be great to have a dedicated KubeFlow pipeline. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8125/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8125/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8121
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8121/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8121/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8121/events
https://github.com/kubeflow/pipelines/issues/8121
1,333,774,038
I_kwDOB-71UM5Pf8bW
8,121
[frontend] Table sort mechanism broken on multiple tabs
{ "login": "zpChris", "id": 7987279, "node_id": "MDQ6VXNlcjc5ODcyNzk=", "avatar_url": "https://avatars.githubusercontent.com/u/7987279?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zpChris", "html_url": "https://github.com/zpChris", "followers_url": "https://api.github.com/users/zpChris/followers", "following_url": "https://api.github.com/users/zpChris/following{/other_user}", "gists_url": "https://api.github.com/users/zpChris/gists{/gist_id}", "starred_url": "https://api.github.com/users/zpChris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zpChris/subscriptions", "organizations_url": "https://api.github.com/users/zpChris/orgs", "repos_url": "https://api.github.com/users/zpChris/repos", "events_url": "https://api.github.com/users/zpChris/events{/privacy}", "received_events_url": "https://api.github.com/users/zpChris/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[]
"2022-08-09T20:50:06"
"2022-08-09T20:50:06"
null
CONTRIBUTOR
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines Standalone * KFP version: `HEAD` ### Steps to reproduce 1. Go to the `Runs`, `Artifacts`, or `Executions` tab on KFP 2. Try to sort by any of the desired and sortable columns ### Expected result You should see the table sort itself based on that column, but either nothing changes or the table contents disappear. ### Materials and Reference The below video shows how the sorting mechanism works on the `Pipelines`, `Experiments`, and `Recurring Runs` tabs, but does not work on the `Runs`, `Artifacts`, or `Executions` tabs. https://user-images.githubusercontent.com/7987279/183758095-4b937e5e-b7fb-4610-b763-e600b90beef7.mp4 --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8121/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8121/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8119
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8119/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8119/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8119/events
https://github.com/kubeflow/pipelines/issues/8119
1,333,206,696
I_kwDOB-71UM5Pdx6o
8,119
[sdk] kfp==2.0.0b1 is incompatible with google-cloud-pipeline-components
{ "login": "GytisBraz", "id": 106960916, "node_id": "U_kgDOBmAYFA", "avatar_url": "https://avatars.githubusercontent.com/u/106960916?v=4", "gravatar_id": "", "url": "https://api.github.com/users/GytisBraz", "html_url": "https://github.com/GytisBraz", "followers_url": "https://api.github.com/users/GytisBraz/followers", "following_url": "https://api.github.com/users/GytisBraz/following{/other_user}", "gists_url": "https://api.github.com/users/GytisBraz/gists{/gist_id}", "starred_url": "https://api.github.com/users/GytisBraz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/GytisBraz/subscriptions", "organizations_url": "https://api.github.com/users/GytisBraz/orgs", "repos_url": "https://api.github.com/users/GytisBraz/repos", "events_url": "https://api.github.com/users/GytisBraz/events{/privacy}", "received_events_url": "https://api.github.com/users/GytisBraz/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Hi @GytisBraz , this is a known issue and we're working on migrating `google-cloud-pipeline-components` to be compatible with kfp 2.0.", "any updates on this? Also will there be any breaking changes?", "Interested a solution", "Any updates? This incompatibility is impacting me quite badly as a new user. We adopted using GCPC in our pipelines because they are a great convenience and a important feature of the platform which locks us to KFP < 1.8.19. But now I am unable to save components to the Vertex Template or Artifact store as those seem to require KFP > 2.0.0", "@xRagnorokx Ive been using google-cloud-components==2.0.0b1 and it seems to be working ok ", "But these can be yanked or updated at any time right? So I have to use both a unstable version of KFP and a unstable version of GCPC in production to make use of Vertex's full feature suite?", "@xRagnorokx yes - unfortunately, from what I can tell " ]
"2022-08-09T12:45:53"
"2023-04-05T17:04:05"
null
NONE
null
### Environment * KFP version: `2.0.0b1` <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> ### Steps to reproduce `pip3 install kfp==2.0.0b1` followed by `pip3 install google-cloud-pipeline-components` ### Expected result I expect both packages to be compatible. `google-cloud-pipeline-components` is an [SDK](https://pypi.org/project/google-cloud-pipeline-components/) for interacting with Vertex AI, and `kfp>=2.0.0b1` is the version that is suggested/needed to work with Vertex AI - link [here](https://cloud.google.com/vertex-ai/docs/pipelines/create-pipeline-template#before-you-begin) The issue seems to stems from this [line](https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/dependencies.py#L24) --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8119/reactions", "total_count": 7, "+1": 7, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8119/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8117
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8117/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8117/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8117/events
https://github.com/kubeflow/pipelines/issues/8117
1,332,686,916
I_kwDOB-71UM5PbzBE
8,117
[sdk] SlicedClassificationMetrics Artifact load_confusion_matrix has incorrect call
{ "login": "hollyhutson", "id": 52940937, "node_id": "MDQ6VXNlcjUyOTQwOTM3", "avatar_url": "https://avatars.githubusercontent.com/u/52940937?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hollyhutson", "html_url": "https://github.com/hollyhutson", "followers_url": "https://api.github.com/users/hollyhutson/followers", "following_url": "https://api.github.com/users/hollyhutson/following{/other_user}", "gists_url": "https://api.github.com/users/hollyhutson/gists{/gist_id}", "starred_url": "https://api.github.com/users/hollyhutson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hollyhutson/subscriptions", "organizations_url": "https://api.github.com/users/hollyhutson/orgs", "repos_url": "https://api.github.com/users/hollyhutson/repos", "events_url": "https://api.github.com/users/hollyhutson/events{/privacy}", "received_events_url": "https://api.github.com/users/hollyhutson/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false }
[ { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-08-09T04:17:56"
"2022-08-11T22:45:43"
null
NONE
null
### Environment * KFP version: N/A * KFP SDK version: `1.8.13` ### Steps to reproduce Use a `SlicedClassificationMetrics` Artifact and try to call `load_confusion_matrix`. Internally this calls `log_confusion_matrix_cell` on the `ClassificationMetrics` object instead of `log_confusion_matrix`, which results in a type error: `TypeError: log_confusion_matrix_cell() missing 1 required positional argument: 'value'` ### Expected result Calling `load_confusion_matrix` on `SlicedClassificationMetrics` doesn't break, and correctly adds the confusion matrix to the right slice in the metadata. ### Materials and Reference Full reproducible sample: ```python from kfp.v2.dsl import component, Output, SlicedClassificationMetrics from kfp.v2 import compiler import kfp @component() def simple_metric_op(sliced_eval_metrics: Output[SlicedClassificationMetrics]): sliced_eval_metrics._sliced_metrics = {} sliced_eval_metrics.load_confusion_matrix('a slice', categories=['cat1', 'cat2'], matrix=[[1,0],[2,4]] ) @kfp.dsl.pipeline(name="test-sliced-metric") def metric_test_pipeline(): m_op = simple_metric_op() compiler.Compiler().compile( pipeline_func=metric_test_pipeline, package_path="evaluation_pipeline.json", ) ``` See the incorrect line of code here: https://github.com/kubeflow/pipelines/blob/061905b6df397c40fbcc4ffafa24d7b3b9daf439/sdk/python/kfp/components/types/artifact_types.py#L452-L464 Where `self._sliced_metrics[slice].log_confusion_matrix_cell( categories, matrix)` Should be `self._sliced_metrics[slice].log_confusion_matrix( categories, matrix)` Since `self._sliced_metrics[slice]` is of type `ClassificationMetrics` after the call to `self._upsert_classification_metrics_for_slice(slice)` As an aside - `_sliced_metrics` is never initialised in the class, leaving it up to the user of the Artifact class, which isn't very intuitive. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8117/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8117/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8111
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8111/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8111/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8111/events
https://github.com/kubeflow/pipelines/issues/8111
1,332,053,763
I_kwDOB-71UM5PZYcD
8,111
[frontend] Artifact API Does not support S3 Regions
{ "login": "kdubovikov", "id": 832185, "node_id": "MDQ6VXNlcjgzMjE4NQ==", "avatar_url": "https://avatars.githubusercontent.com/u/832185?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kdubovikov", "html_url": "https://github.com/kdubovikov", "followers_url": "https://api.github.com/users/kdubovikov/followers", "following_url": "https://api.github.com/users/kdubovikov/following{/other_user}", "gists_url": "https://api.github.com/users/kdubovikov/gists{/gist_id}", "starred_url": "https://api.github.com/users/kdubovikov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kdubovikov/subscriptions", "organizations_url": "https://api.github.com/users/kdubovikov/orgs", "repos_url": "https://api.github.com/users/kdubovikov/repos", "events_url": "https://api.github.com/users/kdubovikov/events{/privacy}", "received_events_url": "https://api.github.com/users/kdubovikov/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[ { "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false } ]
null
[ "My bad, I've found that it actually uses `AWS_REGION` variable here: https://github.com/kubeflow/pipelines/blob/e8abec24fed4c4f8be6f527207b1cec9811ce3e7/frontend/server/configs.ts#L129. Will add that to deployment to test if it works", "It works as expected if I specify `AWS_REGION`, `AWS_SECRET_ACCESS_KEY`, and `AWS_ACCESS_KEY_ID `", "Actually, I think that there is still an issue when we use multi-profile setup. The problem lies in the `ml-pipeline-ui-artifact` deployment which is automatically created for each profile. \r\n\r\nHere, https://github.com/kubeflow/pipelines/blob/a0a8f1da8cb7ca53cde7717aa78e666b634fec75/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py#L304 we sync only `MINIO_*` variables. However, when using s3, this code suggests that we should provide `AWS_REGION`, `AWS_SECRET_ACCESS_KEY`, and `AWS_ACCESS_KEY_ID`.\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/e8abec24fed4c4f8be6f527207b1cec9811ce3e7/frontend/server/configs.ts#L129\r\n\r\nThat means that we should add those variables to `pipelines-profile-controller`. When I add them manually to the `ml-pipeline-ui-artifact` UI fetches all s3 artifacts in the Pipeline UI as expected.\r\n\r\nAnother possible solution would be to add `MINIO_REGION` variable mapping here istead of modifying `pipelines-profile-controller` \r\nhttps://github.com/kubeflow/pipelines/blob/e8abec24fed4c4f8be6f527207b1cec9811ce3e7/frontend/server/configs.ts#L139. Although, I am not entirely sure that it will work.", "Hi @kdubovikov @chensun,\r\n\r\nAlso running into this problem after switching Artifact Store persistence to S3.\r\n\r\nWondering if you've seen the latest **MinioJS v7.0.27** with _Assume Web Identity Role_ support https://github.com/minio/minio-js/pull/960 ?\r\n\r\nThis could be part of the fix, without needing any workaround if I'm not mistaken. \r\n\r\n\r\n" ]
"2022-08-08T15:39:02"
"2022-09-01T13:57:30"
null
NONE
null
<img width="1727" alt="Screenshot 2022-08-08 at 18 16 17" src="https://user-images.githubusercontent.com/832185/183455928-0c6c6c10-da25-4c2b-b19f-13573e43bafe.png"> <img width="1719" alt="Screenshot 2022-08-08 at 18 16 02" src="https://user-images.githubusercontent.com/832185/183455906-ba856df3-d657-4bbb-922f-572c7faf2aae.png"> Artifact fetching API does not seem to support region for s3 resources. If we look here, https://github.com/kubeflow/pipelines/blob/e8abec24fed4c4f8be6f527207b1cec9811ce3e7/frontend/server/minio-helper.ts#L50, the region parameter is not being passed and it is not a part of the artefact API as well. However, MINIO Client API allows you to specify region when creating a client: https://docs.min.io/docs/javascript-client-api-reference.html. I think API should fetch default region using `MINIO_REGION` environment variable which should be passed as a part of `ml-pipeline-ui-artifact` k8s deployment `env` map. ### Environment * How did you deploy Kubeflow Pipelines (KFP)? [kubeflow-manifests](https://github.com/awslabs/kubeflow-manifests) * KFP version: 1.5.1 ### Steps to reproduce 1. Launch any pipeline 2. Go to Run UI 3. Try to download any output 4. Observe the error: Failed to get object in bucket [bucket] at path [path]: S3Error: The authorization header is malformed; the region 'us-east-1' is wrong; expecting '[actual bucket region]' ### Expected result <!-- What should the correct behavior be? --> ### Materials and Reference
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8111/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8111/timeline
null
reopened
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8104
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8104/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8104/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8104/events
https://github.com/kubeflow/pipelines/issues/8104
1,328,941,966
I_kwDOB-71UM5PNguO
8,104
[feature] Make managing the artifact storage and cache available in the UI
{ "login": "TobiasGoerke", "id": 13769461, "node_id": "MDQ6VXNlcjEzNzY5NDYx", "avatar_url": "https://avatars.githubusercontent.com/u/13769461?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TobiasGoerke", "html_url": "https://github.com/TobiasGoerke", "followers_url": "https://api.github.com/users/TobiasGoerke/followers", "following_url": "https://api.github.com/users/TobiasGoerke/following{/other_user}", "gists_url": "https://api.github.com/users/TobiasGoerke/gists{/gist_id}", "starred_url": "https://api.github.com/users/TobiasGoerke/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TobiasGoerke/subscriptions", "organizations_url": "https://api.github.com/users/TobiasGoerke/orgs", "repos_url": "https://api.github.com/users/TobiasGoerke/repos", "events_url": "https://api.github.com/users/TobiasGoerke/events{/privacy}", "received_events_url": "https://api.github.com/users/TobiasGoerke/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Is it also possible to do this from the runs page? E.g. if you delete a run it tries to delete the artifacts and if you delete an experiment it deletes all runs and therefore all corresponding artifacts. This is what most datascientists expect when they delete a run. You are using ml-metadata which is not namespace isolated yet and therefore 100 % insecure https://github.com/kubeflow/pipelines/issues/4790. I am also to open to have it in the insecure ml-metadata part as an addition, but it should be at least available in the currently secure parts, that are structured by runs and experiments.\r\n\r\nThe API belongs in the apiserver and as suggested above we could implement this without UI or API schema changes. We just have to modify the delete run API call to call a new function delete_cache(run) at the end that deletes the corresponding artifacts from the database and or minio as shown in my PR. This is what most datascientists expect when they delete a run. \r\n\r\nIf desired we can also expose in addition a proper AUTHENTICATED https://github.com/kubeflow/pipelines/pull/7819 API delete_cache(run), invalidate_cache(run) for more specialized approaches and the invalidate/delete button in the UI . Then everybody can also automate it as desired. And last but not least maybe environmant variables DELETE_CACHE_ON_RUN_DELETION=TRUE and or INVALIDATE_CACHE_ON_RUN_DELETION=TRUE in pipeline-install-config that are mapped to and respected by the apiserver. \r\n\r\nThis approach covers the basic needs for most users and provides flexibility for advanced usecases.\r\n\r\n@chensun @difince @TobiasGoerke what do you think?", "Any way users can delete artifacts and invalidate caches in the UI is fine for me. Your suggestion to not offer these features separately might even be more user-friendly. As you mentioned, your approach wouldn't require modifying the frontend and is more secure (for now, at least).\r\n\r\nHowever, what about multiple runs sharing the same cache? Deleting any older run would force newer runs to lose their shared cache / artifacts, too\r\n", "> Any way users can delete artifacts and invalidate caches in the UI is fine for me. Your suggestion to not offer these features separately might even be more user-friendly. As you mentioned, your approach wouldn't require modifying the frontend and is more secure (for now, at least).\r\n> \r\n> However, what about multiple runs sharing the same cache? Deleting any older run would force newer runs to lose their shared cache / artifacts, too\r\n\r\nIs this a real problem? The same happens if you would invalidate a single artifact from the ml-metadata UI. It can always affect later runs that used the same artifact as cache.\r\n\r\nYou should definitely present it here Kubeflow Pipelines Community Meeting (PST AM)\r\nhttps://meet.google.com/jkr-dupp-wwm and register it here https://docs.google.com/document/d/1cHAdK1FoGEbuQ-Rl6adBDL5W2YpDiUbnMLIwmoXBoAU/edit first\r\n\r\nMaybe you can reach @chensun or @zijianjoy on slack to discuss it with them first.\r\n", "> E.g. if you delete a run it tries to delete the artifacts and if you delete an experiment it deletes all runs and therefore all corresponding artifacts. \r\n\r\nThis does not happen really, doesn't it? \r\nI have opened a few issues you may find relevant in some ways to the current issue:\r\n- [DeletePipeline does not clean PipelienVersion's data from Minio](https://github.com/kubeflow/pipelines/issues/7368)\r\n- [DeleteExperiment does not clean up all relevant children objects from the DB](https://github.com/kubeflow/pipelines/issues/7982)\r\n\r\nYour two approaches make sense to me, but I'm leaning more toward what Julius suggests because of the existing infrastructure - security /namespace isolation... \r\nBut just to add that for the first time, I took a look at the ml metadata so .. :) \r\n", "> > E.g. if you delete a run it tries to delete the artifacts and if you delete an experiment it deletes all runs and therefore all corresponding artifacts.\r\n> \r\n> This does not happen really, doesn't it? I have opened a few issues you may find relevant in some ways to the current issue:\r\n> \r\n> * [DeletePipeline does not clean PipelienVersion's data from Minio](https://github.com/kubeflow/pipelines/issues/7368)\r\n> * [DeleteExperiment does not clean up all relevant children objects from the DB](https://github.com/kubeflow/pipelines/issues/7982)\r\n> \r\n> Your two approaches make sense to me, but I'm leaning more toward what Julius suggests because of the existing infrastructure - security /namespace isolation... But just to add that for the first time, I took a look at the ml metadata so .. :)\r\n\r\nThanks for your reply, @difince. I've also been inclined to implement @juliusvonkohout's idea. However, turns out it is impossible to deduce cache entries in the db from runs. There simply is no information that could be used to match these. In case you have an idea how this could work, I'd be very glad to hear it!\r\n\r\nGiven this limitation, I've decided to take a different approach and simply make disabling caching entries available in the UI (see [here](https://github.com/kubeflow/pipelines/pull/8177). While this doesn't delete database entries once they were created, there are other open PRs, that should take care of overrunning databases.", "I discussed with @TobiasGoerke that we go for a disable cache switch in the pipeline run UI and a slightly modified version of https://github.com/kubeflow/pipelines/issues/7939#issuecomment-1193910281 . If the maximum cache staleness leads to an empty list from https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L62 then we should scan the database and delete all entries older than max_cache_staleness. This ensures that it is still performant in very large installations and solves another long standing issue of an indefinitely growing cachedb.\r\n\r\nSo TWO environment variables for the cache-server that provide a default (if the pipeline does not have values) AND maximum (larger values in the pipeline are ignored) cache staleness. Then an administrator can set the expiration date on his Minio/S3/GCS storage backend to the same value as the maximum cache staleness and provide a sensible staleness default value for its users pipelines. We limit the user-set value int he pipeline definition to the maximum value from the administrator. The users also do not need to recompile existing pipelines anymore because they can disable the cache from the UI. I think setting the exact cache duration from the UI is overkill and a disable/enable switch is enough. \r\n\r\nI think this covers most usecases, is independent of the storage backend and rather easy to implement. Tobias Goerke already has a POC https://github.com/kubeflow/pipelines/compare/master...TobiasGoerke:pipelines:master https://github.com/kubeflow/pipelines/pull/8177 for the UI change.\r\n\r\n@chensun this is also very much in line what google wants or do you think different?\r\n\r\n" ]
"2022-08-04T17:05:33"
"2022-08-23T09:20:52"
null
NONE
null
I'd like users to be able to manage the artifacts their pipelines have created. This involves disabling or invalidating the cache for certain artifacts so that a pipeline can be re-executed without having to touch code. I've first mentioned this feature [here](https://github.com/kubeflow/pipelines/issues/7939#issuecomment-1204986272)(#7939) and posted the following GIF, exemplifying how such a feature could look like. ![kubeflow_cache](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif) Also, [I've created a design document](https://docs.google.com/document/d/1WdPjTgoDj5AdPNYVzqYwVIWKsWjPPphYD3Ck4DFxSLA/edit#heading=h.c1mjwae174cw) that addresses all details. If you like the feature (or don't), please leave comments here on directly in the document. ### Feature Area /area frontend /area backend /area components --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8104/reactions", "total_count": 7, "+1": 7, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8104/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8103
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8103/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8103/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8103/events
https://github.com/kubeflow/pipelines/issues/8103
1,328,918,911
I_kwDOB-71UM5PNbF_
8,103
[backend] JSONDecodeError: in BigQuery `bigquery_create_model_job` execution
{ "login": "eshaingle", "id": 73927429, "node_id": "MDQ6VXNlcjczOTI3NDI5", "avatar_url": "https://avatars.githubusercontent.com/u/73927429?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eshaingle", "html_url": "https://github.com/eshaingle", "followers_url": "https://api.github.com/users/eshaingle/followers", "following_url": "https://api.github.com/users/eshaingle/following{/other_user}", "gists_url": "https://api.github.com/users/eshaingle/gists{/gist_id}", "starred_url": "https://api.github.com/users/eshaingle/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eshaingle/subscriptions", "organizations_url": "https://api.github.com/users/eshaingle/orgs", "repos_url": "https://api.github.com/users/eshaingle/repos", "events_url": "https://api.github.com/users/eshaingle/events{/privacy}", "received_events_url": "https://api.github.com/users/eshaingle/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Hi Team,\r\nany suggestions here?" ]
"2022-08-04T16:47:19"
"2022-08-08T15:36:08"
null
NONE
null
### What steps did you take ### Executing big query create model functionality through pipeline using image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.13 command: [python3, -u, -m, google_cloud_pipeline_components.container.v1.gcp_launcher.launcher] with type: `BigqueryCreateModelJob`, Also followed the component.yaml format as mentioned here: https://github.com/kubeflow/pipelines/blob/google-cloud-pipeline-components-1.0.13/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/create_model/component.yaml I get below error when executing the pipeline: `Traceback (most recent call last): File "/opt/python3.7/lib/python3.7/site-packages/google_cloud_pipeline_components/container/v1/gcp_launcher/bigquery_job_remote_runner.py", line 332, in bigquery_create_model_job artifact_util.update_output_artifacts(executor_input, [bqml_model_artifact]) File "/opt/python3.7/lib/python3.7/site-packages/google_cloud_pipeline_components/container/v1/gcp_launcher/utils/artifact_util.py", line 51, in update_output_artifacts executor_input_json = json.loads(executor_input) File "/opt/python3.7/lib/python3.7/json/__init__.py", line 348, in loads return _default_decoder.decode(s) File "/opt/python3.7/lib/python3.7/json/decoder.py", line 337, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "/opt/python3.7/lib/python3.7/json/decoder.py", line 353, in raw_decode obj, end = self.scan_once(s, idx) json.decoder.JSONDecodeError: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)` This is because of the `--executor_input, "{{$}}",` param in the component.yaml file It is not an input parameter so it should take and proceed but it fails. ### What did you expect to happen: ### That the pipeline would run successfully Please provide any suggestion/solution how to bypass the error and make the pipeline run success
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8103/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8103/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8097
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8097/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8097/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8097/events
https://github.com/kubeflow/pipelines/issues/8097
1,326,753,840
I_kwDOB-71UM5PFKgw
8,097
[sdk] Compilation with V2_COMPATIBLE mode fails when inputPath/outputPath name contains an empty space
{ "login": "ysk24ok", "id": 3449164, "node_id": "MDQ6VXNlcjM0NDkxNjQ=", "avatar_url": "https://avatars.githubusercontent.com/u/3449164?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ysk24ok", "html_url": "https://github.com/ysk24ok", "followers_url": "https://api.github.com/users/ysk24ok/followers", "following_url": "https://api.github.com/users/ysk24ok/following{/other_user}", "gists_url": "https://api.github.com/users/ysk24ok/gists{/gist_id}", "starred_url": "https://api.github.com/users/ysk24ok/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ysk24ok/subscriptions", "organizations_url": "https://api.github.com/users/ysk24ok/orgs", "repos_url": "https://api.github.com/users/ysk24ok/repos", "events_url": "https://api.github.com/users/ysk24ok/events{/privacy}", "received_events_url": "https://api.github.com/users/ysk24ok/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Hi @ysk24ok. We no longer support v2-compatible mode. If you'd like to use this compilation style, please consider using the official KFP v2 pre-release (`pip install kfp --pre`). Otherwise, please use a name without a space for v2 compatible mode.", "> please use a name without a space for v2 compatible mode. \r\n\r\n@connor-mccarthy This is not always possible, looking at the katib component, it currently does not work for V2 Compatible mode due to spaces in the input/outputs https://github.com/kubeflow/pipelines/blob/b9a052319e31385f90eb6c8704474376856d3900/components/kubeflow/katib-launcher/component.yaml#L21 \r\n \r\nIn our case we're using Kubeflow 1.4.1, V2 engine is not supported and we don't want to use V1 Legacy mode so we're sort of stuck in the middle and have to use V2 Compatible Mode. \r\n \r\nWe'd love to upgrade KFP to 2.0 beta but it looks like Instructions on how to upgrade KFP backend for existing Kubeflow installation are Google Cloud Platform specific. \r\nWe're not on GCP and for everyone not on GCP (eg: AWS or Azure etc), we're sort of left with no solution.\r\n \r\nIdeally there should be clear instructions available for any installation, otherwise it makes everyone not on GCP in a sticky situation IMO.", "Thanks for explaining @AlexandreBrown. I'm reopening and labelling this as a KFP SDK feature request to handle the spaces issue.\r\n\r\n@zijianjoy, do you have any more general KFP BE upgrade instructions that you can provide to @AlexandreBrown?", "Hello @AlexandreBrown , can you share more about what your kubernetes environment is (tool and k8s version) and which cloud provider you are using?\r\n\r\nIn general, you can follow https://www.kubeflow.org/docs/components/pipelines/installation/standalone-deployment/#upgrading-kubeflow-pipelines which is platform agnostic. But because you are using full Kubeflow, you need to review the difference between new version and existing version of KFP before upgrading KFP in full kubeflow. \r\n\r\nFinally, for any upgrade instruction for full Kubeflow, you can issue request in https://github.com/kubeflow/manifests#kubeflow-manifests. And feel free to contribute writing upgrade instruction too!", "Hi @zijianjoy , we're using Kubernetes 1.21, running Kubeflow 1.4.1 on AWS (EKS), I believe this version of Kubeflow came with KFP backend 1.8.2, here is the dependencies we have in our notebooks atm : \r\n![image](https://user-images.githubusercontent.com/26939775/183143042-d4570785-a9dd-4e48-bf43-97d6f056f6a7.png)\r\n \r\n@surajkota Can you comment on whether it is safe to follow the standalone-deployment instructions to upgrade KFP for Kubeflow deployed on AWS (using RDS-S3-Cognito integrations) ? ", "@AlexandreBrown For AWS, this is how you deploy vanilla Kubeflow on EKS https://awslabs.github.io/kubeflow-manifests/docs/deployment/vanilla/guide/#kubeflow-pipelines, so you just need to change the version to KFP 1.8.3 and compare the diff. If you are using AWS distribution, then @surajkota can provide more info about it.\r\n\r\nNote that current Kubeflow 1.4.1 won't work on Kubernetes 1.22+. Once Kubeflow 1.6 is released, it will be able to support Kubernetes 1.22+", "@AlexandreBrown no, you cannot use standalone deployment instructions to upgrade KFP when using Kubeflow. KFP deployed with a Kubeflow installation is multi user flavour which depends on other components like Istio, cert-manager etc. Kubeflow-1.4 installs KFP-1.7. @zijianjoy to use v2 and address this issue, user would have to install v2.0-alpha correct?\r\n\r\nUnfortunately as of today I dont think there is clear guidance on Kubeflow upgrade or if you can just upgrade KFP component in particular Kubeflow versions", "@surajkota That is right, for using V2 feature, you need to start using KFP v2.0.0-alpha or above. \r\n\r\nIn general, you maintain a copy of Kubeflow manifests locally, whenever you want to upgrade a kubeflow component, like KFP. You can compare the diff between old version and new version, and use tools like kpt, helm chart, etc. to perform upgrade." ]
"2022-08-03T06:42:59"
"2022-08-24T05:37:13"
null
CONTRIBUTOR
null
### Environment * KFP version: <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP SDK version: 1.8.13 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * All dependencies version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> ``` kfp 1.8.13 kfp-pipeline-spec 0.1.16 kfp-server-api 1.8.3 ``` ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ```py import kfp from kfp.v2 import dsl from kfp.v2.dsl import component from kfp.v2.dsl import Artifact, Output @component def file_generator(path: Output[Artifact]): with open(path, "w") as f: f.write("Hello World!") input_path_op = kfp.components.load_component_from_text(""" name: Input path consumer inputs: - {name: input path} implementation: container: image: alpine command: - sh - -exc - cat $0 - inputPath: input path """) output_path_op = kfp.components.load_component_from_text(""" name: output path generator outputs: - {name: output path} implementation: container: image: alpine command: - sh - -exc - echo Hello World! > $0 - outputPath: output path """) @dsl.pipeline(name='my-pipeline') def my_pipeline(): file_generator_task = file_generator() input_path_op(file_generator_task.outputs["path"]) # Comment this line out when you want to test the behavior of a component with outputs # output_path_op() kfp.compiler.Compiler(mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE).compile( pipeline_func=my_pipeline, package_path='pipeline.yaml') ``` --- Run the code and we will get the following error. ```console % python3 pipeline.py /Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py:79: UserWarning: V2_COMPATIBLE execution mode is at Beta quality. Some pipeline features may not work as expected. warnings.warn('V2_COMPATIBLE execution mode is at Beta quality.' Traceback (most recent call last): File "/Users/y-nishioka/repos/test_kfp/pipeline.py", line 48, in <module> kfp.compiler.Compiler(mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE).compile( File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 1175, in compile self._create_and_write_workflow( File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 1227, in _create_and_write_workflow workflow = self._create_workflow(pipeline_func, pipeline_name, File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 1058, in _create_workflow workflow = self._create_pipeline_workflow( File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 786, in _create_pipeline_workflow templates = self._create_dag_templates(pipeline, op_transformers) File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 751, in _create_dag_templates v2_compat.update_op( File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/v2_compat.py", line 158, in update_op "metadataPath": op.input_artifact_paths[artifact_name], KeyError: 'input path' ``` `op.input_artifact_paths` is `{'input-path': '/tmp/inputs/input_path/data'}` and `artifact_name` is `input path` so we're getting this error. --- In the case of `output_path_op`, we will get the following error. ```console % python3 pipeline.py /Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py:79: UserWarning: V2_COMPATIBLE execution mode is at Beta quality. Some pipeline features may not work as exp ected. warnings.warn('V2_COMPATIBLE execution mode is at Beta quality.' Traceback (most recent call last): File "/Users/y-nishioka/repos/test_kfp/pipeline.py", line 48, in <module> kfp.compiler.Compiler(mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE).compile( File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 1175, in compile self._create_and_write_workflow( File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 1227, in _create_and_write_workflow workflow = self._create_workflow(pipeline_func, pipeline_name, File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 1058, in _create_workflow workflow = self._create_pipeline_workflow( File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 786, in _create_pipeline_workflow templates = self._create_dag_templates(pipeline, op_transformers) File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/compiler.py", line 751, in _create_dag_templates v2_compat.update_op( File "/Users/y-nishioka/repos/test_kfp/venv/lib/python3.9/site-packages/kfp/compiler/v2_compat.py", line 187, in update_op "metadataPath": op.file_outputs[artifact_name], KeyError: 'output path' ``` `op.file_outputs` is `{'output-path': '/tmp/outputs/output_path/data'}` and `artifact_name` is `output path` so we're getting this error. ### Expected result <!-- What should the correct behavior be? --> Compilation succeeds. [v2 SDK doc](https://www.kubeflow.org/docs/components/pipelines/sdk-v2/component-development/#using-your-component-in-a-pipeline) says > Input and output names are converted to Pythonic names (spaces and symbols are replaced with underscores and letters are converted to lowercase). For example, an input named Input 1 is converted to input_1. So using a name with empty spaces should be allowed in v2. ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8097/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8097/timeline
null
reopened
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8096
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8096/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8096/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8096/events
https://github.com/kubeflow/pipelines/issues/8096
1,326,701,089
I_kwDOB-71UM5PE9oh
8,096
container not ready status showing
{ "login": "kotalakshman", "id": 81070473, "node_id": "MDQ6VXNlcjgxMDcwNDcz", "avatar_url": "https://avatars.githubusercontent.com/u/81070473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kotalakshman", "html_url": "https://github.com/kotalakshman", "followers_url": "https://api.github.com/users/kotalakshman/followers", "following_url": "https://api.github.com/users/kotalakshman/following{/other_user}", "gists_url": "https://api.github.com/users/kotalakshman/gists{/gist_id}", "starred_url": "https://api.github.com/users/kotalakshman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kotalakshman/subscriptions", "organizations_url": "https://api.github.com/users/kotalakshman/orgs", "repos_url": "https://api.github.com/users/kotalakshman/repos", "events_url": "https://api.github.com/users/kotalakshman/events{/privacy}", "received_events_url": "https://api.github.com/users/kotalakshman/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "@kotalakshman, could you please provide some more information or screenshots to help us diagnose this issue?", "issue got resolved.\r\n\r\nThanks" ]
"2022-08-03T05:33:06"
"2022-08-09T05:22:32"
"2022-08-09T05:22:32"
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.7.1 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ### Expected result <!-- What should the correct behavior be? --> ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8096/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8096/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8095
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8095/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8095/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8095/events
https://github.com/kubeflow/pipelines/issues/8095
1,326,656,451
I_kwDOB-71UM5PEyvD
8,095
[bug] TypeError: Object of type PipelineParam is not JSON serializable
{ "login": "lightingghost", "id": 13108255, "node_id": "MDQ6VXNlcjEzMTA4MjU1", "avatar_url": "https://avatars.githubusercontent.com/u/13108255?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lightingghost", "html_url": "https://github.com/lightingghost", "followers_url": "https://api.github.com/users/lightingghost/followers", "following_url": "https://api.github.com/users/lightingghost/following{/other_user}", "gists_url": "https://api.github.com/users/lightingghost/gists{/gist_id}", "starred_url": "https://api.github.com/users/lightingghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lightingghost/subscriptions", "organizations_url": "https://api.github.com/users/lightingghost/orgs", "repos_url": "https://api.github.com/users/lightingghost/repos", "events_url": "https://api.github.com/users/lightingghost/events{/privacy}", "received_events_url": "https://api.github.com/users/lightingghost/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Hi @lightingghost. Using a component within a list comprehension is not a permitted component usage pattern in the KFP SDK and will result in unpredictable behavior. This is the source of the error.\r\n\r\n`outputs = {i: compute(i).output for i in range(10)}`" ]
"2022-08-03T04:18:31"
"2022-08-04T22:46:55"
"2022-08-04T22:46:55"
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * KFP SDK version: 1.8.13 ### Steps to reproduce ``` import kfp import kfp.v2.dsl as dsl @dsl.component def compute(x: int) -> int: return x @dsl.component def print_op(results: dict): print(results) @dsl.pipeline(name='pipeline') def pipeline(): outputs = {i: compute(i).output for i in range(10)} print_op(outputs) kfp.v2.compiler.Compiler().compile( pipeline_func=pipeline, package_path="/tmp/test_pipeline.json" ) ``` it give error of `TypeError: Object of type PipelineParam is not JSON serializable` ### Expected result it compiles without an error ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> /area backend --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8095/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8095/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8094
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8094/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8094/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8094/events
https://github.com/kubeflow/pipelines/issues/8094
1,326,651,262
I_kwDOB-71UM5PExd-
8,094
[feature] Nested component
{ "login": "lightingghost", "id": 13108255, "node_id": "MDQ6VXNlcjEzMTA4MjU1", "avatar_url": "https://avatars.githubusercontent.com/u/13108255?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lightingghost", "html_url": "https://github.com/lightingghost", "followers_url": "https://api.github.com/users/lightingghost/followers", "following_url": "https://api.github.com/users/lightingghost/following{/other_user}", "gists_url": "https://api.github.com/users/lightingghost/gists{/gist_id}", "starred_url": "https://api.github.com/users/lightingghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lightingghost/subscriptions", "organizations_url": "https://api.github.com/users/lightingghost/orgs", "repos_url": "https://api.github.com/users/lightingghost/repos", "events_url": "https://api.github.com/users/lightingghost/events{/privacy}", "received_events_url": "https://api.github.com/users/lightingghost/received_events", "type": "User", "site_admin": false }
[ { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Hi @lightingghost. Thanks for your feature request. This is concept of using a pipeline as a task is actually currently in development. You can look forward to this in an upcoming release.", "heyyyy @connor-mccarthy, are there any updates or planned release date for that particular feature?\r\nHow could we help to move it forward?\r\n\r\nYou mentioned that it's in development, if there are any unofficial ways to test it, please let me know πŸ˜„ \r\n\r\n", "This was released in [`2.0.0-beta.4`](https://github.com/kubeflow/pipelines/blob/39e4c3686acb75d2137cc1e17e19837fd85c3632/sdk/RELEASE.md#200-beta4). I suggest using [`2.0.0-beta.12`](https://github.com/kubeflow/pipelines/blob/39e4c3686acb75d2137cc1e17e19837fd85c3632/sdk/RELEASE.md#200-beta12), which includes other fixes/features since.\r\n\r\nYou can use pipelines in pipelines exactly as if they are components.\r\n\r\nHere's an example of the usage: https://github.com/kubeflow/pipelines/blob/39e4c3686acb75d2137cc1e17e19837fd85c3632/sdk/python/test_data/pipelines/pipeline_in_pipeline.py", "Hi there, is there a way to gather outputs of these nested pipelines as in component.outputs?", "@hugzee3, you can do this much like you would with components. [Here](https://www.kubeflow.org/docs/components/pipelines/v2/pipelines/pipeline-basics/#pipelines-as-components) is an example from the documentation.", "@connor-mccarthy , thank you! ", "Hi! can I nest pipelines/components in V1 as well? or any work around you know?\r\nI've seen there is a native way to do so in V2 but unfortunately need to use the older version.\r\n\r\nCurrently was not able to do such thing. Thanks!", "@OmriLevyTau, v1 does not support the pipeline-in-pipeline authoring style.", "thank you!" ]
"2022-08-03T04:10:03"
"2023-08-30T17:14:25"
"2023-03-21T17:48:59"
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area samples --> /area components ### What feature would you like to see? I would like to be able to do nested call to components, for example ```python @component def foo(): pass @component def bar(): foo() ``` ### What is the use case or pain point? * It helps reuse the code. ### Is there a workaround currently? No. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8094/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8094/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8087
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8087/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8087/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8087/events
https://github.com/kubeflow/pipelines/issues/8087
1,324,028,715
I_kwDOB-71UM5O6xMr
8,087
failed to authenticate with kubeflow pipeline [bug] <HTTP 500 error>
{ "login": "LeBoyOrion", "id": 72968952, "node_id": "MDQ6VXNlcjcyOTY4OTUy", "avatar_url": "https://avatars.githubusercontent.com/u/72968952?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LeBoyOrion", "html_url": "https://github.com/LeBoyOrion", "followers_url": "https://api.github.com/users/LeBoyOrion/followers", "following_url": "https://api.github.com/users/LeBoyOrion/following{/other_user}", "gists_url": "https://api.github.com/users/LeBoyOrion/gists{/gist_id}", "starred_url": "https://api.github.com/users/LeBoyOrion/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LeBoyOrion/subscriptions", "organizations_url": "https://api.github.com/users/LeBoyOrion/orgs", "repos_url": "https://api.github.com/users/LeBoyOrion/repos", "events_url": "https://api.github.com/users/LeBoyOrion/events{/privacy}", "received_events_url": "https://api.github.com/users/LeBoyOrion/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[ "the second error message :\r\n\r\n HTTP response body: {\"error\":\"Internal error: Unauthenticated: Request header error: there is no user identity header.: Request header error: there is no user identity header.\\nFailed to authorize with API resource references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).canAccessRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:390\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).GetRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:151\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1519\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1521\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\\nFailed to authorize the request\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).GetRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:153\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1519\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1521\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\",\"code\":13,\"message\":\"Internal error: Unauthenticated: Request header error: there is no user identity header.: Request header error: there is no user identity header.\\nFailed to authorize with API resource references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).canAccessRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:390\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).GetRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:151\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1519\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1521\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\\nFailed to authorize the request\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).GetRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:153\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1519\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1521\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\",\"details\":[{\"@type\":\"type.googleapis.com/api.Error\",\"error_message\":\"Internal error: Unauthenticated: Request header error: there is no user identity header.: Request header error: there is no user identity header.\\nFailed to authorize with API resource references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).canAccessRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:390\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).GetRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:151\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1519\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1521\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\\nFailed to authorize the request\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).GetRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:153\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1519\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1521\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\",\"error_details\":\"Internal error: Unauthenticated: Request header error: there is no user identity header.: Request header error: there is no user identity header.\\nFailed to authorize with API resource references\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).canAccessRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:390\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).GetRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:151\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1519\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1521\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\\nFailed to authorize the request\\ngithub.com/kubeflow/pipelines/backend/src/common/util.Wrap\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/common/util/error.go:275\\ngithub.com/kubeflow/pipelines/backend/src/apiserver/server.(*RunServer).GetRun\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/server/run_server.go:153\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler.func1\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1519\\nmain.apiServerInterceptor\\n\\t/go/src/github.com/kubeflow/pipelines/backend/src/apiserver/interceptor.go:30\\ngithub.com/kubeflow/pipelines/backend/api/go_client._RunService_GetRun_Handler\\n\\t/go/src/github.com/kubeflow/pipelines/backend/api/go_client/run.pb.go:1521\\ngoogle.golang.org/grpc.(*Server).processUnaryRPC\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1210\\ngoogle.golang.org/grpc.(*Server).handleStream\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:1533\\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.2\\n\\t/go/pkg/mod/google.golang.org/grpc@v1.34.0/server.go:871\\nruntime.goexit\\n\\t/usr/local/go/src/runtime/asm_amd64.s:1357\"}]}\r\n", "/cc @zijianjoy ", "Question: Which endpoint are you trying to reach within this `load` step? It looks like you need to authenticate before making request: Some documentation might be relevant in terms of making network call within cluster: https://www.kubeflow.org/docs/components/pipelines/sdk/connect-api/#multi-user-mode", "@LeBoyOrion \r\nIf you are trying to access Kubeflow Pipelines SDK from a Notebook instance inside Kubeflow, then I think I know what the problem is. \r\n\r\nYou can create this `PodDefault` definition:\r\n\r\n```yaml\r\napiVersion: kubeflow.org/v1alpha1\r\nkind: PodDefault\r\nmetadata:\r\n name: access-ml-pipeline\r\n namespace: kubeflow-user-example-com\r\nspec:\r\n desc: Allow access to Kubeflow Pipelines\r\n selector:\r\n matchLabels:\r\n access-ml-pipeline: \"true\"\r\n volumes:\r\n - name: volume-kf-pipeline-token\r\n projected:\r\n sources:\r\n - serviceAccountToken:\r\n path: token\r\n expirationSeconds: 7200\r\n audience: pipelines.kubeflow.org\r\n volumeMounts:\r\n - mountPath: /var/run/secrets/kubeflow/pipelines\r\n name: volume-kf-pipeline-token\r\n readOnly: true\r\n env:\r\n - name: KF_PIPELINES_SA_TOKEN_PATH\r\n value: /var/run/secrets/kubeflow/pipelines/token\r\n```\r\n\r\nThen, select \"Allow access to Kubeflow Pipelines\" configuration when creatine a Notebook in Kubeflow. Afterwards, you can create Kubeflow Pipelines client like this: \r\n\r\n```python\r\nwith open(os.environ['KF_PIPELINES_SA_TOKEN_PATH'], \"r\") as f:\r\n TOKEN = f.read()\r\n\r\nclient = kfp.Client(\r\n host='http://ml-pipeline.kubeflow.svc.cluster.local:8888',\r\n existing_token=TOKEN,\r\n```", "Hi, i am trying to build my own api on top of the sdk client. My kubeflow is full deployment on GCP. I am attempted to call client.list_experiments(), but it shows the same error as LeBoyOrion. Any suggestion?\r\n\r\nNote: I am using FastAPI and built an website in the same cluster. I am able to call the list_pipelines API but failed to list runs or experiments." ]
"2022-08-01T08:23:46"
"2022-08-15T09:12:42"
null
NONE
null
* How do you deploy Kubeflow Pipelines (KFP)? Kubeflow is deployed on MiniKF locally downloaded on windows using vagrant and Virtual Box downloading steps : [here](https://v0-7.kubeflow.org/docs/started/workstation/getting-started-windows/) * KFP version: i cant fin it, neither on the dahsboard nor on the minikf window. ### Steps to reproduce The problem can be reproduced just by using Kale to create a pipeline and after dividing any notebook into different steps, clicking on compile and run shows an error that i can find by going to the pipeline window logs, the logs show me this : ![Capture](https://user-images.githubusercontent.com/72968952/182102208-f37c8cc1-46b9-4497-95a3-65aee79bb2ba.PNG) the second error message is really long, if someone needs it to maybe understand my issue you will find it as a comment to this post ### Expected result After seeing a lot of videos that implements kale and uses it to create pipelines, the expected result is just a pipeline that runs every cells of the notebook contained in each step using the data of the step before. Thank you in advance for your help. <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8087/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8087/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8076
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8076/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8076/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8076/events
https://github.com/kubeflow/pipelines/issues/8076
1,320,093,507
I_kwDOB-71UM5OrwdD
8,076
[bug] Kubeflow Pipelines Standalone Kustomize incompatible with K8s 1.22+
{ "login": "dsiegel", "id": 746470, "node_id": "MDQ6VXNlcjc0NjQ3MA==", "avatar_url": "https://avatars.githubusercontent.com/u/746470?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dsiegel", "html_url": "https://github.com/dsiegel", "followers_url": "https://api.github.com/users/dsiegel/followers", "following_url": "https://api.github.com/users/dsiegel/following{/other_user}", "gists_url": "https://api.github.com/users/dsiegel/gists{/gist_id}", "starred_url": "https://api.github.com/users/dsiegel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dsiegel/subscriptions", "organizations_url": "https://api.github.com/users/dsiegel/orgs", "repos_url": "https://api.github.com/users/dsiegel/repos", "events_url": "https://api.github.com/users/dsiegel/events{/privacy}", "received_events_url": "https://api.github.com/users/dsiegel/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "I suspect that upgrading from KFP 1.7.1 fixes this. Never mind.", "Hi, Im facing the same issue with Kubernetes 1.22, doesnt seem like KFP 1.7.1+ fixes this" ]
"2022-07-27T20:34:01"
"2023-05-03T10:03:40"
"2022-07-27T20:42:12"
NONE
null
KFP Kustomize setup does not work with Kubernetes 1.22+. The [kustomization.yaml](https://github.com/kubeflow/pipelines/blob/master/manifests/kustomize/cluster-scoped-resources/kustomization.yaml) references an API that was removed (apiextensions.k8s.io/v1beta1). ### Environment * How do you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines Standalone * KFP version: Version: [1.7.1](https://www.github.com/kubeflow/pipelines/commit/1d53de98c731d3cf6ef9017ad19bbfcf19787de3) ### Steps to reproduce ``` > kubectl diff --kustomize "github.com/kubeflow/pipelines/manifests/kustomize/cluster-scoped-resources?ref=1.7.1" error: resource mapping not found for name: "applications.app.k8s.io" namespace: "" from "github.com/kubeflow/pipelines/manifests/kustomize/cluster-scoped-resources?ref=1.7.1": no matches for kind "CustomResourceDefinition" in version "apiextensions.k8s.io/v1beta1" ensure CRDs are installed first ``` ### Expected result It should diff with my current kfp state. ### Materials and reference The beta CustomResourceDefinition API (apiextensions.k8s.io/v1beta1) has been removed. See [Kubernetes 1.22 API changes](https://kubernetes.io/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/). ### Labels backend --- Impacted by this bug? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8076/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8076/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8074
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8074/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8074/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8074/events
https://github.com/kubeflow/pipelines/issues/8074
1,319,210,049
I_kwDOB-71UM5OoYxB
8,074
[feature] Add authorization to all functions in ReportServer
{ "login": "difince", "id": 11557050, "node_id": "MDQ6VXNlcjExNTU3MDUw", "avatar_url": "https://avatars.githubusercontent.com/u/11557050?v=4", "gravatar_id": "", "url": "https://api.github.com/users/difince", "html_url": "https://github.com/difince", "followers_url": "https://api.github.com/users/difince/followers", "following_url": "https://api.github.com/users/difince/following{/other_user}", "gists_url": "https://api.github.com/users/difince/gists{/gist_id}", "starred_url": "https://api.github.com/users/difince/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/difince/subscriptions", "organizations_url": "https://api.github.com/users/difince/orgs", "repos_url": "https://api.github.com/users/difince/repos", "events_url": "https://api.github.com/users/difince/events{/privacy}", "received_events_url": "https://api.github.com/users/difince/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
{ "login": "difince", "id": 11557050, "node_id": "MDQ6VXNlcjExNTU3MDUw", "avatar_url": "https://avatars.githubusercontent.com/u/11557050?v=4", "gravatar_id": "", "url": "https://api.github.com/users/difince", "html_url": "https://github.com/difince", "followers_url": "https://api.github.com/users/difince/followers", "following_url": "https://api.github.com/users/difince/following{/other_user}", "gists_url": "https://api.github.com/users/difince/gists{/gist_id}", "starred_url": "https://api.github.com/users/difince/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/difince/subscriptions", "organizations_url": "https://api.github.com/users/difince/orgs", "repos_url": "https://api.github.com/users/difince/repos", "events_url": "https://api.github.com/users/difince/events{/privacy}", "received_events_url": "https://api.github.com/users/difince/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }, { "login": "difince", "id": 11557050, "node_id": "MDQ6VXNlcjExNTU3MDUw", "avatar_url": "https://avatars.githubusercontent.com/u/11557050?v=4", "gravatar_id": "", "url": "https://api.github.com/users/difince", "html_url": "https://github.com/difince", "followers_url": "https://api.github.com/users/difince/followers", "following_url": "https://api.github.com/users/difince/following{/other_user}", "gists_url": "https://api.github.com/users/difince/gists{/gist_id}", "starred_url": "https://api.github.com/users/difince/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/difince/subscriptions", "organizations_url": "https://api.github.com/users/difince/orgs", "repos_url": "https://api.github.com/users/difince/repos", "events_url": "https://api.github.com/users/difince/events{/privacy}", "received_events_url": "https://api.github.com/users/difince/received_events", "type": "User", "site_admin": false } ]
null
[ "cc: @juliusvonkohout ", "/assign @difince ", "For the next KFP meeting ;-)", "Hello @difince , the `ReportWorkflow` and `ReportScheduledWorkflow` are used by single persistent-agent instance for monitoring the status of workflow. persistent-agent itself cannot and shouldn't authenticate as a user. \r\n/assign @chensun ", "Thank you @zijianjoy for your feedback. How services are supposed to authenticate themself then? Any suggestion?\r\nI guess this reflects on the implementation of https://github.com/kubeflow/pipelines/pull/7819 as well ? ", "> Hello @difince , the `ReportWorkflow` and `ReportScheduledWorkflow` are used by single persistent-agent instance for monitoring the status of workflow. persistent-agent itself cannot and shouldn't authenticate as a user. /assign @chensun\r\n\r\nBy default every kubeflow user can hijack them, since these endpoints are unauthenticated. So first they need ANY kind of authentication.", "> Thank you @zijianjoy for your feedback. How services are supposed to authenticate themself then? Any suggestion? I guess this reflects on the implementation of #7819 as well ?\r\n\r\n@chensun (@zijianjoy college at google) already approved #7819 so i do not think that it is affected. Readartifact etc. is really called by users for a proper reason. Reportworkflow might only be abused for no reason.", "Fixed by #9699 \r\nThe persistence agent authenticates itself via a service account token, meanwhile, the pipeline API server has enabled authentication and authorization logic. " ]
"2022-07-27T08:33:39"
"2023-08-17T19:23:24"
"2023-08-17T19:23:24"
MEMBER
null
### Feature Area There are two API-server endpoints that still miss authorization - [ReportWorkflow](https://github.com/kubeflow/pipelines/blob/d48776fb2d9752ca48af14ea6256791d2b167a71/backend/src/apiserver/server/report_server.go#L32) and [ReportScheduledWorkflow](https://github.com/kubeflow/pipelines/blob/d48776fb2d9752ca48af14ea6256791d2b167a71/backend/src/apiserver/server/report_server.go#L45). This is a security issue. Each endpoint should validate that the user has permission to call them. **persistent-agent** service calls these endpoints. Once authorization is enabled, the persistent-agent need to authorize itself by providing user information in the request headers. This issue is a related/ follow-up issue to PR https://github.com/kubeflow/pipelines/pull/7819 <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? <!-- Provide a description of this feature and the user experience. --> ### What is the use case or pain point? <!-- It helps us understand the benefit of this feature for your use case. --> ### Is there a workaround currently? no. A security issue exists in the current implementation. <!-- Without this feature, how do you accomplish your task today? --> --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8074/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8074/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8067
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8067/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8067/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8067/events
https://github.com/kubeflow/pipelines/issues/8067
1,316,994,341
I_kwDOB-71UM5Of70l
8,067
[question] How does `importer` work under the hood?
{ "login": "deepyaman", "id": 14007150, "node_id": "MDQ6VXNlcjE0MDA3MTUw", "avatar_url": "https://avatars.githubusercontent.com/u/14007150?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deepyaman", "html_url": "https://github.com/deepyaman", "followers_url": "https://api.github.com/users/deepyaman/followers", "following_url": "https://api.github.com/users/deepyaman/following{/other_user}", "gists_url": "https://api.github.com/users/deepyaman/gists{/gist_id}", "starred_url": "https://api.github.com/users/deepyaman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/deepyaman/subscriptions", "organizations_url": "https://api.github.com/users/deepyaman/orgs", "repos_url": "https://api.github.com/users/deepyaman/repos", "events_url": "https://api.github.com/users/deepyaman/events{/privacy}", "received_events_url": "https://api.github.com/users/deepyaman/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @chensun ", "I'm not certain if the current KFP backend implementation on importer is correct or complete at this point. Concept-wise, importer doesn't download an artifact per se, it only retrieves or registers (if not found) an artifact record from MLMD. \r\nIt is launcher that handles downloading from artifact's remote storage URI: https://github.com/kubeflow/pipelines/blob/e312d95aeda209b5314bfb8e9e10077aa0589c2c/backend/src/v2/component/launcher_v2.go#L397\r\n\r\nTo importer, an artifact is always an output of the component, and downloading should happen only when an artifact is a component input.", "@chensun Thanks for the reply (and sorry for the delayed response on my end)! I think that makes sense, and I understand that it's closely tied with MLMD, and therefore there wouldn't be a pure Argo equivalent." ]
"2022-07-25T15:19:13"
"2022-08-09T22:12:36"
"2022-08-09T22:12:36"
CONTRIBUTOR
null
I'm trying to find/understand the underlying Argo YAML for an `importer`. Is https://github.com/kubeflow/pipelines/blob/3e734ed19146f569e910f75627d12239ec2e86dc/backend/src/v2/compiler/argocompiler/testdata/importer.yaml the right place to look (for an example, of how https://github.com/kubeflow/pipelines/blob/c6125ffc44df0eced27ae83451c6243b8fc8d73f/sdk/python/kfp/compiler/test_data/pipelines/pipeline_with_importer.py gets converted)? If so, where is the logic to actually download the artifact from Google Cloud Storage? I feel like, looking at definitions for importer nodes and importer specs, I'm not able to trace back to the place where the downloading is actually happening. My current thought is it's happening on some `kfp-launcher` Docker image, but I'm not sure if that's right, and--even if so--I would like to be able to see the underlying logic getting called. edit: I've looked into this more over the past few days, and I've understood that: 1. [The `artifactUri` is extracted from the `ImporterSpec` and returned from `ImportSpecToMLMDArtifact`](https://github.com/kubeflow/pipelines/blob/master/backend/src/v2/component/importer_launcher.go#L191-L207). 2. [Find and return a matched artifact (if any)](https://github.com/kubeflow/pipelines/blob/master/backend/src/v2/component/importer_launcher.go#L159-L164). Is the implementation of finding the matched artifact dependent on the execution backend? I got a bit lost trying to trace back in there. 3. [Execute](https://github.com/kubeflow/pipelines/blob/master/backend/src/v2/component/importer_launcher.go#L143-L145). I'm still not sure, for example, if you have an `importer` like https://github.com/kubeflow/pipelines/blob/c6125ffc44df0eced27ae83451c6243b8fc8d73f/sdk/python/kfp/compiler/test_data/pipelines/pipeline_with_importer.py#L55-L59 * How does it know how to download an artifact from `gs://` vs `s3://` vs `http://`, etc.? I'm sure there must be some logic to determine protocol, etc., but it's happening somewhere I can't identify. * Is there a reasonable approximation in Argo? Maybe something like: ```yaml arguments: parameters: - name: raw_green_data_url value: https://raw.githubusercontent.com/deepyaman/azureml-examples/main/sdk/jobs/pipelines/2c_nyc_taxi_data_regression/data/greenTaxiData.csv templates: - name: nyc-taxi-data-regression steps: - - name: download-raw-green-data template: importer arguments: artifacts: - name: uri http: # The artifact parameter (this line and the one below) have to be generated programmatically by parsing the URI string. url: "{{workflow.parameters.raw_green_data_url}}" - name: importer inputs: artifacts: - name: uri path: /tmp/data outputs: artifacts: - name: artifact path: /tmp/data container: image: hello-world ```
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8067/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8067/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8061
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8061/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8061/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8061/events
https://github.com/kubeflow/pipelines/issues/8061
1,314,925,604
I_kwDOB-71UM5OYCwk
8,061
[bug] open /tmp/outputs/Output/data: permission denied
{ "login": "szymonbcoding", "id": 63589837, "node_id": "MDQ6VXNlcjYzNTg5ODM3", "avatar_url": "https://avatars.githubusercontent.com/u/63589837?v=4", "gravatar_id": "", "url": "https://api.github.com/users/szymonbcoding", "html_url": "https://github.com/szymonbcoding", "followers_url": "https://api.github.com/users/szymonbcoding/followers", "following_url": "https://api.github.com/users/szymonbcoding/following{/other_user}", "gists_url": "https://api.github.com/users/szymonbcoding/gists{/gist_id}", "starred_url": "https://api.github.com/users/szymonbcoding/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/szymonbcoding/subscriptions", "organizations_url": "https://api.github.com/users/szymonbcoding/orgs", "repos_url": "https://api.github.com/users/szymonbcoding/repos", "events_url": "https://api.github.com/users/szymonbcoding/events{/privacy}", "received_events_url": "https://api.github.com/users/szymonbcoding/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Hello @szymonbcoding, `V2_COMPATIBLE` is not supported any more. You can start using KFP SDK 2.0.0-beta and see if this resolves your problem. Note that there are breaking changes when you use the new v2.0+ SDK. Breaking change: https://docs.google.com/document/d/1nCUUVRXexXbQ0LDkGHsMIBDSu1WvJA9Upy1JzybNVMk/edit", "Hey @zijianjoy, is this document also hosted somewhere else? The doc says I need to request access. ", "Sorry about that, the doc requires Kubeflow community member permission. Here is the public announcement: https://github.com/kubeflow/pipelines/issues/7238" ]
"2022-07-22T12:08:01"
"2023-02-22T17:43:49"
null
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? with Kubeflow on prem * KFP version: No info, just "Build: dev local | Dashboard v0.0.2- | Isolation-Mode: multi-user" in left corner of Kubeflow UI. I got non-admin access to Kubeflow platform. * KFP SDK version: 1.8.13 ### Steps to reproduce I created some KFP's components: https://gist.github.com/szymonbcoding/faa2b5c3d46a4fd99ba1659410c06ffc And then upload created pipeline's yaml file. Unfortunatelly I got error during run of my pipeline: _This step is in Failed state with this message: Error (exit code 64): failed to open /tmp/outputs/Output/data: open /tmp/outputs/Output/data: permission denied_ Does it mean that I have to root permission to use KFP? ### Expected result Successfully execution of my run
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8061/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8061/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8060
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8060/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8060/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8060/events
https://github.com/kubeflow/pipelines/issues/8060
1,314,332,015
I_kwDOB-71UM5OVx1v
8,060
This step is in Error state with this message: Error (exit code 1): failed to put file: You did not provide the number of bytes specified by the Content-Length HTTP header.
{ "login": "grapefruitL", "id": 12489866, "node_id": "MDQ6VXNlcjEyNDg5ODY2", "avatar_url": "https://avatars.githubusercontent.com/u/12489866?v=4", "gravatar_id": "", "url": "https://api.github.com/users/grapefruitL", "html_url": "https://github.com/grapefruitL", "followers_url": "https://api.github.com/users/grapefruitL/followers", "following_url": "https://api.github.com/users/grapefruitL/following{/other_user}", "gists_url": "https://api.github.com/users/grapefruitL/gists{/gist_id}", "starred_url": "https://api.github.com/users/grapefruitL/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/grapefruitL/subscriptions", "organizations_url": "https://api.github.com/users/grapefruitL/orgs", "repos_url": "https://api.github.com/users/grapefruitL/repos", "events_url": "https://api.github.com/users/grapefruitL/events{/privacy}", "received_events_url": "https://api.github.com/users/grapefruitL/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[ "/assign @Linchin \r\n\r\nIt might be something related to argoworkflow: https://github.com/argoproj/argo-workflows/issues?q=is%3Aissue+%22+failed+to+put+file%22+is%3Aclosed.", "We deployed a Kubeflow system with Minio on a NFS Storage and the same error happened.\r\nIt might be related to slow disk speed on Minio, or any timeout configuration?\r\nPlease update if there is any news. Thank you!", "Hi @grapefruitL and @vietanhdev, could you provide a minimum code that will reproduce the error?", "Unfortunately it can't be reproduced and many times a retry will fix it @Linchin ", "I think this issue may be due to slow disk io. The system was installed on an NFS drive <-> HDD." ]
"2022-07-22T05:44:30"
"2023-08-28T20:58:53"
null
NONE
null
My pipeline contains several components, these components have OutputPath type variables, so there will be files stored in minio, but when running, a random component will generate an error: `This step is in Error state with this message: Error (exit code 1): failed to put file: You did not provide the number of bytes specified by the Content-Length HTTP header.` Sometimes Pipeline can run normally again, how is this? What's the matter? ![image](https://user-images.githubusercontent.com/12489866/180371662-36a448dc-2fa0-439d-861e-b39fab0c4b09.png) ![image](https://user-images.githubusercontent.com/12489866/180371896-efb760bd-8f38-4af1-9893-15a610dd6dc4.png)
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8060/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8060/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8053
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8053/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8053/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8053/events
https://github.com/kubeflow/pipelines/issues/8053
1,311,620,760
I_kwDOB-71UM5OLb6Y
8,053
[feature] Re-enable extra_code parameter for Python components.
{ "login": "casassg", "id": 6912589, "node_id": "MDQ6VXNlcjY5MTI1ODk=", "avatar_url": "https://avatars.githubusercontent.com/u/6912589?v=4", "gravatar_id": "", "url": "https://api.github.com/users/casassg", "html_url": "https://github.com/casassg", "followers_url": "https://api.github.com/users/casassg/followers", "following_url": "https://api.github.com/users/casassg/following{/other_user}", "gists_url": "https://api.github.com/users/casassg/gists{/gist_id}", "starred_url": "https://api.github.com/users/casassg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/casassg/subscriptions", "organizations_url": "https://api.github.com/users/casassg/orgs", "repos_url": "https://api.github.com/users/casassg/repos", "events_url": "https://api.github.com/users/casassg/events{/privacy}", "received_events_url": "https://api.github.com/users/casassg/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
{ "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false }
[ { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @connor-mccarthy ", "@casassg, thanks for creating this issue. Since our previous discussion, I want to provide a bit more context about how this functionality will be supported in the v2 KFP SDK, but through a slightly different approach than the `extra_code` parameter.\r\n\r\nIn v2, we're adding support for \"Bring your own container\", which adds a `@dsl.container_component` decorator [[usage example](https://docs.google.com/presentation/d/17A5bk175Nst95U5-Zj7ufWKMjGLNWY1i/edit#slide=id.p13)] for constructing arbitrary containerized tasks via Python code. An arbitrary command can be passed to the `command` parameter.\r\n\r\nThe idea here is that while `extra_code` does add value for some uses-cases, it makes the lightweight Python components slightly less \"lightweight\". There is a simplicity-flexibility tradeoff here and I can see compelling arguments for either approach.\r\n\r\nLet's leave this issue here for a bit to gauge community interest in this feature to \"bridge the gap\" between lightweight and container components.\r\n\r\ncc @zichuan-scott-xu @chensun ", "@connor-mccarthy I think this won't likely work. Essentially what you are asking is users to redo the entire lightweight python decorator just to support non-native types. This seems quite the added overhead for something that seems rather a common use case imho.", "You may be right, @casassg, but I'm not sure I understand a few parts of your response.\r\n\r\nCan you describe what you mean by redo the entire lightweight Python decorator? And can you describe what you mean by non-native types?", "- Redo entire lightweigh decorator means that we would basically need to reimplement much of the logic that parses the signature of a function to generate the input/output spec, extracts function body, load function body and executes it, etc. \r\n- Non-native types means anything that is not imported by default in https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/components/component_factory.py#L314 for example if I suddenly want to use a default based on a typing_extension I cant really use it. or if I want to use a default which is an custom artifact class we cant use it\r\n\r\nInternally for example, we have customized @ component decorator for KFP v1 such that we can do patterns like this:\r\n\r\n```\r\n@component()\r\ndef store_metadata(metadata_path: str = output_path(str, privacy_compliant=True)):\r\n with open(metadata_path, 'w') as f:\r\n f.write(\"SOME IMPORTANT METADATA\")\r\n```\r\n\r\nHowever without extra_code this won't work as `output_path` is not a valid symbol\r\n\r\nThe only way we could make this work in KFP v2 is if somehow we recreate much of the logic in component_factory and add a new import to those lines (or make it a mock function).\r\n\r\nThere may be an easier way that Im missing in v2 btw, but at least from my knowledge of v2 this is not easy to do.", "Thanks, @casassg. That explanation is very helpful and makes a compelling case for including an `extra_code` parameter. \r\n\r\nBTW: Support in lightweight components for custom artifact types from a third-party library is a feature that will be added soon. It doesn't solve for all of the use cases you're describing, but would for the `Input[MyCustomArtifact]` use case." ]
"2022-07-20T18:02:17"
"2022-07-22T17:18:31"
null
CONTRIBUTOR
null
### Feature Area /area sdk ### What feature would you like to see? In KFP v1, there's an option to extend lightweight python components via `extra_code` which enables to have non-primitive values as defaults supported. However in KFPv2 this seems to be largely unsupported. Even in KFP v1, seems to be discouraged to use as `create_component_from_func` does not support it and `func_to_container_op` seems to be officially discouraged by maintainers (see: #7794 ) ### What is the use case or pain point? Extending python lightweigh decorator functionality to use custom defaults that are not primitives. ### Is there a workaround currently? `func_to_container_op` in KFP v1 does seem to work fine. However this ask is more related to keep this functionality moving forward. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8053/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8053/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8052
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8052/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8052/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8052/events
https://github.com/kubeflow/pipelines/issues/8052
1,310,920,049
I_kwDOB-71UM5OIw1x
8,052
[feature] Ability to add Runtime Class name to Kubeflow Pipeline
{ "login": "jayanthp-intellect", "id": 78070105, "node_id": "MDQ6VXNlcjc4MDcwMTA1", "avatar_url": "https://avatars.githubusercontent.com/u/78070105?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jayanthp-intellect", "html_url": "https://github.com/jayanthp-intellect", "followers_url": "https://api.github.com/users/jayanthp-intellect/followers", "following_url": "https://api.github.com/users/jayanthp-intellect/following{/other_user}", "gists_url": "https://api.github.com/users/jayanthp-intellect/gists{/gist_id}", "starred_url": "https://api.github.com/users/jayanthp-intellect/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jayanthp-intellect/subscriptions", "organizations_url": "https://api.github.com/users/jayanthp-intellect/orgs", "repos_url": "https://api.github.com/users/jayanthp-intellect/repos", "events_url": "https://api.github.com/users/jayanthp-intellect/events{/privacy}", "received_events_url": "https://api.github.com/users/jayanthp-intellect/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @jayanthp-intellect, can you provide the reference for changing the default Runtime?", "/assign @chensun ", "@jlyaoyuli Refer to the following containerd config file auto-generated and managed by KOPS 1.23.x ([KOPS GPU support](https://github.com/kubernetes/kops/blob/master/docs/gpu.md)). This is on a GPU node with NVIDIA Drivers + CUDA 11.1. By default, KOPS sets and maintains the `default_runtime_name` to be **runc** which forces us to specify the Kubernetes Runtime Class as **\"nvidia\"** to override this and ensure that the Pod runs using the **nvidia** runtime name instead of **runc** runtime name.\r\n```\r\nversion = 2\r\n\r\n[plugins]\r\n\r\n [plugins.\"io.containerd.grpc.v1.cri\"]\r\n sandbox_image = \"k8s.gcr.io/pause:3.6\"\r\n\r\n [plugins.\"io.containerd.grpc.v1.cri\".containerd]\r\n default_runtime_name = \"runc\"\r\n\r\n [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes]\r\n\r\n [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia]\r\n privileged_without_host_devices = false\r\n runtime_engine = \"\"\r\n runtime_root = \"\"\r\n runtime_type = \"io.containerd.runc.v1\"\r\n\r\n [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia.options]\r\n BinaryName = \"/usr/bin/nvidia-container-runtime\"\r\n SystemdCgroup = true\r\n\r\n [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runc]\r\n runtime_type = \"io.containerd.runc.v2\"\r\n\r\n [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runc.options]\r\n SystemdCgroup = true\r\n```\r\nInorder to be able to set this dynamically in the KubeFlow Pipeline, we need a method similar to `add_toleration` method [here](https://kubeflow-pipelines.readthedocs.io/en/stable/source/kfp.dsl.html)", "@chensun @jlyaoyuli Is the information I provided relevant/sufficient for you guys to proceed or do you need more clarification? Let me know. Thanks.", "@chensun , @jlyaoyuli , @jayanthp-intellect \r\nPlease Find the solution needed : \r\n\r\n[Here](https://kubeflow-pipelines.readthedocs.io/en/1.8.13/_modules/kfp/dsl/_container_op.html#ContainerOp) with kfp sdk we are adding toleration in pod specification similarly we want to add runtime class request in the pod using kfp. \r\n```\r\nclass BaseOp(object):\r\n\r\n def add_toleration(self, tolerations: V1Toleration):\r\n Add K8s tolerations.\r\n\r\n Args:\r\n tolerations: Kubernetes toleration For detailed spec, check toleration\r\n definition\r\n https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_toleration.py\r\n\r\n self.tolerations.append(tolerations)\r\n return self\r\n``` \r\n\r\nWe want this method to be implemented so , we can provide runtime class request directly. \r\nThis mtd is not present yet , I have created to make it clear what we want. \r\n```\r\nclass BaseOp(object):\r\n\r\n def add_runtime_class(self, runtime_class: V1beta1RuntimeClass):\r\n self.runtime_class.append(runtime_class)\r\n return self\r\n\r\n```\r\n\r\nSo once you generate the podspec it should have runtimeclass present in it !\r\n\r\n```\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: mypod\r\nspec:\r\n runtimeClassName: myclass\r\n```\r\n\r\n\r\n\r\n\r\n\r\n", "I am talking about this feature: \r\nhttps://github.com/argoproj/argo-workflows/issues/7519", "@surajkota ", "kOps will also install a RuntimeClass nvidia. As the nvidia runtime is not the default runtime, you will need to add runtimeClassName: nvidia to any Pod spec you want to use for GPU workloads. The RuntimeClass also configures the appropriate node selectors and tolerations to run on GPU Nodes. \r\n\r\nIn kubeflow we cant specify runtime class for pod specification: \r\n\r\n\r\n" ]
"2022-07-20T11:45:08"
"2023-04-12T10:33:59"
null
NONE
null
### Feature Area /area backend /area sdk ### What feature would you like to see? Would like to have a separate method in kfp py package to add Runtime class (https://kubernetes.io/docs/concepts/containers/runtime-class/) to KF Pipeline Config and/or KFP Components. This is already supported in kubernetes py package (https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1RuntimeClass.md) ### What is the use case or pain point? The latest upgrade to kops 1.23.x is altering the default runtime in containerd to **runc** instead of **nvidia**. This is forcing us to use the Runtime Class name to choose **nvidia** runtime for GPU workloads. ### Is there a workaround currently? We manually override the default runtime set in the Containerd to nvidia and recycle the Containerd service (using some script customisation in the Kops Instance Group config) --- Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8052/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8052/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8051
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8051/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8051/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8051/events
https://github.com/kubeflow/pipelines/issues/8051
1,310,444,964
I_kwDOB-71UM5OG82k
8,051
Not able to use Kubeflow PyTorchJob launcher as a Kubeflow Pipeline component - Always waiting
{ "login": "kanwaljitkhurmi", "id": 8376568, "node_id": "MDQ6VXNlcjgzNzY1Njg=", "avatar_url": "https://avatars.githubusercontent.com/u/8376568?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kanwaljitkhurmi", "html_url": "https://github.com/kanwaljitkhurmi", "followers_url": "https://api.github.com/users/kanwaljitkhurmi/followers", "following_url": "https://api.github.com/users/kanwaljitkhurmi/following{/other_user}", "gists_url": "https://api.github.com/users/kanwaljitkhurmi/gists{/gist_id}", "starred_url": "https://api.github.com/users/kanwaljitkhurmi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kanwaljitkhurmi/subscriptions", "organizations_url": "https://api.github.com/users/kanwaljitkhurmi/orgs", "repos_url": "https://api.github.com/users/kanwaljitkhurmi/repos", "events_url": "https://api.github.com/users/kanwaljitkhurmi/events{/privacy}", "received_events_url": "https://api.github.com/users/kanwaljitkhurmi/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[ "anyone else faced this issue ?", "/cc @jagadeeshi2i Would you like to help with this issue? Thank you!", "@kanwaljitkhurmi did the launcher start worker and master pods ? Can you share the logs or describe the pod.\r\n\r\nI could launch pytorch job for the example - https://github.com/kubeflow/pipelines/blob/master/samples/contrib/pytorch-samples/Pipeline-Bert-Dist.ipynb\r\n\r\n![image](https://user-images.githubusercontent.com/46392704/184810125-7ba04cce-b5b1-45ef-a75a-17d404e53974.png)\r\n" ]
"2022-07-20T05:08:37"
"2022-08-16T06:13:16"
null
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? As part of the Kubeflow Manifest 1.4 * KFP version: * KFP SDK version: 1.8.4 ### Steps to reproduce Trying to use Kubeflow PyTorchJob launcher component in the kubeflow pipeline ,however the pipeline component endlessly waits at the main thread with the following logs and does not proceed further with creation of main and worker pods. ``` Generating job template. Creating launcher client. Submitting CR. Creating kubeflow.org/pytorchjobs pytorch-cnn-dist-file-c3 in namespace kubeflow-user-example-com. Created kubeflow.org/pytorchjobs pytorch-cnn-dist-file-c3 in namespace kubeflow-user-example-com. Monitoring job until status is any of ['Succeeded', 'Failed']. ``` Code: ``` !pip install kfp==1.8.4 pytorch_job_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/pytorch-launcher/component.yaml') @dsl.pipeline(name="PyTorch Training pipeline", description="Sample training job test") def pytorch_cnn_n_b_yaml( namespace=kanwal_namespace ): train_task = pytorch_job_op( name='pytorch-cnn-dist-file-c3', namespace='kubeflow-user-example-com', master_spec='{ \ "replicas": 1, \ "restartPolicy": "OnFailure", \ "template": { \ "metadata": { \ "annotations": { \ "sidecar.istio.io/inject": "false" \ } \ }, \ "spec": { \ "containers": [ \ { \ "name": "pytorch1", \ "image": "763104351884.dkr.ecr.us-west-2.amazonaws.com/pytorch-training:1.12.0-gpu-py38-cu116-ubuntu20.04-e3", \ "args": [ \ "python", \ "./sc-claim-dlc/mnist.py", \ "--epochs", "5", \ "--seed", "7", \ "--log-interval", "60" \ ], \ "resources": { \ "limits": { \ "nvidia.com/gpu": 2 \ } \ }, \ "volumeMounts": [ \ { \ "mountPath": "/sc-claim-dlc", \ "name": "sc-claim-dlc" \ } \ ] \ } \ ], \ "volumes": [ \ { \ "name": "sc-claim-dlc", \ "persistentVolumeClaim": { \ "claimName": "sc-claim-dlc" \ } \ } \ ] \ } \ } \ }', worker_spec='{ \ "replicas": 1, \ "restartPolicy": "OnFailure", \ "template": { \ "metadata": { \ "annotations": { \ "sidecar.istio.io/inject": "false" \ } \ }, \ "spec": { \ "containers": [ \ { \ "name": "pytorch2", \ "image": "763104351884.dkr.ecr.us-west-2.amazonaws.com/pytorch-training:1.12.0-gpu-py38-cu116-ubuntu20.04-e3", \ "args": [ \ "python", \ "./sc-claim-dlc/mnist.py", \ "--epochs", "5", \ "--seed", "7", \ "--log-interval", "60" \ ], \ "resources": { \ "limits": { \ "nvidia.com/gpu": 1 \ } \ }, \ "volumeMounts": [ \ { \ "mountPath": "/sc-claim-dlc", \ "name": "sc-claim-dlc" \ } \ ] \ } \ ], \ "volumes": [ \ { \ "name": "sc-claim-dlc", \ "persistentVolumeClaim": { \ "claimName": "sc-claim-dlc" \ } \ } \ ] \ } \ } \ }', delete_after_done=False ) ``` Can you help ? ### Expected result <!-- What should the correct behavior be? --> ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8051/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8051/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8038
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8038/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8038/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8038/events
https://github.com/kubeflow/pipelines/issues/8038
1,306,982,279
I_kwDOB-71UM5N5veH
8,038
Unable to set memory for a Kubeflow component using yaml
{ "login": "patrickmead", "id": 859673, "node_id": "MDQ6VXNlcjg1OTY3Mw==", "avatar_url": "https://avatars.githubusercontent.com/u/859673?v=4", "gravatar_id": "", "url": "https://api.github.com/users/patrickmead", "html_url": "https://github.com/patrickmead", "followers_url": "https://api.github.com/users/patrickmead/followers", "following_url": "https://api.github.com/users/patrickmead/following{/other_user}", "gists_url": "https://api.github.com/users/patrickmead/gists{/gist_id}", "starred_url": "https://api.github.com/users/patrickmead/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickmead/subscriptions", "organizations_url": "https://api.github.com/users/patrickmead/orgs", "repos_url": "https://api.github.com/users/patrickmead/repos", "events_url": "https://api.github.com/users/patrickmead/events{/privacy}", "received_events_url": "https://api.github.com/users/patrickmead/received_events", "type": "User", "site_admin": false }
[]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "In the interim, it appears that I can set the memory in the pipeline json.", "/assign @chensun ", "Hi @patrickmead, are you using KFP SDK v1.8.*? \r\nYou can still set memory limit for components load from file. Example:\r\nhttps://github.com/kubeflow/pipelines/blob/d5bc8ddd6250d90b38cff5759e856f73e71e7d03/sdk/python/kfp/v2/compiler_cli_tests/test_data/pipeline_with_resource_spec.py#L25-L44\r\n\r\n`set_memory_request` is not available/applicable for Vertex AI though. \r\n" ]
"2022-07-17T04:14:45"
"2022-07-28T23:09:37"
null
NONE
null
I'm using yaml for my Kubeflow components which run on Vertex AI. I know that I have alternatives for creating components such as the python SDK, but I would like to use yaml directly if possible (rather than generating it). I see methods like set_memory_limit and set_memory_request for the ContainerOp but when I use methods such as load_component_from_file I have a TaskSpec. Is there a way to set memory and other resources via the Kubeflow component yaml? Thank you.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8038/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8038/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8034
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8034/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8034/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8034/events
https://github.com/kubeflow/pipelines/issues/8034
1,305,520,428
I_kwDOB-71UM5N0Kks
8,034
[frontend] Couldn't get Logs, Pod, and Events of any pipeline components in Runs
{ "login": "wyljpn", "id": 37242439, "node_id": "MDQ6VXNlcjM3MjQyNDM5", "avatar_url": "https://avatars.githubusercontent.com/u/37242439?v=4", "gravatar_id": "", "url": "https://api.github.com/users/wyljpn", "html_url": "https://github.com/wyljpn", "followers_url": "https://api.github.com/users/wyljpn/followers", "following_url": "https://api.github.com/users/wyljpn/following{/other_user}", "gists_url": "https://api.github.com/users/wyljpn/gists{/gist_id}", "starred_url": "https://api.github.com/users/wyljpn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wyljpn/subscriptions", "organizations_url": "https://api.github.com/users/wyljpn/orgs", "repos_url": "https://api.github.com/users/wyljpn/repos", "events_url": "https://api.github.com/users/wyljpn/events{/privacy}", "received_events_url": "https://api.github.com/users/wyljpn/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "There is a similar issue https://github.com/kubeflow/kubeflow/issues/5605 \r\nAfter updating kubernetes-client, it works in his case, but for me, even though I upgraded it to 0.11.2 which supports IPv6, it didn't work.", "Hi @vsk2015 , do you have any idea about this issue?", "Hi @ConverJens , do you have any idea about this issue?", "logs of metadata-writer pod, looks fine\r\n```\r\nConnected to the metadata store\r\nStart watching Kubernetes Pods created by Argo\r\nStart watching Kubernetes Pods created by Argo\r\nStart watching Kubernetes Pods created by Argo\r\nKubernetes Pod event: ADDED demo-07-fbhp2-3334604618 1424528299\r\nKubernetes Pod event: MODIFIED demo-07-fbhp2-3334604618 1424528305\r\nKubernetes Pod event: ADDED demo-07-fbhp2-892013884 1424528307\r\nKubernetes Pod event: MODIFIED demo-07-fbhp2-3334604618 1424528308\r\nKubernetes Pod event: MODIFIED demo-07-fbhp2-892013884 1424528311\r\nKubernetes Pod event: MODIFIED demo-07-fbhp2-892013884 1424528316\r\nKubernetes Pod event: MODIFIED demo-07-fbhp2-892013884 1424528396\r\n...\r\n```", "logs of **metadata-grpc**. It has some errors. The errors happen when running a pipeline. \r\nNot sure whether it's related to this issue. \r\nAnd when checking the logs, pod, and event in components of a pipeline, it print \"GET request without QUERY\"\r\n\r\n**Server listening on 0.0.0.0:8080**\r\nDo I need to change *0.0.0.0* to \"[::]\"? If it is necessary, where I can change it?\r\n\r\n```\r\n2022-07-15 06:59:18.056924: I ml_metadata/metadata_store/metadata_store_server_main.cc:241] Server listening on 0.0.0.0:8080\r\nE0715 06:59:18.630467747 10 http_server_filter.cc:271] GET request without QUERY\r\nE0715 06:59:28.629603359 10 http_server_filter.cc:271] GET request without QUERY\r\nE0715 06:59:38.629599937 10 http_server_filter.cc:271] GET request without QUERY\r\n2022-07-15 07:02:37.560926: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 2\r\nname: \"demo-0715\"\r\nproperties {\r\nkey: \"pipeline_name\"\r\nvalue {\r\nstring_value: \"demo-0715\"\r\n}\r\n}\r\nInternal: mysql_query failed: errno: 1062, error: Duplicate entry '2-demo-0715' for key 'type_id'\r\n2022-07-15 07:02:37.685622: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 3\r\nname: \"demo-0715.demo-0715-qrqqs\"\r\nproperties {\r\nkey: \"pipeline_name\"\r\nvalue {\r\nstring_value: \"demo-0715\"\r\n}\r\n}\r\nproperties {\r\nkey: \"run_id\"\r\nvalue {\r\nstring_value: \"demo-0715-qrqqs\"\r\n}\r\n}\r\nInternal: mysql_query failed: errno: 1062, error: Duplicate entry '3-demo-0715.demo-0715-qrqqs' for key 'type_id'\r\nE0715 07:03:58.629671167 16 http_server_filter.cc:271] GET request without QUERY\r\n2022-07-15 07:04:08.099095: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 2\r\nname: \"demo-0715\"\r\nproperties {\r\nkey: \"pipeline_name\"\r\nvalue {\r\nstring_value: \"demo-0715\"\r\n}\r\n}\r\nInternal: mysql_query failed: errno: 1062, error: Duplicate entry '2-demo-0715' for key 'type_id'\r\n2022-07-15 07:04:08.183685: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 3\r\nname: \"demo-0715.demo-0715-qrqqs\"\r\nproperties {\r\nkey: \"pipeline_name\"\r\nvalue {\r\nstring_value: \"demo-0715\"\r\n}\r\n}\r\nproperties {\r\nkey: \"run_id\"\r\nvalue {\r\nstring_value: \"demo-0715-qrqqs\"\r\n}\r\n}\r\nInternal: mysql_query failed: errno: 1062, error: Duplicate entry '3-demo-0715.demo-0715-qrqqs' for key 'type_id'\r\n```", "Begining part of ml-pipeline-ui logs\r\n```\r\n{\r\n argo: {\r\n archiveArtifactory: 'minio',\r\n archiveBucketName: 'mlpipeline',\r\n archiveLogs: false,\r\n archivePrefix: 'logs'\r\n },\r\n artifacts: 'Artifacts config contains credentials, so it is omitted',\r\n metadata: {\r\n envoyService: { host: '[ipv6 address]', port: '9090' }\r\n },\r\n pipeline: { host: '[ipv6 address]', port: '8888' },\r\n server: {\r\n apiVersionPrefix: 'apis/v1beta1',\r\n basePath: '/pipeline',\r\n deployment: 'KUBEFLOW',\r\n hideSideNav: true,\r\n port: 3000,\r\n staticDir: '/client'\r\n },\r\n viewer: {\r\n tensorboard: { podTemplateSpec: [Object], tfImageName: 'tensorflow/tensorflow' }\r\n },\r\n visualizations: { allowCustomVisualizations: true },\r\n gkeMetadata: { disabled: false },\r\n auth: {\r\n enabled: true,\r\n kubeflowUserIdHeader: 'kubeflow-userid',\r\n kubeflowUserIdPrefix: ''\r\n }\r\n}\r\n[HPM] Proxy created: [Function] -> /artifacts\r\n[HPM] Proxy created: / -> http://[ipv6 ip address]:9090\r\n[HPM] Proxy created: / -> http://127.0.0.1\r\n[HPM] Subscribed to http-proxy events: [ 'error', 'close' ]\r\n[HPM] Proxy created: / -> http://127.0.0.1\r\n[HPM] Subscribed to http-proxy events: [ 'error', 'close' ]\r\n[HPM] Proxy created: / -> http://[ipv6 ip address]:8888\r\n[HPM] Subscribed to http-proxy events: [ 'proxyReq', 'error', 'close' ]\r\n[HPM] Proxy created: / -> http://[ipv6 ip address]:8888\r\n[HPM] Subscribed to http-proxy events: [ 'proxyReq', 'error', 'close' ]\r\nServer listening at http://localhost:3000\r\nGET /apis/v1beta1/healthz\r\n```", "logs of metadata-envoy-deployment:\r\n**admin address: 0.0.0.0:9901**\r\n```\r\n[2022-04-27 17:18:01.576][1][info][main] [source/server/server.cc:249] initializing epoch 0 (hot restart version=11.104)\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:251] statically linked extensions:\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:253] access_loggers: envoy.file_access_log,envoy.http_grpc_access_log,envoy.tcp_grpc_access_log\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:256] filters.http: envoy.buffer,envoy.cors,envoy.csrf,envoy.ext_authz,envoy.fault,envoy.filters.http.adaptive_concurrency,envoy.filters.http.dynamic_forward_proxy,envoy.filters.http.grpc_http1_reverse_bridge,envoy.filters.http.grpc_stats,envoy.filters.http.header_to_metadata,envoy.filters.http.jwt_authn,envoy.filters.http.original_src,envoy.filters.http.rbac,envoy.filters.http.tap,envoy.grpc_http1_bridge,envoy.grpc_json_transcoder,envoy.grpc_web,envoy.gzip,envoy.health_check,envoy.http_dynamo_filter,envoy.ip_tagging,envoy.lua,envoy.rate_limit,envoy.router,envoy.squash\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:259] filters.listener: envoy.listener.http_inspector,envoy.listener.original_dst,envoy.listener.original_src,envoy.listener.proxy_protocol,envoy.listener.tls_inspector\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:262] filters.network: envoy.client_ssl_auth,envoy.echo,envoy.ext_authz,envoy.filters.network.dubbo_proxy,envoy.filters.network.mysql_proxy,envoy.filters.network.rbac,envoy.filters.network.sni_cluster,envoy.filters.network.thrift_proxy,envoy.filters.network.zookeeper_proxy,envoy.http_connection_manager,envoy.mongo_proxy,envoy.ratelimit,envoy.redis_proxy,envoy.tcp_proxy\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:264] stat_sinks: envoy.dog_statsd,envoy.metrics_service,envoy.stat_sinks.hystrix,envoy.statsd\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:266] tracers: envoy.dynamic.ot,envoy.lightstep,envoy.tracers.datadog,envoy.tracers.opencensus,envoy.tracers.xray,envoy.zipkin\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:269] transport_sockets.downstream: envoy.transport_sockets.alts,envoy.transport_sockets.raw_buffer,envoy.transport_sockets.tap,envoy.transport_sockets.tls,raw_buffer,tls\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:272] transport_sockets.upstream: envoy.transport_sockets.alts,envoy.transport_sockets.raw_buffer,envoy.transport_sockets.tap,envoy.transport_sockets.tls,raw_buffer,tls\r\n[2022-04-27 17:18:01.577][1][info][main] [source/server/server.cc:278] buffer implementation: new\r\n[2022-04-27 17:18:01.581][1][warning][misc] [source/common/protobuf/utility.cc:282] Using deprecated option 'envoy.api.v2.listener.Filter.config' from file listener.proto. This configuration will be removed from Envoy soon. Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated for details.\r\n[2022-04-27 17:18:01.583][1][info][main] [source/server/server.cc:344] admin address: 0.0.0.0:9901\r\n[2022-04-27 17:18:01.688][1][info][main] [source/server/server.cc:458] runtime: layers:\r\n- name: base\r\nstatic_layer:\r\n{}\r\n- name: admin\r\nadmin_layer:\r\n{}\r\n[2022-04-27 17:18:01.688][1][info][config] [source/server/configuration_impl.cc:62] loading 0 static secret(s)\r\n[2022-04-27 17:18:01.688][1][info][config] [source/server/configuration_impl.cc:68] loading 1 cluster(s)\r\n[2022-04-27 17:18:01.691][1][info][config] [source/server/configuration_impl.cc:72] loading 1 listener(s)\r\n[2022-04-27 17:18:01.693][1][warning][misc] [source/common/protobuf/utility.cc:282] Using deprecated option 'envoy.api.v2.route.CorsPolicy.allow_origin' from file route.proto. This configuration will be removed from Envoy soon. Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated for details.\r\n[2022-04-27 17:18:01.695][1][info][config] [source/server/configuration_impl.cc:97] loading tracing configuration\r\n[2022-04-27 17:18:01.695][1][info][config] [source/server/configuration_impl.cc:117] loading stats sink configuration\r\n[2022-04-27 17:18:01.695][1][info][main] [source/server/server.cc:549] starting main dispatch loop\r\n[2022-04-27 17:18:01.696][1][info][upstream] [source/common/upstream/cluster_manager_impl.cc:161] cm init: all clusters initialized\r\n[2022-04-27 17:18:01.696][1][info][main] [source/server/server.cc:528] all clusters initialized. initializing init manager\r\n[2022-04-27 17:18:01.696][1][info][config] [source/server/listener_manager_impl.cc:578] all dependencies initialized. starting workers\r\n[2022-04-27 17:33:01.695][1][info][main] [source/server/drain_manager_impl.cc:63] shutting down parent after drain\r\n```", "> logs of **metadata-grpc**. It has some errors. The errors happen when running a pipeline. Not sure whether it's related to this issue. And when checking the logs, pod, and event in components of a pipeline, it print \"GET request without QUERY\"\r\n> \r\n> **Server listening on 0.0.0.0:8080** Do I need to change _0.0.0.0_ to \"[::]\"? If it is necessary, where I can change it?\r\n> \r\n> ```\r\n> 2022-07-15 06:59:18.056924: I ml_metadata/metadata_store/metadata_store_server_main.cc:241] Server listening on 0.0.0.0:8080\r\n> E0715 06:59:18.630467747 10 http_server_filter.cc:271] GET request without QUERY\r\n> E0715 06:59:28.629603359 10 http_server_filter.cc:271] GET request without QUERY\r\n> E0715 06:59:38.629599937 10 http_server_filter.cc:271] GET request without QUERY\r\n> 2022-07-15 07:02:37.560926: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 2\r\n> name: \"demo-0715\"\r\n> properties {\r\n> key: \"pipeline_name\"\r\n> value {\r\n> string_value: \"demo-0715\"\r\n> }\r\n> }\r\n> Internal: mysql_query failed: errno: 1062, error: Duplicate entry '2-demo-0715' for key 'type_id'\r\n> 2022-07-15 07:02:37.685622: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 3\r\n> name: \"demo-0715.demo-0715-qrqqs\"\r\n> properties {\r\n> key: \"pipeline_name\"\r\n> value {\r\n> string_value: \"demo-0715\"\r\n> }\r\n> }\r\n> properties {\r\n> key: \"run_id\"\r\n> value {\r\n> string_value: \"demo-0715-qrqqs\"\r\n> }\r\n> }\r\n> Internal: mysql_query failed: errno: 1062, error: Duplicate entry '3-demo-0715.demo-0715-qrqqs' for key 'type_id'\r\n> E0715 07:03:58.629671167 16 http_server_filter.cc:271] GET request without QUERY\r\n> 2022-07-15 07:04:08.099095: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 2\r\n> name: \"demo-0715\"\r\n> properties {\r\n> key: \"pipeline_name\"\r\n> value {\r\n> string_value: \"demo-0715\"\r\n> }\r\n> }\r\n> Internal: mysql_query failed: errno: 1062, error: Duplicate entry '2-demo-0715' for key 'type_id'\r\n> 2022-07-15 07:04:08.183685: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 3\r\n> name: \"demo-0715.demo-0715-qrqqs\"\r\n> properties {\r\n> key: \"pipeline_name\"\r\n> value {\r\n> string_value: \"demo-0715\"\r\n> }\r\n> }\r\n> properties {\r\n> key: \"run_id\"\r\n> value {\r\n> string_value: \"demo-0715-qrqqs\"\r\n> }\r\n> }\r\n> Internal: mysql_query failed: errno: 1062, error: Duplicate entry '3-demo-0715.demo-0715-qrqqs' for key 'type_id'\r\n> ```\r\n\r\nChanged metadata-grpc\r\n```\r\nconst string server_address = absl::StrCat(\"0.0.0.0:\", FLAGS_grpc_port); \r\n```\r\nin https://github.com/google/ml-metadata/blob/v0.25.1/ml_metadata/metadata_store/metadata_store_server_main.cc#L228 to\r\n```\r\nconst string server_address = absl::StrCat(\"[::]:\", FLAGS_grpc_port);\r\n```\r\nNow server listening on [::]:8080.\r\n\r\n```\r\n2022-07-19 10:06:19.401711: I ml_metadata/metadata_store/metadata_store_server_main.cc:242] Server listening on [::]:8080\r\nE0719 10:06:24.795761300 10 http_server_filter.cc:271] GET request without QUERY\r\nE0719 10:23:44.770060179 10 http_server_filter.cc:271] GET request without QUERY\r\n2022-07-19 10:23:48.265681: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 2\r\nname: \"demo-0719\"\r\nproperties {\r\n key: \"pipeline_name\"\r\n value {\r\n string_value: \"demo-0719\"\r\n }\r\n}\r\nInternal: mysql_query failed: errno: 1062, error: Duplicate entry '2-demo-0719' for key 'type_id'\r\n2022-07-19 10:23:48.281169: W ml_metadata/metadata_store/metadata_store_service_impl.cc:602] PutContexts failed: Given node already exists: type_id: 2\r\nname: \"demo-0719\"\r\nproperties {\r\n key: \"pipeline_name\"\r\n value {\r\n string_value: \"demo-0719\"\r\n }\r\n}\r\n```\r\n\r\nAnd changed metadata-envoy-deployment\r\n```\r\nsocket_address: { address: 0.0.0.0, port_value: 9901 }\r\nsocket_address: { address: 0.0.0.0, port_value: 9090 }\r\n```\r\nin https://github.com/kubeflow/pipelines/blob/1.5.1/third_party/metadata_envoy/envoy.yaml to \r\n```\r\nsocket_address: { address: [::], port_value: 9901 }\r\nsocket_address: { address: [::], port_value: 9090 }\r\n```\r\nlogs\r\n```\r\n[2022-07-20 01:36:07.486][1][info][main] [source/server/server.cc:249] initializing epoch 0 (hot restart version=11.104)\r\n[2022-07-20 01:36:07.486][1][info][main] [source/server/server.cc:251] statically linked extensions:\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:253] access_loggers: envoy.file_access_log,envoy.http_grpc_access_log,envoy.tcp_grpc_access_log\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:256] filters.http: envoy.buffer,envoy.cors,envoy.csrf,envoy.ext_authz,envoy.fault,envoy.filters.http.adaptive_concurrency,envoy.filters.http.dynamic_forward_proxy,envoy.filters.http.grpc_http1_reverse_bridge,envoy.filters.http.grpc_stats,envoy.filters.http.header_to_metadata,envoy.filters.http.jwt_authn,envoy.filters.http.original_src,envoy.filters.http.rbac,envoy.filters.http.tap,envoy.grpc_http1_bridge,envoy.grpc_json_transcoder,envoy.grpc_web,envoy.gzip,envoy.health_check,envoy.http_dynamo_filter,envoy.ip_tagging,envoy.lua,envoy.rate_limit,envoy.router,envoy.squash\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:259] filters.listener: envoy.listener.http_inspector,envoy.listener.original_dst,envoy.listener.original_src,envoy.listener.proxy_protocol,envoy.listener.tls_inspector\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:262] filters.network: envoy.client_ssl_auth,envoy.echo,envoy.ext_authz,envoy.filters.network.dubbo_proxy,envoy.filters.network.mysql_proxy,envoy.filters.network.rbac,envoy.filters.network.sni_cluster,envoy.filters.network.thrift_proxy,envoy.filters.network.zookeeper_proxy,envoy.http_connection_manager,envoy.mongo_proxy,envoy.ratelimit,envoy.redis_proxy,envoy.tcp_proxy\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:264] stat_sinks: envoy.dog_statsd,envoy.metrics_service,envoy.stat_sinks.hystrix,envoy.statsd\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:266] tracers: envoy.dynamic.ot,envoy.lightstep,envoy.tracers.datadog,envoy.tracers.opencensus,envoy.tracers.xray,envoy.zipkin\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:269] transport_sockets.downstream: envoy.transport_sockets.alts,envoy.transport_sockets.raw_buffer,envoy.transport_sockets.tap,envoy.transport_sockets.tls,raw_buffer,tls\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:272] transport_sockets.upstream: envoy.transport_sockets.alts,envoy.transport_sockets.raw_buffer,envoy.transport_sockets.tap,envoy.transport_sockets.tls,raw_buffer,tls\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:278] buffer implementation: new\r\n```\r\nIt didn't print information like *admin address 0.0.0.0:9901* as before. \r\n```\r\n[2022-04-27 17:18:01.581][1][warning][misc] [source/common/protobuf/utility.cc:282] Using deprecated option 'envoy.api.v2.listener.Filter.config' from file listener.proto. This configuration will be removed from Envoy soon. Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated for details.\r\n[2022-04-27 17:18:01.583][1][info][main] [source/server/server.cc:344] admin address: 0.0.0.0:9901\r\n[2022-04-27 17:18:01.688][1][info][main] [source/server/server.cc:458] runtime: layers:\r\n- name: base\r\nstatic_layer:\r\n{}\r\n- name: admin\r\nadmin_layer:\r\n{}\r\n[2022-04-27 17:18:01.688][1][info][config] [source/server/configuration_impl.cc:62] loading 0 static secret(s)\r\n[2022-04-27 17:18:01.688][1][info][config] [source/server/configuration_impl.cc:68] loading 1 cluster(s)\r\n[2022-04-27 17:18:01.691][1][info][config] [source/server/configuration_impl.cc:72] loading 1 listener(s)\r\n[2022-04-27 17:18:01.693][1][warning][misc] [source/common/protobuf/utility.cc:282] Using deprecated option 'envoy.api.v2.route.CorsPolicy.allow_origin' from file route.proto. This configuration will be removed from Envoy soon. Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated for details.\r\n[2022-04-27 17:18:01.695][1][info][config] [source/server/configuration_impl.cc:97] loading tracing configuration\r\n[2022-04-27 17:18:01.695][1][info][config] [source/server/configuration_impl.cc:117] loading stats sink configuration\r\n[2022-04-27 17:18:01.695][1][info][main] [source/server/server.cc:549] starting main dispatch loop\r\n[2022-04-27 17:18:01.696][1][info][upstream] [source/common/upstream/cluster_manager_impl.cc:161] cm init: all clusters initialized\r\n[2022-04-27 17:18:01.696][1][info][main] [source/server/server.cc:528] all clusters initialized. initializing init manager\r\n[2022-04-27 17:18:01.696][1][info][config] [source/server/listener_manager_impl.cc:578] all dependencies initialized. starting workers\r\n[2022-04-27 17:33:01.695][1][info][main] [source/server/drain_manager_impl.cc:63] shutting down parent after drain\r\n```\r\nIt stuck at \r\n```\r\n[2022-07-20 01:36:07.487][1][info][main] [source/server/server.cc:278] buffer implementation: new.\r\n```\r\n\r\nAfter changing them, still couldn't get logs, pod, and events...\r\n\r\n\r\nAnd the same issue there.", "I found it is a **kubernetes-client** issue.\r\nhttps://github.com/kubeflow/pipelines/blob/1.5.1/frontend/server/k8s-helper.ts#L57\r\n\r\nEven I upgrade kubernetes-client to 0.15.0, although basePath is correct kubernetes_service_host, _basePath: https://[240b:ipv6:8001]:443', \r\n```\r\n***************KubeConfig: KubeConfig {\r\n contexts: [\r\n {\r\n cluster: 'inCluster',\r\n name: 'inClusterContext',\r\n user: 'inClusterUser'\r\n }\r\n ],\r\n clusters: [\r\n {\r\n name: 'inCluster',\r\n caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',\r\n server: 'https://[240b:ipv6::8001]:443',\r\n skipTLSVerify: false\r\n }\r\n ],\r\n users: [ { name: 'inClusterUser', authProvider: [Object] } ],\r\n currentContext: 'inClusterContext'\r\n}\r\n***************clusters: [\r\n {\r\n name: 'inCluster',\r\n caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',\r\n server: 'https://[240b:ipv6::8001]:443',\r\n skipTLSVerify: false\r\n }\r\n]\r\n***************users: [\r\n {\r\n name: 'inClusterUser',\r\n authProvider: { name: 'tokenFile', config: [Object] }\r\n }\r\n]\r\n***************contexts: [\r\n {\r\n cluster: 'inCluster',\r\n name: 'inClusterContext',\r\n user: 'inClusterUser'\r\n }\r\n]\r\n***************currentContext: inClusterContext\r\n***************k8sV1Client: CoreV1Api {\r\n _basePath: 'https://[240b:ipv6::8001]:443',\r\n _defaultHeaders: {},\r\n _useQuerystring: false,\r\n authentications: {\r\n default: KubeConfig {\r\n contexts: [Array],\r\n clusters: [Array],\r\n users: [Array],\r\n currentContext: 'inClusterContext'\r\n },\r\n BearerToken: ApiKeyAuth {\r\n location: 'header',\r\n paramName: 'authorization',\r\n apiKey: ''\r\n }\r\n },\r\n interceptors: []\r\n}\r\n``` \r\nThe error still there\r\n![image](https://user-images.githubusercontent.com/37242439/180197458-807394a5-1706-438c-8d2b-b14fdef83a78.png)\r\n", "The issue is caused by a bug in request. \r\n\r\n> As workaround adding a KUBERNETES_SERVICE_HOST=kubernetes.default.svc env parameter in the deployment config helped for us.\r\n\r\nhttps://github.com/kubernetes-client/javascript/issues/599\r\n" ]
"2022-07-15T03:11:38"
"2022-12-27T06:06:11"
"2022-07-21T11:25:17"
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Following https://github.com/kubeflow/manifests/tree/v1.3.1 * Versions: Kubeflow:1.3.1, Pipeline:1.5.1, "@kubernetes/client-node": "^0.11.2". Original version is "^0.8.2", tried to upgrade to "^0.11.2", but still the same errors. "portable-fetch": "^3.0.0", https://github.com/kubeflow/pipelines/blob/1.5.1/frontend/server/package.json#L16 "node-fetch":1.7.3, portable-fetch require node-fetch:1.7.3 that seems not support IPv6 BTW, using IPv6 in the cluster. I could see Pipelines, Experiment, and Runs list, but couldn't see Logs, Pod, and Events of any pipeline components in Runs. ### Steps to reproduce <img width="1439" alt="Screen Shot 2022-12-27 at 14 51 16" src="https://user-images.githubusercontent.com/37242439/209619418-2e4aebe2-ebb8-4a8a-8c96-aff826ec5ab8.png"> <img width="1418" alt="Screen Shot 2022-12-27 at 14 52 45" src="https://user-images.githubusercontent.com/37242439/209619426-3eab6885-f3d3-4e2c-a12e-58c2347d616f.png"> <img width="1495" alt="Screen Shot 2022-12-27 at 14 53 36" src="https://user-images.githubusercontent.com/37242439/209619432-79ca88b8-7ee5-47f8-b3ad-9fce91f91edc.png"> ### Expected result See Logs, Pod, and Events of pipeline components in Runs ### Materials and Reference npm list in ml-pipeline-ui pod ![image](https://user-images.githubusercontent.com/37242439/179141671-e9f261b4-95ae-44c1-9faa-f24fd456357d.png) Error logs of ml-pipeline-ui pod Hostname/IP does not match certificate's altnames: Host: **240b** ``` { argo: { archiveArtifactory: 'minio', archiveBucketName: 'mlpipeline', archiveLogs: false, archivePrefix: 'logs' }, artifacts: 'Artifacts config contains credentials, so it is omitted', metadata: { envoyService: { host: '[ipv6]', port: '9090' } }, pipeline: { host: '[ipv6]', port: '8888' }, server: { apiVersionPrefix: 'apis/v1beta1', basePath: '/pipeline', deployment: 'KUBEFLOW', hideSideNav: true, port: 3000, staticDir: '/client' }, viewer: { tensorboard: { podTemplateSpec: [Object], tfImageName: 'tensorflow/tensorflow' } }, visualizations: { allowCustomVisualizations: true }, gkeMetadata: { disabled: false }, auth: { enabled: true, kubeflowUserIdHeader: 'kubeflow-userid', kubeflowUserIdPrefix: '' } } [HPM] Proxy created: [Function] -> /artifacts [HPM] Proxy created: / -> http://[ipv6]:9090 [HPM] Proxy created: / -> http://127.0.0.1 [HPM] Subscribed to http-proxy events: [ 'error', 'close' ] [HPM] Proxy created: / -> http://127.0.0.1 [HPM] Subscribed to http-proxy events: [ 'error', 'close' ] [HPM] Proxy created: / -> http://[ipv6]:8888 [HPM] Subscribed to http-proxy events: [ 'proxyReq', 'error', 'close' ] [HPM] Proxy created: / -> http://[ipv6]:8888 [HPM] Subscribed to http-proxy events: [ 'proxyReq', 'error', 'close' ] Server listening at http://localhost:3000 GET /apis/v1beta1/healthz ... GET /pipeline/k8s/pod?podname=rflow-demo-07-qfjnq-3523444001&podnamespace=rflow-test Could not get pod demo-07-qfjnq-3523444001 in namespace test: Hostname/IP does not match certificate's altnames: Host: 240b. is not in the cert's altnames: DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:uhn7kls2drbms001.test.local, IP Address:240B:C0E0:202:5404:B4D4:2:0:8001, IP Address:240B:..., IP Address:240B:... Error [ERR_TLS_CERT_ALTNAME_INVALID]: Hostname/IP does not match certificate's altnames: Host: 240b. is not in the cert's altnames: DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:uhn7kls2drbms001.test.local, IP Address:240B:...:8001, IP Address:240B:..., IP Address:240B:... at Object.checkServerIdentity (tls.js:283:12) at TLSSocket.onConnectSecure (_tls_wrap.js:1331:27) at TLSSocket.emit (events.js:223:5) at TLSSocket._finishInit (_tls_wrap.js:794:8) at TLSWrap.ssl.onhandshakedone (_tls_wrap.js:608:12) { reason: "Host: 240b. is not in the cert's altnames: DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:uhn7kls2drbms001.test.local, IP Address:240B:...:8001, IP Address:240B:..., IP Address:240B:...", host: '240b', cert: { ... ``` config.ts https://github.com/kubeflow/pipelines/blob/1.5.1/frontend/server/configs.ts ``` const { staticDir, port } = parseArgs(argv); /** All configurable environment variables can be found here. */ const { /** minio client use these to retrieve minio objects/artifacts */ MINIO_ACCESS_KEY = 'minio', MINIO_SECRET_KEY = 'minio123', MINIO_PORT = '9000', MINIO_HOST = 'minio-service', MINIO_NAMESPACE = 'kubeflow', MINIO_SSL = 'false', /** minio client use these to retrieve s3 objects/artifacts */ AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, /** http/https base URL */ HTTP_BASE_URL = '', // tried to use '[::]', but make no difference. /** http/https fetch with this authorization header key (for example: 'Authorization') */ HTTP_AUTHORIZATION_KEY = '', /** http/https fetch with this authorization header value by default when absent in client request at above key */ HTTP_AUTHORIZATION_DEFAULT_VALUE = '', /** API service will listen to this host */ ML_PIPELINE_SERVICE_HOST = '[::]', /** API service will listen to this port */ ML_PIPELINE_SERVICE_PORT = '8888', /** path to viewer:tensorboard pod template spec */ VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH, /** Tensorflow image used for tensorboard viewer */ VIEWER_TENSORBOARD_TF_IMAGE_NAME = 'tensorflow/tensorflow', /** Whether custom visualizations are allowed to be generated by the frontend */ ALLOW_CUSTOM_VISUALIZATIONS = 'false', /** Envoy service will listen to this host */ METADATA_ENVOY_SERVICE_SERVICE_HOST = '[::]', /** Envoy service will listen to this port */ METADATA_ENVOY_SERVICE_SERVICE_PORT = '9090', /** Is Argo log archive enabled? */ ARGO_ARCHIVE_LOGS = 'false', /** Use minio or s3 client to retrieve archives. */ ARGO_ARCHIVE_ARTIFACTORY = 'minio', /** Bucket to retrive logs from */ ARGO_ARCHIVE_BUCKETNAME = 'mlpipeline', /** Prefix to logs. */ ARGO_ARCHIVE_PREFIX = 'logs', /** Should use server API for log streaming? */ STREAM_LOGS_FROM_SERVER_API = 'false', /** Disables GKE metadata endpoint. */ DISABLE_GKE_METADATA = 'true', /** Enable authorization checks for multi user mode. */ ENABLE_AUTHZ = 'false', /** Deployment type. */ DEPLOYMENT: DEPLOYMENT_STR = '', /** * Set to true to hide the SideNav. When DEPLOYMENT is KUBEFLOW, HIDE_SIDENAV * defaults to true if not explicitly set to false. */ HIDE_SIDENAV, /** * A header user requests have when authenticated. It carries user identity information. * The default value works with Google Cloud IAP. */ KUBEFLOW_USERID_HEADER = 'x-goog-authenticated-user-email', /** * KUBEFLOW_USERID_HEADER's value may have a prefix before user identity. * Use this header to specify what the prefix is. * * e.g. a valid header value for default values can be like `accounts.google.com:user@gmail.com`. */ KUBEFLOW_USERID_PREFIX = 'accounts.google.com:', } = env; ``` --- ``` kind: Deployment apiVersion: apps/v1 metadata: name: ml-pipeline-ui namespace: kubeflow labels: app: ml-pipeline-ui app.kubernetes.io/component: ml-pipeline app.kubernetes.io/instance: ml-pipeline app.kubernetes.io/name: kubeflow-pipelines application-crd-id: kubeflow-pipelines annotations: … managedFields: - manager: argocd-application-controller operation: Update apiVersion: apps/v1 time: '2022-04-13T06:18:49Z' fieldsType: FieldsV1 fieldsV1: f:metadata: f:annotations: .: {} f:kubectl.kubernetes.io/last-applied-configuration: {} f:labels: .: {} f:app: {} f:app.kubernetes.io/component: {} f:app.kubernetes.io/instance: {} f:app.kubernetes.io/name: {} f:application-crd-id: {} f:spec: f:progressDeadlineSeconds: {} f:replicas: {} f:revisionHistoryLimit: {} f:selector: {} f:strategy: f:rollingUpdate: .: {} f:maxSurge: {} f:maxUnavailable: {} f:type: {} f:template: f:metadata: f:annotations: .: {} f:cluster-autoscaler.kubernetes.io/safe-to-evict: {} f:labels: .: {} f:app: {} f:app.kubernetes.io/component: {} f:app.kubernetes.io/name: {} f:application-crd-id: {} f:spec: f:containers: k:{"name":"ml-pipeline-ui"}: .: {} f:env: .: {} k:{"name":"ALLOW_CUSTOM_VISUALIZATIONS"}: .: {} f:name: {} f:value: {} k:{"name":"ARTIFACTS_SERVICE_PROXY_ENABLED"}: .: {} f:name: {} f:value: {} k:{"name":"ARTIFACTS_SERVICE_PROXY_NAME"}: .: {} f:name: {} f:value: {} k:{"name":"ARTIFACTS_SERVICE_PROXY_PORT"}: .: {} f:name: {} f:value: {} k:{"name":"DEPLOYMENT"}: .: {} f:name: {} f:value: {} k:{"name":"ENABLE_AUTHZ"}: .: {} f:name: {} f:value: {} k:{"name":"KUBEFLOW_USERID_HEADER"}: .: {} f:name: {} f:value: {} k:{"name":"KUBEFLOW_USERID_PREFIX"}: .: {} f:name: {} k:{"name":"MINIO_ACCESS_KEY"}: .: {} f:name: {} f:valueFrom: .: {} f:secretKeyRef: .: {} f:key: {} f:name: {} k:{"name":"MINIO_NAMESPACE"}: .: {} f:name: {} f:valueFrom: .: {} f:fieldRef: .: {} f:apiVersion: {} f:fieldPath: {} k:{"name":"MINIO_SECRET_KEY"}: .: {} f:name: {} f:valueFrom: .: {} f:secretKeyRef: .: {} f:key: {} f:name: {} k:{"name":"VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH"}: .: {} f:name: {} f:value: {} f:livenessProbe: .: {} f:exec: .: {} f:command: {} f:failureThreshold: {} f:initialDelaySeconds: {} f:periodSeconds: {} f:successThreshold: {} f:timeoutSeconds: {} f:name: {} f:ports: .: {} k:{"containerPort":3000,"protocol":"TCP"}: .: {} f:containerPort: {} f:protocol: {} f:readinessProbe: .: {} f:exec: .: {} f:command: {} f:failureThreshold: {} f:initialDelaySeconds: {} f:periodSeconds: {} f:successThreshold: {} f:timeoutSeconds: {} f:resources: .: {} f:requests: .: {} f:cpu: {} f:memory: {} f:terminationMessagePath: {} f:terminationMessagePolicy: {} f:volumeMounts: .: {} k:{"mountPath":"/etc/config"}: .: {} f:mountPath: {} f:name: {} f:readOnly: {} f:dnsPolicy: {} f:restartPolicy: {} f:schedulerName: {} f:securityContext: {} f:serviceAccount: {} f:serviceAccountName: {} f:terminationGracePeriodSeconds: {} f:volumes: .: {} k:{"name":"config-volume"}: .: {} f:configMap: .: {} f:defaultMode: {} f:name: {} f:name: {} - manager: kubectl-edit operation: Update apiVersion: apps/v1 time: '2022-04-19T04:22:29Z' fieldsType: FieldsV1 fieldsV1: f:spec: f:template: f:spec: f:containers: k:{"name":"ml-pipeline-ui"}: f:env: k:{"name":"METADATA_ENVOY_SERVICE_SERVICE_HOST"}: .: {} f:name: {} f:value: {} k:{"name":"ML_PIPELINE_SERVICE_HOST"}: .: {} f:name: {} f:value: {} f:imagePullPolicy: {} - manager: kubectl-rollout operation: Update apiVersion: apps/v1 time: '2022-05-09T02:59:57Z' fieldsType: FieldsV1 fieldsV1: f:spec: f:template: f:metadata: f:annotations: f:kubectl.kubernetes.io/restartedAt: {} - manager: dashboard operation: Update apiVersion: apps/v1 time: '2022-07-15T01:59:57Z' fieldsType: FieldsV1 fieldsV1: f:spec: f:template: f:spec: f:containers: k:{"name":"ml-pipeline-ui"}: f:env: k:{"name":"HTTP_BASE_URL"}: .: {} f:name: {} f:value: {} f:image: {} - manager: kube-controller-manager operation: Update apiVersion: apps/v1 time: '2022-07-15T02:01:01Z' fieldsType: FieldsV1 fieldsV1: f:metadata: f:annotations: f:deployment.kubernetes.io/revision: {} f:status: f:availableReplicas: {} f:conditions: .: {} k:{"type":"Available"}: .: {} f:lastTransitionTime: {} f:lastUpdateTime: {} f:message: {} f:reason: {} f:status: {} f:type: {} k:{"type":"Progressing"}: .: {} f:lastTransitionTime: {} f:lastUpdateTime: {} f:message: {} f:reason: {} f:status: {} f:type: {} f:observedGeneration: {} f:readyReplicas: {} f:replicas: {} f:updatedReplicas: {} spec: replicas: 1 selector: matchLabels: app: ml-pipeline-ui app.kubernetes.io/component: ml-pipeline app.kubernetes.io/name: kubeflow-pipelines application-crd-id: kubeflow-pipelines template: metadata: creationTimestamp: null labels: app: ml-pipeline-ui app.kubernetes.io/component: ml-pipeline app.kubernetes.io/name: kubeflow-pipelines application-crd-id: kubeflow-pipelines annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' kubectl.kubernetes.io/restartedAt: '2022-07-13T06:30:53Z' spec: volumes: - name: config-volume configMap: name: ml-pipeline-ui-configmap defaultMode: 420 containers: - name: ml-pipeline-ui image: >- frontend:1.5.1-build10 ports: - containerPort: 3000 protocol: TCP env: - name: VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH value: /etc/config/viewer-pod-template.json - name: HTTP_BASE_URL value: abcd - name: ML_PIPELINE_SERVICE_HOST value: '[IPv6 address]' - name: METADATA_ENVOY_SERVICE_SERVICE_HOST value: '[IPv6 address]' - name: DEPLOYMENT value: KUBEFLOW - name: ARTIFACTS_SERVICE_PROXY_NAME value: ml-pipeline-ui-artifact - name: ARTIFACTS_SERVICE_PROXY_PORT value: '80' - name: ARTIFACTS_SERVICE_PROXY_ENABLED value: 'true' - name: ENABLE_AUTHZ value: 'true' - name: KUBEFLOW_USERID_HEADER value: kubeflow-userid - name: KUBEFLOW_USERID_PREFIX - name: MINIO_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: MINIO_ACCESS_KEY valueFrom: secretKeyRef: name: mlpipeline-minio-artifact key: accesskey - name: MINIO_SECRET_KEY valueFrom: secretKeyRef: name: mlpipeline-minio-artifact key: secretkey - name: ALLOW_CUSTOM_VISUALIZATIONS value: 'true' ``` <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8034/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8034/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8014
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8014/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8014/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8014/events
https://github.com/kubeflow/pipelines/issues/8014
1,302,410,636
I_kwDOB-71UM5NoTWM
8,014
[feature] Add primary key to DB tables
{ "login": "ca-scribner", "id": 48125859, "node_id": "MDQ6VXNlcjQ4MTI1ODU5", "avatar_url": "https://avatars.githubusercontent.com/u/48125859?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ca-scribner", "html_url": "https://github.com/ca-scribner", "followers_url": "https://api.github.com/users/ca-scribner/followers", "following_url": "https://api.github.com/users/ca-scribner/following{/other_user}", "gists_url": "https://api.github.com/users/ca-scribner/gists{/gist_id}", "starred_url": "https://api.github.com/users/ca-scribner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ca-scribner/subscriptions", "organizations_url": "https://api.github.com/users/ca-scribner/orgs", "repos_url": "https://api.github.com/users/ca-scribner/repos", "events_url": "https://api.github.com/users/ca-scribner/events{/privacy}", "received_events_url": "https://api.github.com/users/ca-scribner/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
{ "login": "Linchin", "id": 12806577, "node_id": "MDQ6VXNlcjEyODA2NTc3", "avatar_url": "https://avatars.githubusercontent.com/u/12806577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Linchin", "html_url": "https://github.com/Linchin", "followers_url": "https://api.github.com/users/Linchin/followers", "following_url": "https://api.github.com/users/Linchin/following{/other_user}", "gists_url": "https://api.github.com/users/Linchin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Linchin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Linchin/subscriptions", "organizations_url": "https://api.github.com/users/Linchin/orgs", "repos_url": "https://api.github.com/users/Linchin/repos", "events_url": "https://api.github.com/users/Linchin/events{/privacy}", "received_events_url": "https://api.github.com/users/Linchin/received_events", "type": "User", "site_admin": false }
[ { "login": "Linchin", "id": 12806577, "node_id": "MDQ6VXNlcjEyODA2NTc3", "avatar_url": "https://avatars.githubusercontent.com/u/12806577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Linchin", "html_url": "https://github.com/Linchin", "followers_url": "https://api.github.com/users/Linchin/followers", "following_url": "https://api.github.com/users/Linchin/following{/other_user}", "gists_url": "https://api.github.com/users/Linchin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Linchin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Linchin/subscriptions", "organizations_url": "https://api.github.com/users/Linchin/orgs", "repos_url": "https://api.github.com/users/Linchin/repos", "events_url": "https://api.github.com/users/Linchin/events{/privacy}", "received_events_url": "https://api.github.com/users/Linchin/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @Linchin ", "@ca-scribner Hi, we do create each DB with a column as primary key, see examples [here](https://github.com/kubeflow/pipelines/blob/061905b6df397c40fbcc4ffafa24d7b3b9daf439/backend/src/apiserver/model/run.go#L26) and [here](https://github.com/kubeflow/pipelines/blob/061905b6df397c40fbcc4ffafa24d7b3b9daf439/backend/src/apiserver/model/experiment.go#L4). But indeed, in the example DB you gave (DefaultExperiment) we didn't set a column to primary key. Did you mean you want to set a primary key for this specific table? \r\n\r\nAlso, I'm not sure if a primary key is necessary for DefaultExperiment table. Looks like the table has only one entry, which is the default experiment.", "Sorry, I'm working as a bit of a proxy here. @shayancanonical or @paulomach can you comment?", "\r\nHi @Linchin,\r\n\r\n> Did you mean you want to set a primary key for this specific table?\r\n\r\nYes, and for any other table without a PK. \r\n\r\n> Also, I'm not sure if a primary key is necessary for DefaultExperiment table. Looks like the table has only one entry, which is the default experiment.\r\n\r\nEven though we understand that the application does not need a PK to this table, in order to use MySQL with high availability (i.e. group replication plugin), it's required that *every* table have a PK.\r\n\r\nThis won't hurt the application, and will enable it to have a fault tolerant database, hence the proposal. ", "I see. I will go ahead and create a PR for this.", "@Linchin we already opened a [PR](https://github.com/kubeflow/pipelines/pull/8023) with the requested changes (adding primary keys to `default_experiments` and `db_statuses` table). please feel free to close it or add to it if we missed anything!", "Thanks! I created another PR so I will close this one.", "@Linchin given the two PRs are identical, I'd suggest we give credit to @shayancanonical and merge the PR that was created first. It's also our responsibility to actively monitor and review PRs from community contributors.\r\n\r\n@shayancanonical a small tip to do avoid duplicate effort in the future: while you have \"Fixes issue#\" in the PR title which can close the issue upon PR merged, only when you have the same text in the PR description or comment section, the open PR will then be linked to the issue, so that those who look at the issue can easily see there's a proposed fix already." ]
"2022-07-12T17:28:22"
"2022-07-26T09:03:18"
"2022-07-26T09:03:18"
CONTRIBUTOR
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? That kfp create [its database](https://github.com/kubeflow/pipelines/tree/5a56a9399908033ce97573abf85098fd77f649ae/backend/src/apiserver/model) with a primary key (probably [here](https://github.com/kubeflow/pipelines/blob/5a56a9399908033ce97573abf85098fd77f649ae/backend/src/apiserver/model/default_experiment.go#L18) and [here](https://github.com/kubeflow/pipelines/tree/5a56a9399908033ce97573abf85098fd77f649ae/backend/src/apiserver/model), with something like: ``` DefaultExperimentId string `gorm:"column:DefaultExperimentId; not null; primary key"` ``` ### What is the use case or pain point? Primary keys are required for group replication (high availability) of tables in mysql. Enabling this would enable using more resilient data storage. ### Is there a workaround currently? I don't know if this is safe, but perhaps I can separately create the database following kfp's schema (but with a primary key) and then have kfp connect to that existing DB? I'm not sure what the consequences of this would be. **update:** This appears to work (application starts without errors, etc). Haven't stress tested it yet --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8014/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8014/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8012
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8012/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8012/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8012/events
https://github.com/kubeflow/pipelines/issues/8012
1,301,678,349
I_kwDOB-71UM5NlgkN
8,012
[bug] executor attempts to kill already terminated kfp-launcher init container
{ "login": "jazzsir", "id": 4714923, "node_id": "MDQ6VXNlcjQ3MTQ5MjM=", "avatar_url": "https://avatars.githubusercontent.com/u/4714923?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jazzsir", "html_url": "https://github.com/jazzsir", "followers_url": "https://api.github.com/users/jazzsir/followers", "following_url": "https://api.github.com/users/jazzsir/following{/other_user}", "gists_url": "https://api.github.com/users/jazzsir/gists{/gist_id}", "starred_url": "https://api.github.com/users/jazzsir/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jazzsir/subscriptions", "organizations_url": "https://api.github.com/users/jazzsir/orgs", "repos_url": "https://api.github.com/users/jazzsir/repos", "events_url": "https://api.github.com/users/jazzsir/events{/privacy}", "received_events_url": "https://api.github.com/users/jazzsir/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Hi @jazzsir , can you provide what kubernetes version you used? We are moving toward the emissary executor. Here is the doc for executor: https://www.kubeflow.org/docs/components/pipelines/installation/choose-executor/" ]
"2022-07-12T07:49:14"
"2022-07-21T22:44:57"
null
NONE
null
### Environment <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? [standalone-deployment](https://www.kubeflow.org/docs/components/pipelines/installation/standalone-deployment/#deploying-kubeflow-pipelines) <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.8.2 <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: 1.8.2 <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Steps to reproduce The KFPv2 pipelines are completed, but errors occur as shown below, it seems to be a problem caused by attempting to terminate the already terminated the kfp-launcher init container. ``` $ kubectl -n kubeflow-pipeline logs -f pipeline-test-1-gtvhr-2680780413 -c wait time="2022-07-12T06:48:44.957Z" level=info msg="Starting Workflow Executor" executorType=docker version=v3.2.3 time="2022-07-12T06:48:44.961Z" level=info msg="Creating a docker executor" time="2022-07-12T06:48:44.961Z" level=info msg="Executor initialized" deadline="0001-01-01 00:00:00 +0000 UTC" includeScriptOutput=false namespace=kubeflow-pipeline podName=pipeline-test-1-gtvhr-2680780413 template="{\"name\":\"get-data\",\"inputs\":{\"parameters\":[{\"name\":\"pipeline-name\",\"value\":\"pipeline/pipeline-test-1\"},{\"name\":\"pipeline-root\",\"value\":\"\"}]},\"outputs\":{\"artifacts\":[{\"name\":\"get-data-dataset_test\",\"path\":\"/tmp/outputs/dataset_test/data\"},{\"name\":\"get-data-dataset_train\",\"path\":\"/tmp/outputs/dataset_train/data\"}]},\"metadata\":{\"annotations\":{\"pipelines.kubeflow.org/component_ref\":\"{}\",\"pipelines.kubeflow.org/v2_component\":\"true\",\"sidecar.istio.io/inject\":\"false\"},\"labels\":{\"pipelines.kubeflow.org/cache_enabled\":\"true\",\"pipelines.kubeflow.org/enable_caching\":\"false\",\"pipelines.kubeflow.org/kfp_sdk_version\":\"1.8.2\",\"pipelines.kubeflow.org/pipeline-sdk-type\":\"kfp\",\"pipelines.kubeflow.org/v2_component\":\"true\"}},\"container\":{\"name\":\"\",\"image\":\"python:3.7\",\"command\":[\"/kfp-launcher/launch\",\"--mlmd_server_address\",\"$(METADATA_GRPC_SERVICE_HOST)\",\"--mlmd_server_port\",\"$(METADATA_GRPC_SERVICE_PORT)\",\"--runtime_info_json\",\"$(KFP_V2_RUNTIME_INFO)\",\"--container_image\",\"$(KFP_V2_IMAGE)\",\"--task_name\",\"get-data\",\"--pipeline_name\",\"pipeline/pipeline-test-1\",\"--run_id\",\"$(KFP_RUN_ID)\",\"--run_resource\",\"workflows.argoproj.io/$(WORKFLOW_ID)\",\"--namespace\",\"$(KFP_NAMESPACE)\",\"--pod_name\",\"$(KFP_POD_NAME)\",\"--pod_uid\",\"$(KFP_POD_UID)\",\"--pipeline_root\",\"\",\"--enable_caching\",\"$(ENABLE_CACHING)\",\"--\",\"--\"],\"args\":[\"sh\",\"-c\",\"(python3 -m ensurepip || python3 -m ensurepip --user) \\u0026\\u0026 (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location 'pandas' 'sklearn' 'kfp==1.8.2' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location 'pandas' 'sklearn' 'kfp==1.8.2' --user) \\u0026\\u0026 \\\"$0\\\" \\\"$@\\\"\",\"sh\",\"-ec\",\"program_path=$(mktemp -d)\\nprintf \\\"%s\\\" \\\"$0\\\" \\u003e \\\"$program_path/ephemeral_component.py\\\"\\npython3 -m kfp.v2.components.executor_main --component_module_path \\\"$program_path/ephemeral_component.py\\\" \\\"$@\\\"\\n\",\"\\nfrom kfp.v2.dsl import *\\nfrom typing import *\\n\\ndef get_data(\\n dataset_train: Output[Dataset],\\n dataset_test: Output[Dataset]\\n\\n):\\n\\n from sklearn import datasets\\n from sklearn.model_selection import train_test_split as tts\\n import pandas as pd\\n # import some data to play with\\n\\n data_raw = datasets.load_breast_cancer()\\n data = pd.DataFrame(data_raw.data, columns=data_raw.feature_names)\\n data[\\\"target\\\"] = data_raw.target\\n\\n train, test = tts(data, test_size=0.3)\\n\\n train.to_csv(dataset_train.path)\\n test.to_csv(dataset_test.path)\\n\\n\",\"--executor_input\",\"{{$}}\",\"--function_to_execute\",\"get_data\"],\"envFrom\":[{\"configMapRef\":{\"name\":\"metadata-grpc-configmap\",\"optional\":true}}],\"env\":[{\"name\":\"KFP_POD_NAME\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.name\"}}},{\"name\":\"KFP_POD_UID\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.uid\"}}},{\"name\":\"KFP_NAMESPACE\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.namespace\"}}},{\"name\":\"WORKFLOW_ID\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.labels['workflows.argoproj.io/workflow']\"}}},{\"name\":\"KFP_RUN_ID\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.labels['pipeline/runid']\"}}},{\"name\":\"ENABLE_CACHING\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.labels['pipelines.kubeflow.org/enable_caching']\"}}},{\"name\":\"KFP_V2_IMAGE\",\"value\":\"python:3.7\"},{\"name\":\"KFP_V2_RUNTIME_INFO\",\"value\":\"{\\\"inputParameters\\\": {}, \\\"inputArtifacts\\\": {}, \\\"outputParameters\\\": {}, \\\"outputArtifacts\\\": {\\\"dataset_test\\\": {\\\"schemaTitle\\\": \\\"system.Dataset\\\", \\\"instanceSchema\\\": \\\"\\\", \\\"schemaVersion\\\": \\\"0.0.1\\\", \\\"metadataPath\\\": \\\"/tmp/outputs/dataset_test/data\\\"}, \\\"dataset_train\\\": {\\\"schemaTitle\\\": \\\"system.Dataset\\\", \\\"instanceSchema\\\": \\\"\\\", \\\"schemaVersion\\\": \\\"0.0.1\\\", \\\"metadataPath\\\": \\\"/tmp/outputs/dataset_train/data\\\"}}}\"}],\"resources\":{},\"volumeMounts\":[{\"name\":\"kfp-launcher\",\"mountPath\":\"/kfp-launcher\"}]},\"volumes\":[{\"name\":\"kfp-launcher\"}],\"initContainers\":[{\"name\":\"kfp-launcher\",\"image\":\"gcr.io/ml-pipeline/kfp-launcher:1.8.2\",\"command\":[\"launcher\",\"--copy\",\"/kfp-launcher/launch\"],\"resources\":{},\"mirrorVolumeMounts\":true}],\"archiveLocation\":{\"archiveLogs\":true,\"s3\":{\"endpoint\":\"minio-service.kubeflow-pipeline:9000\",\"bucket\":\"mlpipeline\",\"insecure\":true,\"accessKeySecret\":{\"name\":\"clops-minio-artifact\",\"key\":\"accessKey\"},\"secretKeySecret\":{\"name\":\"clops-minio-artifact\",\"key\":\"secretKey\"},\"key\":\"pipeline-test-1-gtvhr/pipeline-test-1-gtvhr-2680780413\"}}}" version="&Version{Version:v3.2.3,BuildDate:2021-10-27T01:09:16Z,GitCommit:e5dc961b7846efe0fe36ab3a0964180eaedd2672,GitTag:v3.2.3,GitTreeState:clean,GoVersion:go1.16.9,Compiler:gc,Platform:linux/amd64,}" time="2022-07-12T06:48:44.961Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:44.961Z" level=info msg="Starting deadline monitor" time="2022-07-12T06:48:45.006Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Created {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:45.006Z" level=info msg="mapped container name \"wait\" to container ID \"b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c\" (created at 2022-07-12 06:48:44 +0000 UTC, status Up)" time="2022-07-12T06:48:45.006Z" level=info msg="mapped container name \"kfp-launcher\" to container ID \"3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b\" (created at 2022-07-12 06:48:43 +0000 UTC, status Exited)" time="2022-07-12T06:48:45.006Z" level=info msg="mapped container name \"main\" to container ID \"e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c\" (created at 2022-07-12 06:48:44 +0000 UTC, status Created)" time="2022-07-12T06:48:45.962Z" level=info msg="docker wait e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c" time="2022-07-12T06:48:46.007Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:46.036Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:47.036Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:47.068Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:48.069Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:48.099Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:49.100Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:49.130Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:50.131Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:50.172Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:51.173Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:51.204Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:52.204Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:52.231Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:53.231Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:53.265Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:54.266Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:54.294Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:55.295Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:55.328Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:56.328Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:56.357Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:57.357Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:57.386Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:58.387Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:58.421Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:48:59.422Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:48:59.449Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:00.449Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:00.477Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:01.477Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:01.530Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:02.530Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:02.565Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:03.565Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:03.607Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:04.607Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:04.638Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:05.640Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:05.693Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:06.693Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:06.749Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:07.749Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:07.783Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:08.784Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:08.848Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:09.849Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:09.883Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:10.884Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:10.934Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:11.935Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:11.963Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:12.964Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:12.997Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:13.997Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:14.027Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:15.028Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:15.058Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Up {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:22.093Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:22.195Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Exited {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:22.195Z" level=info msg="Main container completed" time="2022-07-12T06:49:22.195Z" level=info msg="No Script output reference in workflow. Capturing script output ignored" time="2022-07-12T06:49:22.195Z" level=info msg="Saving logs" time="2022-07-12T06:49:22.195Z" level=info msg="[docker logs e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c]" time="2022-07-12T06:49:22.247Z" level=info msg="S3 Save path: /tmp/argo/outputs/logs/main.log, key: pipeline-test-1-gtvhr/pipeline-test-1-gtvhr-2680780413/main.log" time="2022-07-12T06:49:22.247Z" level=info msg="Creating minio client using static credentials" endpoint="minio-service.kubeflow-pipeline:9000" time="2022-07-12T06:49:22.247Z" level=info msg="Saving file to s3" bucket=mlpipeline endpoint="minio-service.kubeflow-pipeline:9000" key=pipeline-test-1-gtvhr/pipeline-test-1-gtvhr-2680780413/main.log path=/tmp/argo/outputs/logs/main.log time="2022-07-12T06:49:24.735Z" level=info msg="not deleting local artifact" localArtPath=/tmp/argo/outputs/logs/main.log time="2022-07-12T06:49:24.735Z" level=info msg="Successfully saved file: /tmp/argo/outputs/logs/main.log" time="2022-07-12T06:49:24.735Z" level=info msg="No output parameters" time="2022-07-12T06:49:24.735Z" level=info msg="Saving output artifacts" time="2022-07-12T06:49:24.735Z" level=info msg="Staging artifact: get-data-dataset_test" time="2022-07-12T06:49:24.735Z" level=info msg="Copying /tmp/outputs/dataset_test/data from container base image layer to /tmp/argo/outputs/artifacts/get-data-dataset_test.tgz" time="2022-07-12T06:49:24.735Z" level=info msg="Archiving main:/tmp/outputs/dataset_test/data to /tmp/argo/outputs/artifacts/get-data-dataset_test.tgz" time="2022-07-12T06:49:24.735Z" level=info msg="sh -c docker cp -a e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c:/tmp/outputs/dataset_test/data - | gzip > /tmp/argo/outputs/artifacts/get-data-dataset_test.tgz" time="2022-07-12T06:49:24.915Z" level=info msg="Archiving completed" time="2022-07-12T06:49:24.915Z" level=info msg="S3 Save path: /tmp/argo/outputs/artifacts/get-data-dataset_test.tgz, key: pipeline-test-1-gtvhr/pipeline-test-1-gtvhr-2680780413/get-data-dataset_test.tgz" time="2022-07-12T06:49:24.915Z" level=info msg="Creating minio client using static credentials" endpoint="minio-service.kubeflow-pipeline:9000" time="2022-07-12T06:49:24.915Z" level=info msg="Saving file to s3" bucket=mlpipeline endpoint="minio-service.kubeflow-pipeline:9000" key=pipeline-test-1-gtvhr/pipeline-test-1-gtvhr-2680780413/get-data-dataset_test.tgz path=/tmp/argo/outputs/artifacts/get-data-dataset_test.tgz time="2022-07-12T06:49:25.058Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:25.089Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Exited {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:26.424Z" level=info msg="not deleting local artifact" localArtPath=/tmp/argo/outputs/artifacts/get-data-dataset_test.tgz time="2022-07-12T06:49:26.424Z" level=info msg="Successfully saved file: /tmp/argo/outputs/artifacts/get-data-dataset_test.tgz" time="2022-07-12T06:49:26.424Z" level=info msg="Staging artifact: get-data-dataset_train" time="2022-07-12T06:49:26.424Z" level=info msg="Copying /tmp/outputs/dataset_train/data from container base image layer to /tmp/argo/outputs/artifacts/get-data-dataset_train.tgz" time="2022-07-12T06:49:26.424Z" level=info msg="Archiving main:/tmp/outputs/dataset_train/data to /tmp/argo/outputs/artifacts/get-data-dataset_train.tgz" time="2022-07-12T06:49:26.424Z" level=info msg="sh -c docker cp -a e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c:/tmp/outputs/dataset_train/data - | gzip > /tmp/argo/outputs/artifacts/get-data-dataset_train.tgz" time="2022-07-12T06:49:26.559Z" level=info msg="Archiving completed" time="2022-07-12T06:49:26.571Z" level=info msg="S3 Save path: /tmp/argo/outputs/artifacts/get-data-dataset_train.tgz, key: pipeline-test-1-gtvhr/pipeline-test-1-gtvhr-2680780413/get-data-dataset_train.tgz" time="2022-07-12T06:49:26.571Z" level=info msg="Creating minio client using static credentials" endpoint="minio-service.kubeflow-pipeline:9000" time="2022-07-12T06:49:26.571Z" level=info msg="Saving file to s3" bucket=mlpipeline endpoint="minio-service.kubeflow-pipeline:9000" key=pipeline-test-1-gtvhr/pipeline-test-1-gtvhr-2680780413/get-data-dataset_train.tgz path=/tmp/argo/outputs/artifacts/get-data-dataset_train.tgz time="2022-07-12T06:49:28.158Z" level=info msg="not deleting local artifact" localArtPath=/tmp/argo/outputs/artifacts/get-data-dataset_train.tgz time="2022-07-12T06:49:28.158Z" level=info msg="Successfully saved file: /tmp/argo/outputs/artifacts/get-data-dataset_train.tgz" time="2022-07-12T06:49:28.158Z" level=info msg="Annotating pod with output" time="2022-07-12T06:49:28.211Z" level=info msg="Patch pods 200" time="2022-07-12T06:49:28.218Z" level=info msg="docker ps --all --no-trunc --format={{.Status}}|{{.Label \"io.kubernetes.container.name\"}}|{{.ID}}|{{.CreatedAt}} --filter=label=io.kubernetes.pod.namespace=kubeflow-pipeline --filter=label=io.kubernetes.pod.name=pipeline-test-1-gtvhr-2680780413" time="2022-07-12T06:49:28.250Z" level=info msg="listed containers" containers="map[kfp-launcher:{3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b Exited {0 63793205323 <nil>}} main:{e9f48e33f389b70943f7a04eaed064a646265b4c832216a47bf6c7e20a6aca9c Exited {0 63793205324 <nil>}} wait:{b17b4bd21c798e1cb684b62b80435c92705737169f69e51a09e56e9c980a373c Up {0 63793205324 <nil>}}]" time="2022-07-12T06:49:28.250Z" level=info msg="Killing sidecars [\"kfp-launcher\"]" time="2022-07-12T06:49:28.252Z" level=info msg="Get pods 403" time="2022-07-12T06:49:28.252Z" level=warning msg="Non-transient error: pods \"pipeline-test-1-gtvhr-2680780413\" is forbidden: User \"system:serviceaccount:kubeflow-pipeline:default\" cannot get resource \"pods\" in API group \"\" in the namespace \"kubeflow-pipeline\"" time="2022-07-12T06:49:28.252Z" level=info msg="docker kill --signal TERM 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b" time="2022-07-12T06:49:28.306Z" level=error msg="`docker kill --signal TERM 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b` failed: Error response from daemon: Cannot kill container: 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b: Container 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b is not running\n" time="2022-07-12T06:49:28.306Z" level=warning msg="Ignored error from 'docker kill --signal TERM': Error response from daemon: Cannot kill container: 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b: Container 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b is not running" time="2022-07-12T06:49:28.306Z" level=info msg="[docker wait 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b]" time="2022-07-12T06:49:28.307Z" level=info msg="Timed out (0s) for containers to terminate gracefully. Killing forcefully" time="2022-07-12T06:49:28.307Z" level=info msg="[docker kill --signal KILL 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b]" time="2022-07-12T06:49:28.405Z" level=warning msg="Ignored error from 'docker kill --signal KILL': exit status 1" time="2022-07-12T06:49:28.405Z" level=error msg="executor error: Error response from daemon: Cannot kill container: 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b: Container 3910ce0b5e1470c0f11ccc5d1e9895feec6e9bec565b7eb0cade8e3f053b843b is not running\ngithub.com/argoproj/argo-workflows/v3/errors.Wrap\n\t/go/src/github.com/argoproj/argo-workflows/errors/errors.go:88\ngithub.com/argoproj/argo-workflows/v3/errors.InternalWrapError\n\t/go/src/github.com/argoproj/argo-workflows/errors/errors.go:71\ngithub.com/argoproj/argo-workflows/v3/workflow/executor/docker.(*DockerExecutor).Kill\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/docker/docker.go:410\ngithub.com/argoproj/argo-workflows/v3/workflow/executor.(*WorkflowExecutor).KillSidecars\n\t/go/src/github.com/argoproj/argo-workflows/workflow/executor/executor.go:989\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.waitContainer.func1\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:35\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.waitContainer\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:72\ngithub.com/argoproj/argo-workflows/v3/cmd/argoexec/commands.NewWaitCommand.func1\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/commands/wait.go:18\ngithub.com/spf13/cobra.(*Command).execute\n\t/go/pkg/mod/github.com/spf13/cobra@v1.2.1/command.go:860\ngithub.com/spf13/cobra.(*Command).ExecuteC\n\t/go/pkg/mod/github.com/spf13/cobra@v1.2.1/command.go:974\ngithub.com/spf13/cobra.(*Command).Execute\n\t/go/pkg/mod/github.com/spf13/cobra@v1.2.1/command.go:902\nmain.main\n\t/go/src/github.com/argoproj/argo-workflows/cmd/argoexec/main.go:15\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:225\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1371" time="2022-07-12T06:49:28.405Z" level=info msg="Alloc=6364 TotalAlloc=22190 Sys=73297 NumGC=6 Goroutines=15" ``` <!-- Specify how to reproduce the problem. This may include information such as: a description of the process, code snippets, log output, or screenshots. --> ### Expected result <!-- What should the correct behavior be? --> ### Materials and reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> - my workflow-controller-configmap ``` apiVersion: v1 data: artifactRepository: | archiveLogs: true s3: accessKeySecret: key: accesskey name: mlpipeline-minio-artifact bucket: mlpipeline endpoint: minio-service.kubeflow-pipeline:9000 insecure: true secretKeySecret: key: secretkey name: mlpipeline-minio-artifact containerRuntimeExecutor: docker executor: | imagePullPolicy: IfNotPresent resources: limits: cpu: "0.5" memory: 512Mi requests: cpu: "0.01" memory: 32Mi parallelism: "50" kind: ConfigMap metadata: name: workflow-controller-configmap namespace: kubeflow-pipeline ``` - KFPv2 example I tried: https://github.com/kubeflow/pipelines/blob/master/samples/v2/lightweight_python_functions_v2_pipeline/lightweight_python_functions_v2_pipeline.py ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> /area backend /area sdk <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8012/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8012/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/8001
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/8001/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/8001/comments
https://api.github.com/repos/kubeflow/pipelines/issues/8001/events
https://github.com/kubeflow/pipelines/issues/8001
1,300,882,413
I_kwDOB-71UM5NiePt
8,001
Does Kubeflow (1.2) has dependency with any specific kfp python library version?
{ "login": "JebasinghLuccas", "id": 37059495, "node_id": "MDQ6VXNlcjM3MDU5NDk1", "avatar_url": "https://avatars.githubusercontent.com/u/37059495?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JebasinghLuccas", "html_url": "https://github.com/JebasinghLuccas", "followers_url": "https://api.github.com/users/JebasinghLuccas/followers", "following_url": "https://api.github.com/users/JebasinghLuccas/following{/other_user}", "gists_url": "https://api.github.com/users/JebasinghLuccas/gists{/gist_id}", "starred_url": "https://api.github.com/users/JebasinghLuccas/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JebasinghLuccas/subscriptions", "organizations_url": "https://api.github.com/users/JebasinghLuccas/orgs", "repos_url": "https://api.github.com/users/JebasinghLuccas/repos", "events_url": "https://api.github.com/users/JebasinghLuccas/events{/privacy}", "received_events_url": "https://api.github.com/users/JebasinghLuccas/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false }
[ { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @connor-mccarthy ", "@JebasinghLuccas, I do not believe there is any strict dependency on a KFP SDK version, so long as you're using a kfp SDK 1.x version, as we've adhered closely to the semantic versioning standards. The most recent is `1.8.13`.\r\n\r\nContext: Kubeflow 1.2 uses Kubeflow Pipelines 1.0.4" ]
"2022-07-11T15:32:55"
"2022-07-22T15:54:48"
"2022-07-22T15:54:48"
NONE
null
i am wondering if there any dependencies in kfp python library version when i want to use it in kubeflow
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/8001/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/8001/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7995
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7995/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7995/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7995/events
https://github.com/kubeflow/pipelines/issues/7995
1,298,512,597
I_kwDOB-71UM5NZbrV
7,995
[frontend] AWS S3 artifacts handler hardcode for s3 endpoint.
{ "login": "zorrofox", "id": 1528417, "node_id": "MDQ6VXNlcjE1Mjg0MTc=", "avatar_url": "https://avatars.githubusercontent.com/u/1528417?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zorrofox", "html_url": "https://github.com/zorrofox", "followers_url": "https://api.github.com/users/zorrofox/followers", "following_url": "https://api.github.com/users/zorrofox/following{/other_user}", "gists_url": "https://api.github.com/users/zorrofox/gists{/gist_id}", "starred_url": "https://api.github.com/users/zorrofox/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zorrofox/subscriptions", "organizations_url": "https://api.github.com/users/zorrofox/orgs", "repos_url": "https://api.github.com/users/zorrofox/repos", "events_url": "https://api.github.com/users/zorrofox/events{/privacy}", "received_events_url": "https://api.github.com/users/zorrofox/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
closed
false
{ "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false }
[ { "login": "surajkota", "id": 22246703, "node_id": "MDQ6VXNlcjIyMjQ2NzAz", "avatar_url": "https://avatars.githubusercontent.com/u/22246703?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surajkota", "html_url": "https://github.com/surajkota", "followers_url": "https://api.github.com/users/surajkota/followers", "following_url": "https://api.github.com/users/surajkota/following{/other_user}", "gists_url": "https://api.github.com/users/surajkota/gists{/gist_id}", "starred_url": "https://api.github.com/users/surajkota/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surajkota/subscriptions", "organizations_url": "https://api.github.com/users/surajkota/orgs", "repos_url": "https://api.github.com/users/surajkota/repos", "events_url": "https://api.github.com/users/surajkota/events{/privacy}", "received_events_url": "https://api.github.com/users/surajkota/received_events", "type": "User", "site_admin": false } ]
null
[ "I've created a PR for this change here: https://github.com/kubeflow/pipelines/pull/8033\r\n" ]
"2022-07-08T05:34:00"
"2022-08-02T22:54:44"
"2022-08-02T22:54:44"
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Install with Kubeflow * KFP version: 1.8.2 ### Steps to reproduce Access the S3 artifaces locate in China regions(Beijing&Ningxia) the UI will run into errors: ``` S3Error: The provided token is malformed or otherwise invalid. at Object.parseError (/server/node_modules/minio/dist/main/xml-parsers.js:86:11) at /server/node_modules/minio/dist/main/transformers.js:156:22 at DestroyableTransform._flush (/server/node_modules/minio/dist/main/transformers.js:80:10) at DestroyableTransform.prefinish (/server/node_modules/readable-stream/lib/_stream_transform.js:138:10) at DestroyableTransform.emit (events.js:400:28) at prefinish (/server/node_modules/readable-stream/lib/_stream_writable.js:619:14) at finishMaybe (/server/node_modules/readable-stream/lib/_stream_writable.js:627:5) at endWritable (/server/node_modules/readable-stream/lib/_stream_writable.js:638:3) at DestroyableTransform.Writable.end (/server/node_modules/readable-stream/lib/_stream_writable.js:594:41) at IncomingMessage.onend (internal/streams/readable.js:670:10) { code: 'InvalidToken', 'token-0': 'FwoDYXdzEOz//////////wEaDOx63B/ao8DmF9iVFyL5A4EzyM2ug93zLfjE41tSTrkDXmZ6mAliMAhw3WSfahZNBUapHZtSc5VYuXYrIXqoY+z+Cb/ZpRgbZ84DaVjzZpSC6ih6gtNxv+NQ5hwHVarg1TJdvMLWTy7TjKhLvRaG5TNjJFm5K6ygeJ5vc8xo7lxOlIgek3SD/ygcmGgGi9RhGDflrrkSDn7Fg1dUr17j4cOVJBu8fbzKNl6P8dwvkuRt3UvaN3IKE55BVEjOe5JLz5Y3eoLXWlIzZ12R28Tm86H8f/JL2TRgUJjvYFGcpyWT0fuz3dEif72AKkgn4IIv6GW5hJz1uXGUUn9QSjAf/RMu2vU4ngSmC+1+iB8Q4FhxKpzvm6+1QFiQ09DQLfW/YyC2VurTftFVIq1GctyI4la0stLaqqsuDxwW/DqtHIuCtVZSB5wLvbPXSXoM4TvtD7Lg31iyamGb85IfHxJ4mmCSH07JYtuZEj7xRMqy3OGDB7F9lexARBgsYOkHJgG2CdJu/5rOHHECYZkQsnQ3cLkNZSHBeydye4rHiYCiqdfAhnzKDXLSQYZtrfwtga/E0CrSJq/o075HzTc4KmSAGaPm9i36/TSGAhK8t9YiejwQm9dMbmrbKZtkk7+97Dls/PDJwGWN9n+TO5nEp1IBuAC7tNmtA+KNuP7aPpwP6zefumxgiL0Dh20oq8WelgYyOVKbcO07neJLatkRxqSkU08I0uIbythCzXHzpkT9LTDq4ycbgxltFElVM04GqRhb0n1gCTvLkFjfdg==', requestid: '5QP07VQ89YKHCCJP', hostid: 'ESrdZWeNF0v1J0iHKGzswkjkXgoIOpt7P1oViJf89PMeKZStjrhNtlK2RjBpjMaovANV+dIBj6o=', amzRequestid: null, amzId2: null, amzBucketRegion: null } ``` ### Expected result Use the pipeline UI to explore the S3 artifacts in China regions. ### Materials and Reference I think [this line](https://github.com/kubeflow/pipelines/blob/086f0c960dcae0a7fd3d75b05e3e4f93445b2341/frontend/server/configs.ts#L127) should not be hardcoded. <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7995/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7995/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7990
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7990/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7990/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7990/events
https://github.com/kubeflow/pipelines/issues/7990
1,296,556,062
I_kwDOB-71UM5NR-Ae
7,990
[backend] discussion - make jobs and runs top level resources?
{ "login": "Linchin", "id": 12806577, "node_id": "MDQ6VXNlcjEyODA2NTc3", "avatar_url": "https://avatars.githubusercontent.com/u/12806577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Linchin", "html_url": "https://github.com/Linchin", "followers_url": "https://api.github.com/users/Linchin/followers", "following_url": "https://api.github.com/users/Linchin/following{/other_user}", "gists_url": "https://api.github.com/users/Linchin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Linchin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Linchin/subscriptions", "organizations_url": "https://api.github.com/users/Linchin/orgs", "repos_url": "https://api.github.com/users/Linchin/repos", "events_url": "https://api.github.com/users/Linchin/events{/privacy}", "received_events_url": "https://api.github.com/users/Linchin/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1682717377, "node_id": "MDU6TGFiZWwxNjgyNzE3Mzc3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/discussion", "name": "kind/discussion", "color": "ecfc15", "default": false, "description": "" } ]
open
false
null
[]
null
[ "/cc @zijianjoy @chensun @difince @IronPan @annajung \r\nWould be great to see your thoughts on this.", "The decision is already taken in v2beta design [doc](https://docs.google.com/document/d/19oL11PfTZyuevgoMR00JlwMdJuR6U8to0N3CoMmtqeg/edit#) \r\nThe choisen approach is the experiment to be passed as an argument: \r\n`/apis/v2beta1/experiments/{experiment_id}/runs?experiment_id={experiment_id}`\r\nLets close this issue. \r\ncc: @Linchin " ]
"2022-07-06T22:28:53"
"2022-12-08T12:16:12"
null
COLLABORATOR
null
Currently we have jobs and runs as sub-resources of experiments, i.e., a job or a run must belong to one and only one experiment. This is reflected in creating runs and jobs by specifying resource references. In v2 API, we are planning to [retire resource references](https://docs.google.com/document/d/19OfU-hIsY4xBKA6b_F3dYIbSgrLMrVshucUSsyqotQ4/edit#), and reflect the resources (mostly `namespace` and `experiment`) in URLs. We are considering two options to specify experiments: Option 1: `GET "/apis/v2beta1/experiments/{exp_id}/runs"` Option 2: `GET "/apis/v2beta1/runs?experiment={exp_id}"` [Option 1] implies hierarchy in the URL call, while [Option 2] has experiment id as a required parameter. [Option 2] gives us more leeway in future changes in case we want to alter the hierarchical structure between jobs/runs and experiments (For example, maybe runs/jobs do not have to belong to any experiment). I wanted to gauge interest in the community in having runs/jobs as top level resources, and see if there is any further complication that needs to be addressed. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7990/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7990/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7986
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7986/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7986/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7986/events
https://github.com/kubeflow/pipelines/issues/7986
1,295,339,675
I_kwDOB-71UM5NNVCb
7,986
[emissary] LibreOffice fails with emissary runtime.
{ "login": "amosaini", "id": 95207359, "node_id": "U_kgDOBay_vw", "avatar_url": "https://avatars.githubusercontent.com/u/95207359?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amosaini", "html_url": "https://github.com/amosaini", "followers_url": "https://api.github.com/users/amosaini/followers", "following_url": "https://api.github.com/users/amosaini/following{/other_user}", "gists_url": "https://api.github.com/users/amosaini/gists{/gist_id}", "starred_url": "https://api.github.com/users/amosaini/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amosaini/subscriptions", "organizations_url": "https://api.github.com/users/amosaini/orgs", "repos_url": "https://api.github.com/users/amosaini/repos", "events_url": "https://api.github.com/users/amosaini/events{/privacy}", "received_events_url": "https://api.github.com/users/amosaini/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "With emissary executor, container cannot run commands with root privilege. Is that possible that `libreoffice` relies on some root privilege to run? Seems like this is out of control of KFP, and you have created https://github.com/argoproj/argo-workflows/issues/9117. " ]
"2022-07-06T07:26:51"
"2022-07-28T06:41:25"
"2022-07-14T23:12:28"
NONE
null
## Summary What happened/what you expected to happen? I am need to use libreoffice headless to convert docx file to pdf. This is working execellent in Vanilla k8s and Databricks but when i do the same in Kubeflow which uses argo workflow at its backend it does not produce any output. What version are you running? argoproj.io/v1alpha1 Kubeflow 1.4 ## Diagnostics Paste the smallest workflow that reproduces the bug. We must be able to run the workflow. ``` apiVersion: v1 kind: PersistentVolumeClaim metadata: name: libreoffice-pv-claim spec: storageClassName: gp2 accessModes: - ReadWriteOnce resources: requests: storage: 2Gi --- apiVersion: v1 kind: Pod metadata: name: libreoffice spec: containers: - name: libreoffice-container image: domnulnopcea/libreoffice-headless:latest command: ["libreoffice", "--headless", "--convert-to","pdf" ,"/tests/288.pptx","--outdir", "/tests"] volumeMounts: - mountPath: "/tests" name: libreoffice-storage volumes: - name: libreoffice-storage persistentVolumeClaim: claimName: libreoffice-pv-claim tolerations: - key: project operator: Equal value: cd-msr effect: NoSchedule --- apiVersion: v1 kind: Pod metadata: name: libreoffice-bash spec: containers: - name: libreoffice-container image: ubuntu:18.04 command: ["/bin/sleep", "3650d"] volumeMounts: - mountPath: "/tests" name: libreoffice-storage volumes: - name: libreoffice-storage persistentVolumeClaim: claimName: libreoffice-pv-claim tolerations: - key: project operator: Equal value: cd-msr effect: NoSchedule ``` This is the yaml I am using. I am then manually copying the input files ``` kubectl cp ./288.pptx libreoffice-bash:/tests/ kubectl cp ./dummy.pptx libreoffice-bash:/tests/ ``` This is working but when I tries to do the same in Kubeflow it doesn't was. The script executes without producing any output file. ``` import kfp import kfp.components as components import kfp.dsl as dsl from kfp.components import InputPath, OutputPath @components.create_component_from_func def download_file(s3_folder_path,object_name): input_file_path=s3_folder_path+"/"+object_name import subprocess subprocess.run('pip install boto3'.split()) # Download file import boto3 s3=boto3.client('s3') s3.download_file('qa-cd-msr-20220524050318415700000001', input_file_path, '/tmp/input.pptx') print(input_file_path + " file is downloaded...Executing libreoffice conversion") subprocess.run("ls -ltr /tmp".split()) def convert_to_pdf(): import subprocess def exec_cmd(cmd)->(any,str): print("Executing "+cmd) result=subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout=result.stdout.decode('utf-8') + '\n'+ result.stderr.decode('utf-8') print("stdout: "+stdout) return stdout exec_cmd("libreoffice --headless --convert-to pdf /files/input.pptx --outdir /files") exec_cmd("ls -ltr /files") convert_to_pdf_op = components.func_to_container_op(convert_to_pdf, base_image= "domnulnopcea/libreoffice-headless:latest") @dsl.pipeline( name="Libreoffice", description="Libreoffice", ) def sample_pipeline(s3_folder_path:str="/mpsr/decks", object_name:str="Adcetris_master_40.pptx"): vop = dsl.VolumeOp( name="create-pvc", resource_name="my-pvc", modes=dsl.VOLUME_MODE_RWO, size="1Gi" ) download = download_file(s3_folder_path,object_name).add_pvolumes({"/tmp": vop.volume}) convert = convert_to_pdf_op().add_pvolumes({"/files": download.pvolume}) convert.execution_options.caching_strategy.max_cache_staleness = "P0D" convert.after(download) client = kfp.Client() experiment = client.create_experiment( name="Libreoffice", description="Libreoffice", namespace="cd-msr" ) client.create_run_from_pipeline_func( sample_pipeline, arguments={"s3_folder_path":"/mpsr/decks","object_name":"dummy1.pptx"}, run_name="libreoffice", experiment_name="Libreoffice" ) ``` Output : <img width="565" alt="image" src="https://user-images.githubusercontent.com/95207359/177474695-f31b195e-2358-4823-abe3-0b4276a5bd63.png"> ignore the error here. I was also getting this in vanilla k8s but it gives the output there. It succeed with docker runtime. <img width="766" alt="image" src="https://user-images.githubusercontent.com/95207359/177497023-83c67169-de05-43ff-bcbb-531a14e89d9e.png"> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7986/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7986/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7984
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7984/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7984/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7984/events
https://github.com/kubeflow/pipelines/issues/7984
1,295,052,551
I_kwDOB-71UM5NMO8H
7,984
[bug] TFJob launcher pipeline task fails when `delete_finished_tfjob` flag is `True`
{ "login": "eundonglee", "id": 67428295, "node_id": "MDQ6VXNlcjY3NDI4Mjk1", "avatar_url": "https://avatars.githubusercontent.com/u/67428295?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eundonglee", "html_url": "https://github.com/eundonglee", "followers_url": "https://api.github.com/users/eundonglee/followers", "following_url": "https://api.github.com/users/eundonglee/following{/other_user}", "gists_url": "https://api.github.com/users/eundonglee/gists{/gist_id}", "starred_url": "https://api.github.com/users/eundonglee/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eundonglee/subscriptions", "organizations_url": "https://api.github.com/users/eundonglee/orgs", "repos_url": "https://api.github.com/users/eundonglee/repos", "events_url": "https://api.github.com/users/eundonglee/events{/privacy}", "received_events_url": "https://api.github.com/users/eundonglee/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "I found this PR in `kubeflow/training-operator` repo about similar problem.\r\nhttps://github.com/kubeflow/training-operator/pull/1281\r\n\r\nI think those changes above should be applied here and also to `nikenano/launchernew` container image.\r\nhttps://github.com/kubeflow/pipelines/blob/127dab4d4671849d596c455f4619ea807d22f6ea/components/kubeflow/common/launch_crd.py#L117-L123", "The code might have been fixed, but the image in the component.yaml has not been updated so the changes are not propagated." ]
"2022-07-06T02:58:07"
"2022-10-06T20:40:44"
"2022-07-13T07:37:03"
CONTRIBUTOR
null
https://github.com/kubeflow/pipelines/blob/127dab4d4671849d596c455f4619ea807d22f6ea/components/kubeflow/launcher/component.yaml#L19 When `delete_finished_tfjob` flag is `True`, TFJob launcher task fails with error below. ``` Traceback (most recent call last): File "/ml/launch_tfjob.py", line 136, in <module> main() File "/ml/launch_tfjob.py", line 133, in main tfjob.delete(args.name, args.namespace) File "/ml/launch_crd.py", line 115, in delete body) TypeError: delete_namespaced_custom_object() takes exactly 6 arguments (7 given) ``` I think it's some kind of kubernetes client SDK version issue in `nikenano/launchernew:latest` container image.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7984/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7984/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7983
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7983/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7983/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7983/events
https://github.com/kubeflow/pipelines/issues/7983
1,294,522,107
I_kwDOB-71UM5NKNb7
7,983
[sdk] Cut a new release that supports `typing_extensions 4.0`?
{ "login": "alanhdu", "id": 1914111, "node_id": "MDQ6VXNlcjE5MTQxMTE=", "avatar_url": "https://avatars.githubusercontent.com/u/1914111?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alanhdu", "html_url": "https://github.com/alanhdu", "followers_url": "https://api.github.com/users/alanhdu/followers", "following_url": "https://api.github.com/users/alanhdu/following{/other_user}", "gists_url": "https://api.github.com/users/alanhdu/gists{/gist_id}", "starred_url": "https://api.github.com/users/alanhdu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alanhdu/subscriptions", "organizations_url": "https://api.github.com/users/alanhdu/orgs", "repos_url": "https://api.github.com/users/alanhdu/repos", "events_url": "https://api.github.com/users/alanhdu/events{/privacy}", "received_events_url": "https://api.github.com/users/alanhdu/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "think this is a dup of https://github.com/kubeflow/pipelines/pull/7801#issuecomment-1151064795 ?\r\nWith that said, any chance of getting a rough ETA of when 1.8.13 will be cut? =)", "Can prob close this, kfp 1.8.13 was cut yesterday. \r\nThanks!" ]
"2022-07-05T16:34:35"
"2022-07-12T15:54:51"
"2022-07-12T15:54:51"
NONE
null
Currently, the latest `kfp=1.8.12` pins `typing_extensions to ` 'typing-extensions>=3.7.4,<4;python_version<"3.9"',`. This is a bit of a problem for us, since we have another dependency (PyTorch Lightning) that requires `typing-extensions >= 4`. Would it be possible to cut a new release that supports the latest typing extensions?
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7983/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/kubeflow/pipelines/issues/7983/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7982
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7982/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7982/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7982/events
https://github.com/kubeflow/pipelines/issues/7982
1,294,232,642
I_kwDOB-71UM5NJGxC
7,982
[backend] DeleteExperiment does not clean up all relevant children objects from the DB
{ "login": "difince", "id": 11557050, "node_id": "MDQ6VXNlcjExNTU3MDUw", "avatar_url": "https://avatars.githubusercontent.com/u/11557050?v=4", "gravatar_id": "", "url": "https://api.github.com/users/difince", "html_url": "https://github.com/difince", "followers_url": "https://api.github.com/users/difince/followers", "following_url": "https://api.github.com/users/difince/following{/other_user}", "gists_url": "https://api.github.com/users/difince/gists{/gist_id}", "starred_url": "https://api.github.com/users/difince/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/difince/subscriptions", "organizations_url": "https://api.github.com/users/difince/orgs", "repos_url": "https://api.github.com/users/difince/repos", "events_url": "https://api.github.com/users/difince/events{/privacy}", "received_events_url": "https://api.github.com/users/difince/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "I just saw this [comment](https://github.com/kubeflow/pipelines/blob/127dab4d4671849d596c455f4619ea807d22f6ea/backend/api/experiment.proto#L84-L87) in the code. It looks that the current behavior is intended to behave this way - it is not a bug. But IMO, the server should self-validate that all runs/jobs are deleted prior to letting the experiment be deleted, and thus prevent the DB goes in an inconsistent state. \r\n\r\nThe proposed above \"fix/behavior\" still makes sense to me. ", "As you found out this is currently by design, I can see arguments from both side. We can probably keep this issue for seeking opinions." ]
"2022-07-05T12:27:54"
"2022-07-14T22:57:48"
null
MEMBER
null
### Steps to reproduce When [deleteExperiment](https://github.com/kubeflow/pipelines/blob/127dab4d4671849d596c455f4619ea807d22f6ea/backend/src/apiserver/server/experiment_server.go#L173) is called, not all children objects ( like Runs/ resource_references) are deleted from MySQL. DeleteExperiment cannot be called from the front-end/GUI, but the user still could execute a cURL request. After an experiment is deleted the DB could get in an inconsistent state - having Runs that belong to an un-existing experiment - this leads to following up 404 exceptions in the frond-end. ### Proposed fix / behaviour - IMO, delete experiments should be enabled only if the experiments are in an **Archived** state. So, validation needs to be added prior experiment to be deleted - In order to clean up all Experiment's resource_references : [s.resourceReferenceStore.DeleteResourceReferences(tx, id, common.Run)](https://github.com/kubeflow/pipelines/blob/e3678e1aadcb3be7bbd4f27059ae64db5631222e/backend/src/apiserver/storage/experiment_store.go#L238) > should be changed to `s.resourceReferenceStore.DeleteResourceReferences(tx, id, common.Experiment) ` - All related Runs to be cleaned up. <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7982/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7982/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7972
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7972/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7972/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7972/events
https://github.com/kubeflow/pipelines/issues/7972
1,290,369,750
I_kwDOB-71UM5M6XrW
7,972
[backend] Caching does not work when using volumes for the data_passing_method
{ "login": "vereba", "id": 55053898, "node_id": "MDQ6VXNlcjU1MDUzODk4", "avatar_url": "https://avatars.githubusercontent.com/u/55053898?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vereba", "html_url": "https://github.com/vereba", "followers_url": "https://api.github.com/users/vereba/followers", "following_url": "https://api.github.com/users/vereba/following{/other_user}", "gists_url": "https://api.github.com/users/vereba/gists{/gist_id}", "starred_url": "https://api.github.com/users/vereba/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vereba/subscriptions", "organizations_url": "https://api.github.com/users/vereba/orgs", "repos_url": "https://api.github.com/users/vereba/repos", "events_url": "https://api.github.com/users/vereba/events{/privacy}", "received_events_url": "https://api.github.com/users/vereba/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[]
"2022-06-30T16:18:17"
"2022-07-08T08:25:48"
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Install the Kubeflow Manifests manually via manifests (https://github.com/kubeflow/manifests#installation), install with a single command * KFP version: 1.8.2 * KFP SDK version: * kfp 1.8.12 * kfp-server-api 1.8.2 ### Steps to reproduce If I change the method to use volumes and pass my Artifacts with OutputPath/InputPath, the cache mechanism does not work when I re-run the exact same pipeline. The cache keys of the pipeline steps are not identical. The cache works as expected when using the Artifact Storage. ### Expected result Caching should also work for data passing via volumes. ### Materials and Reference <b>Sample code</b> Note: "my-volume" should be replaced with existing PVC ``` python from kubernetes.client.models import V1Volume, V1PersistentVolumeClaimVolumeSource import kfp import kfp.dsl as dsl from kfp.dsl import data_passing_methods from kfp.components import OutputPath, InputPath, func_to_container_op # arbitrary python functions def load(data_path: OutputPath()): from sklearn import datasets import pandas as pd data = datasets.load_iris() df = pd.DataFrame(data=data.data, columns= ["Petal Length", "Petal Width", "Sepal Length", "Sepal Width"]) df.to_csv(data_path) def print_out(data_path: InputPath()): import pandas as pd data = pd.read_csv(data_path) print(data.head()) # convert to ops load_op = func_to_container_op(load, base_image='python:3.9.7', packages_to_install=["scikit-learn","pandas"]) print_out_op = func_to_container_op(print_out, base_image='python:3.9.7', packages_to_install=["pandas"]) # select existing volume for data passing volume_based_data_passing_method = data_passing_methods.KubernetesVolume( volume=V1Volume( name="my-volume", persistent_volume_claim=V1PersistentVolumeClaimVolumeSource( claim_name="my-volume"), ), path_prefix='artifacts/', ) # pipeline def pipeline(): load_task = load_op() printing_task = print_out_op(data=load_task.outputs ["data"]) # select data passing method kfp.dsl.get_pipeline_conf().data_passing_method = volume_based_data_passing_method client = kfp.Client() # run pipeline client.create_run_from_pipeline_func( pipeline, experiment_name="data_passing", run_name="via_volume", arguments={}, enable_caching=True ) ``` **Further information:** The caching works as expected if the data passing method is not changed. Only when you change it to using volumes it doesn't work. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7972/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7972/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7971
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7971/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7971/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7971/events
https://github.com/kubeflow/pipelines/issues/7971
1,290,199,934
I_kwDOB-71UM5M5uN-
7,971
Metrics using SDK
{ "login": "bertinma", "id": 78208091, "node_id": "MDQ6VXNlcjc4MjA4MDkx", "avatar_url": "https://avatars.githubusercontent.com/u/78208091?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bertinma", "html_url": "https://github.com/bertinma", "followers_url": "https://api.github.com/users/bertinma/followers", "following_url": "https://api.github.com/users/bertinma/following{/other_user}", "gists_url": "https://api.github.com/users/bertinma/gists{/gist_id}", "starred_url": "https://api.github.com/users/bertinma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bertinma/subscriptions", "organizations_url": "https://api.github.com/users/bertinma/orgs", "repos_url": "https://api.github.com/users/bertinma/repos", "events_url": "https://api.github.com/users/bertinma/events{/privacy}", "received_events_url": "https://api.github.com/users/bertinma/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "@bertinma how did you resolve the issue?", "I create a dictionary with my metrics and save to a minIO output path like this \r\n\r\n`metrics = {\r\n 'metrics': [\r\n {\r\n 'name': 'loss',\r\n 'numberValue': self.metrics_dict['loss'],\r\n 'format': 'RAW',\r\n },\r\n {\r\n 'name': 'iou_score',\r\n 'numberValue': self.metrics_dict['iou_score'],\r\n 'format': 'RAW',\r\n },\r\n {\r\n 'name': 'f1-score',\r\n 'numberValue': self.metrics_dict['f1-score'],\r\n 'format': 'RAW'\r\n },\r\n ]\r\n }`\r\n\r\n with open(Path(args.metrics) / \"mlpipeline-metrics.json\", 'w') as f:\r\n json.dump(metrics, f)`\r\n\r\n\r\nThen I create a step to display them \r\n\r\n\r\n`from kfp.components import InputPath, OutputPath\r\ndef produce_metrics(metrics: InputPath('LocalPath'), roc_curve_values: InputPath('LocalPath'), mlpipeline_metrics_path: OutputPath('Metrics')):\r\n import json\r\n from pathlib import Path\r\n import pickle\r\n import pandas as pd \r\n import os\r\n with open(Path(metrics) / 'mlpipeline-metrics.json', 'r') as f:\r\n metrics = json.load(f)\r\n with open(mlpipeline_metrics_path, 'w') as f:\r\n json.dump(metrics, f)`" ]
"2022-06-30T14:13:21"
"2022-09-02T13:34:42"
"2022-06-30T15:52:29"
NONE
null
Hi, I'm. using kip to build my kubeflow pipeline following this method/tutorial : https://www.kubeflow.org/docs/components/pipelines/sdk/component-development/ So I have a python code, component.yaml, Dockerfile and build_image.sh files for each pipeline step. I want to track metrics produced during evaluation step. My codes are like that : * eval.py metrics': [ { 'name': 'loss', 'numberValue': self.metrics_dict['loss'], 'format': 'RAW', }, { 'name': 'iou_score', 'numberValue': self.metrics_dict['iou_score'], 'format': 'PERCENTAGE', }, { 'name': 'f1-score', 'numberValue': self.metrics_dict['f1-score'], 'format': 'PERCENTAGE' }, ] } print(args.metrics) with open(Path(args.metrics) / "mlpipeline-metrics.json", 'w') as f: json.dump(metrics, f) * component.yaml name : eval_seg description: Evaluate segmentation model outputs: - {name: MLPipeline Metrics, type: Metrics, description: "metrics path"} implementation: container: image: gcr.io/.../eval_seg@sha256:... command: [ python3.8, eval.py, --metrics, {outputPath: MLPipeline Metrics} ] The step finished correctly but no metrics and artifacts were found for this step in "Visualizations" and for the whole pipeline in "Run output" tab. Where I am wrong ? Is there a method to get metrics using this kubeflow method or I have to change how to build my pipeline ? Thanks for your help ! ################################### kfp version : 1.8.12 (installed with pip) kubeflow : 1.7.1 Kubernetes : 1.21.11-gke.900 Cloud deployment : GCP AI Pipelines
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7971/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7971/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7963
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7963/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7963/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7963/events
https://github.com/kubeflow/pipelines/issues/7963
1,288,081,345
I_kwDOB-71UM5Mxo_B
7,963
[backend] Terminate Behavior Regression between Workflow Controllers 2.x vs 3.x (HEAD)
{ "login": "aabbccddeeffgghhii1438", "id": 35978194, "node_id": "MDQ6VXNlcjM1OTc4MTk0", "avatar_url": "https://avatars.githubusercontent.com/u/35978194?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aabbccddeeffgghhii1438", "html_url": "https://github.com/aabbccddeeffgghhii1438", "followers_url": "https://api.github.com/users/aabbccddeeffgghhii1438/followers", "following_url": "https://api.github.com/users/aabbccddeeffgghhii1438/following{/other_user}", "gists_url": "https://api.github.com/users/aabbccddeeffgghhii1438/gists{/gist_id}", "starred_url": "https://api.github.com/users/aabbccddeeffgghhii1438/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aabbccddeeffgghhii1438/subscriptions", "organizations_url": "https://api.github.com/users/aabbccddeeffgghhii1438/orgs", "repos_url": "https://api.github.com/users/aabbccddeeffgghhii1438/repos", "events_url": "https://api.github.com/users/aabbccddeeffgghhii1438/events{/privacy}", "received_events_url": "https://api.github.com/users/aabbccddeeffgghhii1438/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Do note that the expected behavior was true for Argo < 3, as the same function would fall through. I.e https://github.com/argoproj/argo-workflows/blob/v2.12.13/workflow/controller/exec_control.go#L89.", "This will be resolved after we upgrade argo to version 3.3.", "Is there a timeline for this? For this upgrade, is it for 2.0, or 1.8.x?", "Additionally some of the pre-existing terminate commands do not work due to a change in the way they killed containers and the signals that were sent. Looks like they fixed this in https://github.com/argoproj/argo-workflows/issues/8687 what is the communities path-forward to regain this functionality?", "@Linchin When can we expect version upgrade to 3.3?" ]
"2022-06-29T02:03:52"
"2023-08-28T20:59:04"
null
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.8.2 * KFP SDK version: 1.8.2 ### Steps to reproduce Currently, Kubeflow's terminate function works by setting activeDeadlineSeconds to 0. However, argo's controller loop only checks this for Pending pods, so a long running pod must run to completion before terminating. This has since been fixed on Argo's workflow controller with [commit](https://github.com/argoproj/argo-workflows/pull/8065). However, Argo 3.3 is fundamentally incompatible with Kubeflow as updates to workflows are now done by WorkflowTaskResult instead of patching pods. ### Expected result Ideally, clicking terminate instantly terminates the workflow and any running pod. This behavior is honored by the above commit. However, due to other changes there is no feasible route to upgrade ATM. If Kubeflow wishes to stay on 3.2.3, perhaps it should be considered to cherrypick out the patch? ### Materials and Reference <!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. --> https://github.com/argoproj/argo-workflows/pull/8065 --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7963/reactions", "total_count": 4, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/kubeflow/pipelines/issues/7963/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7947
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7947/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7947/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7947/events
https://github.com/kubeflow/pipelines/issues/7947
1,286,171,330
I_kwDOB-71UM5MqWrC
7,947
[sdk] The component method .after() not working properly with multiple components from the same spec.yaml file
{ "login": "msvensson222", "id": 44285698, "node_id": "MDQ6VXNlcjQ0Mjg1Njk4", "avatar_url": "https://avatars.githubusercontent.com/u/44285698?v=4", "gravatar_id": "", "url": "https://api.github.com/users/msvensson222", "html_url": "https://github.com/msvensson222", "followers_url": "https://api.github.com/users/msvensson222/followers", "following_url": "https://api.github.com/users/msvensson222/following{/other_user}", "gists_url": "https://api.github.com/users/msvensson222/gists{/gist_id}", "starred_url": "https://api.github.com/users/msvensson222/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/msvensson222/subscriptions", "organizations_url": "https://api.github.com/users/msvensson222/orgs", "repos_url": "https://api.github.com/users/msvensson222/repos", "events_url": "https://api.github.com/users/msvensson222/events{/privacy}", "received_events_url": "https://api.github.com/users/msvensson222/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Could you write them as graph components and see if that works? I'm not sure using two \".after()\"s consecutively will work. You may also try wrapping the first two steps into a single component, and execute the third one after it. See example here:\r\nhttps://github.com/kubeflow/pipelines/blob/master/samples/contrib/kubeflow-e2e-mnist/kubeflow-e2e-mnist.ipynb" ]
"2022-06-27T18:11:08"
"2022-06-30T22:37:57"
null
NONE
null
### Environment * KFP SDK version: 2.0.0a5 ### Purpose I am trying to build and run a pipeline using a reusable component, leveraging an image hosted in Google Container Registry, a `component-spec.yaml` file as well as the `load_component_from_file()` function. The pipeline consists of three components, where the first two should be executed independently (and loaded from the same reusable file), and the third one should be executed after the two first have been completed. For context, this component takes as input argument a tablename (which exists in our data warehouse (DWH)), and dumps it in a Google Cloud Storage Bucket (GCS). Because of this, having it as a reusable component is very convenient. ### Steps to reproduce I define a pipeline as such: ```python from kfp import dsl, compiler from kfp.components import load_component_from_file @dsl.pipeline( name='my-pipeline', description='Pipeline with a reusable component', pipeline_root='gs://my-bucket/pipeline' ) def pipeline( tablename_1: str = 'table1', tablename_2: str = 'table2', some_file_path: str = 'gs://my-bucket/some_folder/' ): # Snowflake queries and dump data dwh_to_gcs_0 = load_component_from_file("dwh-to-gcs.yaml") dwh_to_gcs_0( tablename=tablename_1 ) dwh_to_gcs_1 = load_component_from_file("dwh-to-gcs.yaml") dwh_to_gcs_1( tablename=tablename_2 ) final_component = load_component_from_file("final-component.yaml") final_component( file_path=some_file_path ) \ .after(dwh_to_gcs_0) \ .after(dwh_to_gcs_1) if __name__ == '__main__': compiler.Compiler().compile( pipeline_func=pipeline, package_path="./pipeline.json" ) ``` With the following component specification yaml-files: `dwh-to-gcs.yaml` ("Reusable"): ```yaml name: DWHtoGCS description: Dumps a table in the DWH as a csv-file into GS. inputs: - {name: tablename, type: String} implementation: container: image: eu.gcr.io/my-project/my-image:latest command: [ python3, main.py, --tablename, {inputValue: tablename} ] ``` and `final-component.yaml`: ```yaml name: FinalComponent description: Makes some computations from some files. inputs: - {name: file_path, type: String} implementation: container: image: eu.gcr.io/my-project/my-image:latest command: [ python3, main.py, --file_path, {inputValue: file_path} ] ``` When compiling the pipeline, by running `python3 pipeline.py`, I get the following `pipeline.json` file: ```json { "components": { "comp-dwhtogcs": { "executorLabel": "exec-dwhtogcs", "inputDefinitions": { "parameters": { "tablename": { "parameterType": "STRING" } } } }, "comp-dwhtogcs-2": { "executorLabel": "exec-dwhtogcs-2", "inputDefinitions": { "parameters": { "tablename": { "parameterType": "STRING" } } } }, "comp-finalcomponent": { "executorLabel": "exec-finalcomponent", "inputDefinitions": { "parameters": { "file_path": { "parameterType": "STRING" } } } } }, "defaultPipelineRoot": "gs://my-bucket/pipeline", "deploymentSpec": { "executors": { "exec-dwhtogcs": { "container": { "command": [ "python3", "main.py", "--tablename", "{{$.inputs.parameters['tablename']}}" ], "image": "eu.gcr.io/my-project/my-image:latest" } }, "exec-dwhtogcs-2": { "container": { "command": [ "python3", "main.py", "--tablename", "{{$.inputs.parameters['tablename']}}" ], "image": "eu.gcr.io/my-project/my-image:latest" } }, "exec-finalcomponent": { "container": { "command": [ "python3", "main.py", "--file_path", "{{$.inputs.parameters['file_path']}}" ], "image": "eu.gcr.io/my-project/my-image:latest" } } } }, "pipelineInfo": { "name": "my-pipeline" }, "root": { "dag": { "tasks": { "dwhtogcs": { "cachingOptions": { "enableCache": true }, "componentRef": { "name": "comp-dwhtogcs" }, "inputs": { "parameters": { "tablename": { "componentInputParameter": "tablename_1" } } }, "taskInfo": { "name": "dwhtogcs" } }, "dwhtogcs-2": { "cachingOptions": { "enableCache": true }, "componentRef": { "name": "comp-dwhtogcs-2" }, "inputs": { "parameters": { "tablename": { "componentInputParameter": "tablename_2" } } }, "taskInfo": { "name": "dwhtogcs-2" } }, "finalcomponent": { "cachingOptions": { "enableCache": true }, "componentRef": { "name": "comp-finalcomponent" }, "dependentTasks": [ "dwhtogcs" ], "inputs": { "parameters": { "file_path": { "componentInputParameter": "some_file_path" } } }, "taskInfo": { "name": "finalcomponent" } } } }, "inputDefinitions": { "parameters": { "some_file_path": { "defaultValue": "gs://my-bucket/some_folder/", "parameterType": "STRING" }, "tablename_1": { "defaultValue": "table1", "parameterType": "STRING" }, "tablename_2": { "defaultValue": "table2", "parameterType": "STRING" } } } }, "schemaVersion": "2.1.0", "sdkVersion": "kfp-2.0.0-alpha.5" } ``` Within this pipeline definition, note that the final component only has **one** dependant task: ```yaml ... "finalcomponent": { "cachingOptions": { "enableCache": true }, "componentRef": { "name": "comp-finalcomponent" }, "dependentTasks": [ "dwhtogcs" ], "inputs": { "parameters": { "file_path": { "componentInputParameter": "some_file_path" } } }, "taskInfo": { "name": "finalcomponent" } } ... ``` ### Expected result To clarify further, I specify in my `pipeline.py` file that the final component should be after **BOTH** previous steps; ```python final_component( file_path=some_file_path ) \ .after(dwh_to_gcs_0) \ .after(dwh_to_gcs_1) ``` I expect that to be reflected in the pipeline.json definition, but there, only **ONE** is listed (the name (lower-case) of the reusable component); ```yaml "dependentTasks": [ "dwhtogcs" ], ``` Moreover, to verify that the pipeline is also behaving this way if executed, I submitted it to Vertex AI, where it also displays the same behaviour. **Is this a bug, or am I somehow using the SDK in a non-ideal way?** Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7947/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7947/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7945
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7945/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7945/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7945/events
https://github.com/kubeflow/pipelines/issues/7945
1,284,915,508
I_kwDOB-71UM5MlkE0
7,945
Difference between add_volume and add_pvolume
{ "login": "rjtshrm", "id": 6351392, "node_id": "MDQ6VXNlcjYzNTEzOTI=", "avatar_url": "https://avatars.githubusercontent.com/u/6351392?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rjtshrm", "html_url": "https://github.com/rjtshrm", "followers_url": "https://api.github.com/users/rjtshrm/followers", "following_url": "https://api.github.com/users/rjtshrm/following{/other_user}", "gists_url": "https://api.github.com/users/rjtshrm/gists{/gist_id}", "starred_url": "https://api.github.com/users/rjtshrm/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rjtshrm/subscriptions", "organizations_url": "https://api.github.com/users/rjtshrm/orgs", "repos_url": "https://api.github.com/users/rjtshrm/repos", "events_url": "https://api.github.com/users/rjtshrm/events{/privacy}", "received_events_url": "https://api.github.com/users/rjtshrm/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Here is the documentation: \r\nhttps://kubeflow-pipelines.readthedocs.io/en/stable/source/kfp.dsl.html#kfp.dsl.BaseOp.add_volume\r\nhttps://kubeflow-pipelines.readthedocs.io/en/stable/source/kfp.dsl.html#kfp.dsl.BaseOp.add_pvolumes\r\n\r\nAre these what you are looking for?\r\n\r\nAs to volume vs. persistent volume, k8s has more explanation here:\r\nhttps://kubernetes.io/docs/concepts/storage/volumes/\r\nhttps://kubernetes.io/docs/concepts/storage/persistent-volumes/\r\n\r\n" ]
"2022-06-26T13:34:54"
"2022-06-30T22:59:31"
null
NONE
null
I couldn't find any good documentation b/w add_volume and add_pvolume and when to use which.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7945/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7945/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7939
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7939/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7939/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7939/events
https://github.com/kubeflow/pipelines/issues/7939
1,283,495,560
I_kwDOB-71UM5MgJaI
7,939
[backend] the cache server does not check whether the cached artifacts have been deleted
{ "login": "juliusvonkohout", "id": 45896133, "node_id": "MDQ6VXNlcjQ1ODk2MTMz", "avatar_url": "https://avatars.githubusercontent.com/u/45896133?v=4", "gravatar_id": "", "url": "https://api.github.com/users/juliusvonkohout", "html_url": "https://github.com/juliusvonkohout", "followers_url": "https://api.github.com/users/juliusvonkohout/followers", "following_url": "https://api.github.com/users/juliusvonkohout/following{/other_user}", "gists_url": "https://api.github.com/users/juliusvonkohout/gists{/gist_id}", "starred_url": "https://api.github.com/users/juliusvonkohout/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/juliusvonkohout/subscriptions", "organizations_url": "https://api.github.com/users/juliusvonkohout/orgs", "repos_url": "https://api.github.com/users/juliusvonkohout/repos", "events_url": "https://api.github.com/users/juliusvonkohout/events{/privacy}", "received_events_url": "https://api.github.com/users/juliusvonkohout/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "I'm not entirely sure about v1 behavior, but in v2, we don't make any assumption on whether an artifact has content in its path. For instance, `Metrics` artifact has nothing in its cloud storage path but only metadata in the MLMD database. So it's by design that we don't check the \"existence\" of artifacts with or without caching. \r\n\r\nYou could disable caching when submitting a pipeline run though, the SDK client has a parameter for that.", "> I'm not entirely sure about v1 behavior, but in v2, we don't make any assumption on whether an artifact has content in its path. For instance, `Metrics` artifact has nothing in its cloud storage path but only metadata in the MLMD database. So it's by design that we don't check the \"existence\" of artifacts with or without caching.\r\n> \r\n> You could disable caching when submitting a pipeline run though, the SDK client has a parameter for that.\r\n\r\nThis is a fix for v1 behavior only. I do not know about v2 behavior. The pipeline just breaks, if the S3 content is deleted so somehow that must be fixed. Do you have another proposal?", "No, I think it's by design. If users deletes artifacts, shouldn't that be considered user error?\r\nThe solution can be run again with cache option disabled.", "I believe that @juliusvonkohout raised a very important issue here. In many scenarios users are legally required to cleanup old records and keeping the cachedb in sync is definitely a pain point.\r\n\r\nIn the past, the kfp project has also not considered this a user error and it was even recommended as best practice for production grade deployments by @Bobgy (https://github.com/kubeflow/pipelines/issues/6204#issuecomment-903011330).", "> I believe that @juliusvonkohout raised a very important issue here. In many scenarios users are legally required to cleanup old records and keeping the cachedb in sync is definitely a pain point.\r\n> \r\n> In the past, the kfp project has also not considered this a user error and it was even recommended as best practice for production grade deployments by @Bobgy ([#6204 (comment)](https://github.com/kubeflow/pipelines/issues/6204#issuecomment-903011330)).\r\n\r\nYes, @chensun please reopen, it is clearly a bug.", "@chensun as a follow up to the meeting i do not see a global setting here https://www.kubeflow.org/docs/components/pipelines/overview/caching/", "Discussed this in today KFP community meeting, and I want to summarize it here:\r\n\r\n>> In the past, the kfp project has also not considered this a user error and it was even recommended as best practice for production grade deployments by @Bobgy (https://github.com/kubeflow/pipelines/issues/6204#issuecomment-903011330).\r\n>\r\n> Yes, @chensun please reopen, it is clearly a bug.\r\n\r\nFirst of all, what you quoted from @Bobgy (https://github.com/kubeflow/pipelines/issues/6204#issuecomment-903011330) doesn't conflict with what I said above. \r\n\r\nI didn't mean the action of deleting artifacts from cloud storage itself is a user error. My reply is within the specific context of this issue, deleting the artifacts while replying on caching to continue working is a user error. It's like in C, you have a pointer that points to some allocated memory, freeing the memory is absolutely normal, but keep accessing the pointer would be a user error. \r\n\r\nIf users would delete artifacts periodically, they should probably configure pipelines runs with a cache staleness setting (understood it's different from cache lifecycle but I think still solves this use case). Or they can disable caching for new runs after artifact deletion. From this point, this is a not a bug. \r\n\r\nI'm against the proposed fix because as I mentioned an artifact has a remote storage URI and metadata, both are optional. We can't assume there's content in the URI, and I'm not sure if we can assume the URI always exists even if users never write to it--it's rather an implementation detail that may vary for different platforms. We could check and verify this, but I don't think cache server should care at all an external system. \r\n\r\nI personally would prefer to improve the caching configuration story--if the existing caching setting is not that obvious or easy to use, maybe we can add a global config. Or as @connor-mccarthy mentioned, maybe we provide a way for users to delete cache entires in the database--it could be as simple as a script to begin with as long as it's well documented.\r\n\r\n", "I think the difference is that the user is not actively \"relying on caching\". For the user it just happens automatically when using the same component definition and arguments. Having this trigger automatically but then fail is confusing for users.\r\n\r\nIf I understand you correctly, you are suggesting that a user should manually disable caching when the corresponding artifact was deleted. But how would that work in practice? Does the user have to figure out the component definition and arguments that produced the artifact and note them down to disable caching whenever accidentally using the same definitions again in the future? It would be impossible to keep track of all items that the cache server still considers cached but are already deleted from storage (the cachedb is always growing and never cleaned after all).\r\n\r\nI agree that there are other approaches to solve this problem. For example you could have an option somewhere in the UI to delete an artifact which would clean the artifact from S3, mlmd and the cachedb (but then again that's not supported by mlmd as far as I know).\r\n\r\nBut recommending to delete artifacts from S3 is incompatible with the current caching behaviour in my eyes.", "> I personally would prefer to improve the caching configuration story--if the existing caching setting is not that obvious or easy to use, maybe we can add a global config. Or as @connor-mccarthy mentioned, maybe we provide a way for users to delete cache entires in the database--it could be as simple as a script to begin with as long as it's well documented.\r\n\r\nWe cannot expect the user to set it for every pipeline himself. That is too error prone as @MatthiasCarnein explains. We need a global expiration setting (maximum cache lifetime), for example an environment variable in the cache server that is respected by V1 and V2 pipelines. This setting could also enforce the deletion of old database entries according to the expiration setting. So we could reuse the parts of my PR that clean the database entries (cachedb and maybe mlmd for v2 ). Then we can use a lifecyclepolicy on the object storage with the same timeframe. The user can use a cache staleness up to the cache expiration duration then. Iit would be a user error then if he manually deletes Artifacts before the lifecycle policies does.\r\n\r\nDoes this sound reasonable to you?", "> I think the difference is that the user is not actively \"relying on caching\". For the user it just happens automatically when using the same component definition and arguments. Having this trigger automatically but then fail is confusing for users.\r\n\r\nThat's a fair argument. On the other hand though, I don't think it's too much to ask for from the users to understand that KFP has caching on by default. After all, I perceive KFP as an advanced developer tool. Users probably should understand the possible impacts of deleting artifacts.\r\n\r\n> If I understand you correctly, you are suggesting that a user should manually disable caching when the corresponding artifact was deleted. But how would that work in practice? Does the user have to figure out the component definition and arguments that produced the artifact and note them down to disable caching whenever accidentally using the same definitions again in the future?\r\n\r\nIn practice, how do users deleting artifacts? I guess they don't cherry-pick specific artifacts to delete but rather delete artifacts in bulk under some root path. Then they could disable caching for all their pipelines or a set of them. If they do delete artifacts in a precise cherry-picking manner, they probably tracks what pipelines and inputs produced those artifacts already.\r\n\r\n> I agree that there are other approaches to solve this problem. For example you could have an option somewhere in the UI to delete an artifact which would clean the artifact from S3, mlmd and the cachedb (but then again that's not supported by mlmd as far as I know).\r\n\r\n> But recommending to delete artifacts from S3 is incompatible with the current caching behaviour in my eyes.\r\n\r\nIndeed, if KFP provides the feature for user to delete artifacts, which is what Bobgy suggested in the other thread, then it should be responsible for cleaning up all the related items that should and can be deleted--cache entry definitely falls into this category. \r\nBut the current state is KFP doesn't offer such feature, and users are deleting artifacts on their own, which happens outside the KFP system. I don't this KFP should be required to respond to external actions.\r\n\r\nIn summary, I'm supportive of the following options:\r\n- Make artifact deleting a KFP feature so that KFP should do the full cleanup\r\n- Provide a script or API for user to delete cache entry, so they can easily do so after they delete artifacts on their own.\r\n- Make a global caching config, so users can set cache lifecycle/staleness that matches their artifact deletion policy or need.", "> In summary, I'm supportive of the following options:\r\n> \r\n> * Make artifact deleting a KFP feature so that KFP should do the full cleanup\r\n> \r\n> * Provide a script or API for user to delete cache entry, so they can easily do so after they delete artifacts on their own.\r\n> \r\n> * Make a global caching config, so users can set cache lifecycle/staleness that matches their artifact deletion policy or need.\r\n\r\nShall i implement the last item or do you want do it? It would just be an environment variable MAX_CACHE_STALENESS in the cache-server derived from the configmap pipeline-install-config that uses the same syntax as per pipeline cache settings (e.g. P30D). Then in the cache server we modify\r\nhttps://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/server/mutation.go#L120-L129 and\r\nhttps://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/server/watcher.go#L129-L136\r\nto read the environment variable instead of -1.\r\n\r\nFor example with \r\n``` Go\r\n if exists { \r\n maxCacheStalenessInSeconds = getMaxCacheStaleness(maxCacheStaleness) \r\n } \r\n else {\r\n maxCacheStalenessInSeconds = getMaxCacheStaleness(\"pod Environment variable maxCacheStaleness\") \r\n }\r\n```\r\n\r\nThen in https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L38-L47 and \r\nhttps://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L79-L93\r\n\r\nit will just be properly used. So we add three lines and an environment variable via the global configmap to satisfy most of the use cases.\r\n\r\nIf desired we could also add a CACHE_EXPIRATION variable and delete the database entry with https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L137-L143 if it is too old.\r\n\r\nFor example after https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L61-L89 we could add\r\n```\r\n\t\tif maxCacheStaleness == -1 || s.time.Now().UTC().Unix()-startedAtInSec <= podMaxCacheStaleness {\r\n\t\t\t...\r\n\t\t}\r\n if s.time.Now().UTC().Unix()-startedAtInSec > getMaxCacheStaleness(\"CACHE_EXPIRATION variable\") {\r\n err = executionCacheStore.DeleteExecutionCache(id)\r\n }\r\n```\r\n\r\nMaybe it would even be better if we would just periodically execute an SQL command that cleans all entries older than CACHE_EXPIRATION in the cachedb table.", "And is there a documentation for V2 caching?\r\nhttps://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/server/mutation.go#L97-L101", "Kubeflow definitely misses the ability to manage artifacts and I'm glad I am not the only one thinking so (also see #5783). I'd love to see invalidating the cache and deleting artifacts in the UI become a feature. \r\n\r\nHowever, [MLMD doesn't seem to support deleting artifacts anytime soon](https://github.com/google/ml-metadata/issues/69) and it wouldn't help us managing Kubeflow's cache, anyways. \r\n\r\nThus, I've written a short PoC suggesting a way this feature could be implemented. Its integration should be self-explanatory:\r\n\r\n![kubeflow_cache](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n\r\nCode and concept are still in a very early stage. For more info on how the backend manages the cache, see [here](https://github.com/TobiasGoerke/kubeflow/blob/master/components/cache-manager/src/database.py). \r\n\r\nI'm looking for feedback and discussions on this proposal and will gladly contribute to implementing it.", "> > In summary, I'm supportive of the following options:\r\n> > ```\r\n> > * Make artifact deleting a KFP feature so that KFP should do the full cleanup\r\n> > \r\n> > * Provide a script or API for user to delete cache entry, so they can easily do so after they delete artifacts on their own.\r\n> > \r\n> > * Make a global caching config, so users can set cache lifecycle/staleness that matches their artifact deletion policy or need.\r\n> > ```\r\n> \r\n> Shall i implement the last item or do you want do it? It would just be an environment variable MAX_CACHE_STALENESS in the cache-server derived from the configmap pipeline-install-config that uses the same syntax as per pipeline cache settings (e.g. P30D). Then in the cache server we modify\r\n> \r\n> https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/server/mutation.go#L120-L129\r\n> \r\n> and\r\n> https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/server/watcher.go#L129-L136\r\n> \r\n> \r\n> to read the environment variable instead of -1.\r\n> For example with\r\n> \r\n> ```go\r\n> if exists { \r\n> maxCacheStalenessInSeconds = getMaxCacheStaleness(maxCacheStaleness) \r\n> } \r\n> else {\r\n> maxCacheStalenessInSeconds = getMaxCacheStaleness(\"pod Environment variable maxCacheStaleness\") \r\n> }\r\n> ```\r\n> \r\n> Then in\r\n> \r\n> https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L38-L47\r\n> \r\n> and\r\n> https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L79-L93\r\n> \r\n> it will just be properly used. So we add three lines and an environment variable via the global configmap to satisfy most of the use cases.\r\n> \r\n> If desired we could also add a CACHE_EXPIRATION variable and delete the database entry with\r\n> \r\n> https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L137-L143\r\n> \r\n> if it is too old.\r\n> For example after\r\n> \r\n> https://github.com/kubeflow/pipelines/blob/555447f5c6453609c59db1a890cca1ec38156847/backend/src/cache/storage/execution_cache_store.go#L61-L89\r\n> \r\n> we could add\r\n> ```\r\n> \t\tif maxCacheStaleness == -1 || s.time.Now().UTC().Unix()-startedAtInSec <= podMaxCacheStaleness {\r\n> \t\t\t...\r\n> \t\t}\r\n> if s.time.Now().UTC().Unix()-startedAtInSec > getMaxCacheStaleness(\"CACHE_EXPIRATION variable\") {\r\n> err = executionCacheStore.DeleteExecutionCache(id)\r\n> }\r\n> ```\r\n> \r\n> Maybe it would even be better if we would just periodically execute an SQL command that cleans all entries older than CACHE_EXPIRATION in the cachedb table.\r\n\r\n@juliusvonkohout Thank you for the write up. Given this is a nontrivial user interface change. I would suggest you write a design doc so that we can conduct a proper design review by our team and the community. ", "> Kubeflow definitely misses the ability to manage artifacts and I'm glad I am not the only one thinking so (also see #5783). I'd love to see invalidating the cache and deleting artifacts in the UI become a feature.\r\n> \r\n> However, [MLMD doesn't seem to support deleting artifacts anytime soon](https://github.com/google/ml-metadata/issues/69) and it wouldn't help us managing Kubeflow's cache, anyways.\r\n> \r\n> Thus, I've written a short PoC suggesting a way this feature could be implemented. Its integration should be self-explanatory:\r\n> \r\n> ![kubeflow_cache](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif) [ ![kubeflow_cache](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif) ](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif) [ ](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> \r\n> Code and concept are still in a very early stage. For more info on how the backend manages the cache, see [here](https://github.com/TobiasGoerke/kubeflow/blob/master/components/cache-manager/src/database.py).\r\n> \r\n> I'm looking for feedback and discussions on this proposal and will gladly contribute to implementing it.\r\n\r\nThanks @TobiasGoerke , and same suggestion as writing a design doc so that we can review it properly.", "> > Kubeflow definitely misses the ability to manage artifacts and I'm glad I am not the only one thinking so (also see #5783). I'd love to see invalidating the cache and deleting artifacts in the UI become a feature.\r\n> > However, [MLMD doesn't seem to support deleting artifacts anytime soon](https://github.com/google/ml-metadata/issues/69) and it wouldn't help us managing Kubeflow's cache, anyways.\r\n> > Thus, I've written a short PoC suggesting a way this feature could be implemented. Its integration should be self-explanatory:\r\n> > ![kubeflow_cache](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > \r\n> > [\r\n> > \r\n> > ![kubeflow_cache](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > \r\n> > ](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > [\r\n> > \r\n> > \r\n> > \r\n> > ](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > \r\n> > [ ![kubeflow_cache](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif) ](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > \r\n> > [\r\n> > \r\n> > ![kubeflow_cache](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > \r\n> > ](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > \r\n> > [\r\n> > \r\n> > \r\n> > \r\n> > ](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > \r\n> > [ ](https://user-images.githubusercontent.com/13769461/182807832-5fcd0b49-5b84-4172-9b0a-4904c190dace.gif)\r\n> > Code and concept are still in a very early stage. For more info on how the backend manages the cache, see [here](https://github.com/TobiasGoerke/kubeflow/blob/master/components/cache-manager/src/database.py).\r\n> > I'm looking for feedback and discussions on this proposal and will gladly contribute to implementing it.\r\n> \r\n> Thanks @TobiasGoerke , and same suggestion as writing a design doc so that we can review it properly.\r\n\r\nI've created a [draft](https://docs.google.com/document/d/1WdPjTgoDj5AdPNYVzqYwVIWKsWjPPphYD3Ck4DFxSLA/edit#). What's the workflow for proposals? Where to post this proposal? Thanks.", "@TobiasGoerke just write me or Diana atanasova on the kubeflow slack. We have some experience with those drafts.", "> > > In summary, I'm supportive of the following options:\r\n> > > ```\r\n> > > * Make artifact deleting a KFP feature so that KFP should do the full cleanup\r\n> > > \r\n> > > * Provide a script or API for user to delete cache entry, so they can easily do so after they delete artifacts on their own.\r\n> > > \r\n> > > * Make a global caching config, so users can set cache lifecycle/staleness that matches their artifact deletion policy or need.\r\n> > > ```\r\n\r\nIs job level caching config an option? The global caching config might be inflexible if different pipelines artifacts have different TTL? Especially if multiple users are sharing the same environment.", "@IronPan \r\n\r\nYou can still set a per pipeline and per step cache staleness. Please check https://github.com/kubeflow/pipelines/pull/8270#discussion_r980508499 and the design document linked there.", "Sadly we have to be agnostic of the backend until the new unified storage architecture from https://github.com/kubeflow/pipelines/pull/7725#issuecomment-1277334000 is there. So i got https://github.com/kubeflow/pipelines/pull/8270 in instead of #7938 . V2 pipelines are not yet covered.", "V2 is discussed in https://docs.google.com/document/d/1_Hy1_KvuKh-heydg8qIyXYTiMfNPxePx4BHGpZT4xMk/edit?resourcekey=0-PTtkRPF5xpS3lD8dssqX1Q#" ]
"2022-06-24T09:21:23"
"2022-10-14T08:33:59"
null
MEMBER
null
### Environment Kubeflow 1.5.1 ### Steps to reproduce The cache server wrongly states that the step is cached even if the artifacts have been deleted from S3. We propose https://github.com/kubeflow/pipelines/pull/7938 that checks whether the folder actually still exists on S3. If the folder does not exist we do not wrongfully pretend that it is cached and instead delete the cachedb entry and let the pipeline step run again. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7939/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7939/timeline
null
reopened
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7935
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7935/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7935/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7935/events
https://github.com/kubeflow/pipelines/issues/7935
1,281,890,175
I_kwDOB-71UM5MaBd_
7,935
..
{ "login": "sciai-ai", "id": 52277510, "node_id": "MDQ6VXNlcjUyMjc3NTEw", "avatar_url": "https://avatars.githubusercontent.com/u/52277510?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sciai-ai", "html_url": "https://github.com/sciai-ai", "followers_url": "https://api.github.com/users/sciai-ai/followers", "following_url": "https://api.github.com/users/sciai-ai/following{/other_user}", "gists_url": "https://api.github.com/users/sciai-ai/gists{/gist_id}", "starred_url": "https://api.github.com/users/sciai-ai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sciai-ai/subscriptions", "organizations_url": "https://api.github.com/users/sciai-ai/orgs", "repos_url": "https://api.github.com/users/sciai-ai/repos", "events_url": "https://api.github.com/users/sciai-ai/events{/privacy}", "received_events_url": "https://api.github.com/users/sciai-ai/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-06-23T06:39:52"
"2022-06-26T03:11:31"
"2022-06-26T03:11:31"
NONE
null
..
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7935/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7935/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7929
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7929/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7929/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7929/events
https://github.com/kubeflow/pipelines/issues/7929
1,280,850,698
I_kwDOB-71UM5MWDsK
7,929
[frontend] Input/Output tab doesn't show information.
{ "login": "jlyaoyuli", "id": 56132941, "node_id": "MDQ6VXNlcjU2MTMyOTQx", "avatar_url": "https://avatars.githubusercontent.com/u/56132941?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jlyaoyuli", "html_url": "https://github.com/jlyaoyuli", "followers_url": "https://api.github.com/users/jlyaoyuli/followers", "following_url": "https://api.github.com/users/jlyaoyuli/following{/other_user}", "gists_url": "https://api.github.com/users/jlyaoyuli/gists{/gist_id}", "starred_url": "https://api.github.com/users/jlyaoyuli/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jlyaoyuli/subscriptions", "organizations_url": "https://api.github.com/users/jlyaoyuli/orgs", "repos_url": "https://api.github.com/users/jlyaoyuli/repos", "events_url": "https://api.github.com/users/jlyaoyuli/events{/privacy}", "received_events_url": "https://api.github.com/users/jlyaoyuli/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
{ "login": "Linchin", "id": 12806577, "node_id": "MDQ6VXNlcjEyODA2NTc3", "avatar_url": "https://avatars.githubusercontent.com/u/12806577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Linchin", "html_url": "https://github.com/Linchin", "followers_url": "https://api.github.com/users/Linchin/followers", "following_url": "https://api.github.com/users/Linchin/following{/other_user}", "gists_url": "https://api.github.com/users/Linchin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Linchin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Linchin/subscriptions", "organizations_url": "https://api.github.com/users/Linchin/orgs", "repos_url": "https://api.github.com/users/Linchin/repos", "events_url": "https://api.github.com/users/Linchin/events{/privacy}", "received_events_url": "https://api.github.com/users/Linchin/received_events", "type": "User", "site_admin": false }
[ { "login": "Linchin", "id": 12806577, "node_id": "MDQ6VXNlcjEyODA2NTc3", "avatar_url": "https://avatars.githubusercontent.com/u/12806577?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Linchin", "html_url": "https://github.com/Linchin", "followers_url": "https://api.github.com/users/Linchin/followers", "following_url": "https://api.github.com/users/Linchin/following{/other_user}", "gists_url": "https://api.github.com/users/Linchin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Linchin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Linchin/subscriptions", "organizations_url": "https://api.github.com/users/Linchin/orgs", "repos_url": "https://api.github.com/users/Linchin/repos", "events_url": "https://api.github.com/users/Linchin/events{/privacy}", "received_events_url": "https://api.github.com/users/Linchin/received_events", "type": "User", "site_admin": false } ]
null
[ "https://github.com/kubeflow/pipelines/blob/master/frontend/src/components/tabs/InputOutputTab.tsx#L149-L170\r\nhttps://github.com/kubeflow/pipelines/blob/master/frontend/src/mlmd/MlmdUtils.ts#L213-L228", "Execution details:\r\nhttps://screenshot.googleplex.com/7E9SGJGD6hbrC7v\r\n\r\nPattern(RegExp) & Key in Execution:\r\nhttps://screenshot.googleplex.com/pLtkNgtJ9vhmebX\r\n\r\n", "/assign @Linchin ", "https://github.com/kubeflow/pipelines/pull/8069" ]
"2022-06-22T20:34:33"
"2022-07-27T03:22:08"
"2022-07-27T03:22:08"
COLLABORATOR
null
We expect to see detailed information about the input and output in the Input/Output tab of components. Currently it is not able to display the info. https://screenshot.googleplex.com/3doJNfZJQ6AzmVb
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7929/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7929/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7923
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7923/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7923/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7923/events
https://github.com/kubeflow/pipelines/issues/7923
1,278,934,192
I_kwDOB-71UM5MOvyw
7,923
[feature] Implement an agnostic way to initialize the database
{ "login": "rimolive", "id": 813430, "node_id": "MDQ6VXNlcjgxMzQzMA==", "avatar_url": "https://avatars.githubusercontent.com/u/813430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rimolive", "html_url": "https://github.com/rimolive", "followers_url": "https://api.github.com/users/rimolive/followers", "following_url": "https://api.github.com/users/rimolive/following{/other_user}", "gists_url": "https://api.github.com/users/rimolive/gists{/gist_id}", "starred_url": "https://api.github.com/users/rimolive/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rimolive/subscriptions", "organizations_url": "https://api.github.com/users/rimolive/orgs", "repos_url": "https://api.github.com/users/rimolive/repos", "events_url": "https://api.github.com/users/rimolive/events{/privacy}", "received_events_url": "https://api.github.com/users/rimolive/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
null
[]
null
[ "KFP uses gorm which already provides abstraction. it might already be supported, or easy to tweak the config and support \r\nhttps://gorm.io/docs/connecting_to_the_database.html", "That's right, but there is something in the code that end up with a switch-case where if MySQL is not the driver used, it will return an error saying it is not supported.", "@IronPan so do you think we can add \"postgres\" as a new driver in the switch-case to begin with?\r\nalso linking the design docs to this issue: https://docs.google.com/document/d/1u50Ja5YltsESZkrS2f_KLAuRABeEv9QF8P0aetfh05U/edit#" ]
"2022-06-21T19:16:59"
"2022-07-06T17:33:32"
null
NONE
null
### Feature Area Currently, Kubeflow Pipelines relies on a specific database vendor (MySQL) which restrict users on database choice. <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> /area backend <!-- /area sdk --> <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? My proposal is a change in the code to make this database calls agnostic from the vendor. ### What is the use case or pain point? Suppose the user already uses PostgreSQL, and there is already a database running in his environment. My idea could make the user just point to the existing database instead of deploying another one. ### Is there a workaround currently? The is no workaround so far --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7923/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7923/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7917
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7917/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7917/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7917/events
https://github.com/kubeflow/pipelines/issues/7917
1,277,581,296
I_kwDOB-71UM5MJlfw
7,917
ask 'no2-pipeline-xxg59.marge-pos-neg-data' errored: Pod "no2-pipeline-xxg59-3831270412" is invalid: spec.volumes[3].name: Duplicate value: "no2-pvc"
{ "login": "MLHafizur", "id": 45520794, "node_id": "MDQ6VXNlcjQ1NTIwNzk0", "avatar_url": "https://avatars.githubusercontent.com/u/45520794?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MLHafizur", "html_url": "https://github.com/MLHafizur", "followers_url": "https://api.github.com/users/MLHafizur/followers", "following_url": "https://api.github.com/users/MLHafizur/following{/other_user}", "gists_url": "https://api.github.com/users/MLHafizur/gists{/gist_id}", "starred_url": "https://api.github.com/users/MLHafizur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MLHafizur/subscriptions", "organizations_url": "https://api.github.com/users/MLHafizur/orgs", "repos_url": "https://api.github.com/users/MLHafizur/repos", "events_url": "https://api.github.com/users/MLHafizur/events{/privacy}", "received_events_url": "https://api.github.com/users/MLHafizur/received_events", "type": "User", "site_admin": false }
[]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "Maybe it's because the two `pvolumes` used in `marge_pos_neg_data` actually point to the same persistent volume? As in this line:\r\n```\r\n pvolumes = {\"/mnt/positive/\": load_negative_data.pvolume, \"/mnt/negative/\": load_positive_data.pvolume}\r\n```\r\n" ]
"2022-06-21T01:16:25"
"2022-06-30T23:20:39"
null
NONE
null
Hi, I am trying to run a Kubeflow pipeline. Two steps will run in parallel and dump data to two different folders in PVC, then the third component will collect data from those t folders and merge them together and dump the merged data to another PVC folder. `vop = dsl.VolumeOp( name='no2-pvc', resource_name = "no2-pvc", size="100Gi", modes = dsl.VOLUME_MODE_RWO ) ##LOADING POSITIVE DATA## load_positive_data = dsl.ContainerOp( name='load_positive_data', image=load_positive_data_image, command="python", arguments=[ "/app/load_positive_data.py", ], pvolumes={"/mnt/positive/": vop.volume}).apply(gcp.use_gcp_secret("user-gcp-sa")) ##LOADING NEGATIVE DATA## load_negative_data = dsl.ContainerOp( name='load_negative_data', image=load_negative_data_image, command="python", arguments=[ "/app/load_negative_data.py", ], pvolumes={"/mnt/negative/": vop.volume}).apply(gcp.use_gcp_secret("user-gcp-sa")) ##MERGING POSITIVE AND NEGATIVE DATA## marge_pos_neg_data = dsl.ContainerOp( name='marge_pos_neg_data', image=marged_data_image, command="python", arguments=[ "/app/merge_neg_pos.py" ], pvolumes = {"/mnt/positive/": load_negative_data.pvolume, "/mnt/negative/": load_positive_data.pvolume} #volumes={'/mnt': vop.after(load_negative_data, load_positive_data)} ).apply(gcp.use_gcp_secret("user-gcp-sa")).after(load_positive_data, load_negative_data) ##PROCESSING MARGED DATA## process_marged_data = dsl.ContainerOp( name='process_data', image=perpare_merged_data_image, command="python", arguments=[ "/app/prepare_all_dataset.py" ], pvolumes = {"/mnt/pos_neg": marge_pos_neg_data.pvolume} ).apply(gcp.use_gcp_secret("user-gcp-sa")).after(marge_pos_neg_data)` load-positive-data and load-negative-data are working fine but the marge-pos-neg-data step is giving the following error: ``` This step is in Error state with this message: task 'no2-pipeline-x5kpd.marge-pos-neg-data' errored: Pod "no2-pipeline-x5kpd-2954674781" is invalid: spec.volumes[3].name: Duplicate value: "no2-pvc" ```
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7917/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7917/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7915
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7915/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7915/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7915/events
https://github.com/kubeflow/pipelines/issues/7915
1,275,832,061
I_kwDOB-71UM5MC6b9
7,915
Failed to copy data to Persistant Volume (PV) from GCS in Kubeflow Pipeline
{ "login": "MLHafizur", "id": 45520794, "node_id": "MDQ6VXNlcjQ1NTIwNzk0", "avatar_url": "https://avatars.githubusercontent.com/u/45520794?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MLHafizur", "html_url": "https://github.com/MLHafizur", "followers_url": "https://api.github.com/users/MLHafizur/followers", "following_url": "https://api.github.com/users/MLHafizur/following{/other_user}", "gists_url": "https://api.github.com/users/MLHafizur/gists{/gist_id}", "starred_url": "https://api.github.com/users/MLHafizur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MLHafizur/subscriptions", "organizations_url": "https://api.github.com/users/MLHafizur/orgs", "repos_url": "https://api.github.com/users/MLHafizur/repos", "events_url": "https://api.github.com/users/MLHafizur/events{/privacy}", "received_events_url": "https://api.github.com/users/MLHafizur/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-06-18T16:40:31"
"2022-06-20T21:09:48"
"2022-06-20T21:09:48"
NONE
null
Its been a long time since I am struggling to run a Kubeflow pipeline. I am trying this code to collect data from GCS and after filtering pass those data to Persistent Volume (PV) (/mnt/positive) to use as input in the next step: ``` def get_pos_data(analyte_name: str = "no2", run_name: str = "no2"): raw_files_path = 'postive_data_file/' # Checking for edge cases if raw_files_path == None or analyte_name == None or run_name == None: raise ValueError('Reading positive data failed: Component Configuration Invalid!') if not gstore.is_dir(raw_files_path): raise ValueError('Reading positive data failed: Raw files path do not exist') # Read files analyte_files = gstore.list_files(raw_files_path, recurse=True) if len(analyte_files) == 0: raise ValueError('Reading positive data failed: Raw files folder empty') sensor_cols = ["s"+ str(i) for i in range(1, 33)] removed_files = [] #Output path #output_file_path = 'output_positive_data_file' output_file_path='/mnt/positive' print("Num of {} files before filter: {}".format(analyte_name, len(analyte_files))) print('Processing files...') for analyte_file in analyte_files: content = pd.read_csv(f'gs://{gstore.get_bucket_name()}/{analyte_file}') if len(content[analyte_name].unique()) < 10: removed_files.append(analyte_file) continue if not('run_name' in content.columns): removed_files.append(analyte_file) continue if not('_'+ run_name + '_' in content.run_name[0]): removed_files.append(analyte_file) continue for an_input in sensor_cols: if an_input not in content.columns: removed_files.append(analyte_file) break analyte_filtered_files = [x for x in analyte_files if x not in removed_files] print("Num of {} files after filter: {}".format(analyte_name, len(analyte_filtered_files))) pos_files = analyte_filtered_files for x in pos_files: print(x) file = f'gs://{gstore.get_bucket_name()}/{x}' # Create temp local directory #local_file_path = os.path.join(output_file_path, os.path.basename(x) ) shutil.copy(file, f'{output_file_path}/{os.path.basename(x)}') #gstore.download_file_local(file, f'{output_file_path}/{os.path.basename(x)}') print('Moving Postive Data Completed!') ``` But getting follow error in Kubeflow Pipeline: ``` Num of no2 files before filter: 1832 Processing files... Num of no2 files after filter: 1832 Traceback (most recent call last): File "/app/load_positive_data.py", line 83, in <module> get_pos_data() File "/app/load_positive_data.py", line 77, in get_pos_data shutil.copy(path, output_file_path) File "/usr/local/lib/python3.7/shutil.py", line 248, in copy copyfile(src, dst, follow_symlinks=follow_symlinks) File "/usr/local/lib/python3.7/shutil.py", line 120, in copyfile with open(src, 'rb') as fsrc: FileNotFoundError: [Errno 2] No such file or directory: 'gs://pipe_ml-data/postive_data_file/IAS_2106091729_100083_e868608d.csv' ``` How may I overcome this issue and copy data from GCS to PVC?
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7915/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7915/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7909
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7909/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7909/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7909/events
https://github.com/kubeflow/pipelines/issues/7909
1,275,339,713
I_kwDOB-71UM5MBCPB
7,909
[bug] Minio S3 Gateway results in 'pipeline spec is invalid'
{ "login": "JuanValencia", "id": 3623306, "node_id": "MDQ6VXNlcjM2MjMzMDY=", "avatar_url": "https://avatars.githubusercontent.com/u/3623306?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JuanValencia", "html_url": "https://github.com/JuanValencia", "followers_url": "https://api.github.com/users/JuanValencia/followers", "following_url": "https://api.github.com/users/JuanValencia/following{/other_user}", "gists_url": "https://api.github.com/users/JuanValencia/gists{/gist_id}", "starred_url": "https://api.github.com/users/JuanValencia/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JuanValencia/subscriptions", "organizations_url": "https://api.github.com/users/JuanValencia/orgs", "repos_url": "https://api.github.com/users/JuanValencia/repos", "events_url": "https://api.github.com/users/JuanValencia/events{/privacy}", "received_events_url": "https://api.github.com/users/JuanValencia/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Could you please share the pipeline template that you used?", "It says in the description: [Tutorial] Data passing in python components", "I am facing the same error but just for tutorial pipeline example , I can create runs for pipelines I created" ]
"2022-06-17T18:22:22"
"2022-10-10T12:14:13"
null
NONE
null
### What steps did you take I updated minio on Kubeflow version 1.5.0 standard manifest install on a generic cluster to use an s3 gateway. I also updated the bucket name on pipeline-install-config which was reflected in the workflow-controller-configmap ### What happened: Minio came up correctly, and was able to manage s3 buckets through the minio GUI When I try to run `[Tutorial] Data passing in python components`, it fails with: ```{"error":"Failed to create a new run.: InvalidInputError: unknown template format: pipeline spec is invalid","code":3,"message":"Failed to create a new run.: InvalidInputError: unknown template format: pipeline spec is invalid","details":[{"@type":"type.googleapis.com/api.Error","error_message":"unknown template format","error_details":"Failed to create a new run.: InvalidInputError: unknown template format: pipeline spec is invalid"}]}``` ### What did you expect to happen: The pipeline executes correctly ### Environment: <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? From the manifests directly, I patched it to include the new config, and the minio config. <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: 1.5.0 kubeflow and all the standard versions <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7909/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7909/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7904
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7904/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7904/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7904/events
https://github.com/kubeflow/pipelines/issues/7904
1,273,919,121
I_kwDOB-71UM5L7naR
7,904
[backend] GCR images not accessible: `Consumer 'project:ml-pipeline' has been suspended.`
{ "login": "VishDev12", "id": 19661538, "node_id": "MDQ6VXNlcjE5NjYxNTM4", "avatar_url": "https://avatars.githubusercontent.com/u/19661538?v=4", "gravatar_id": "", "url": "https://api.github.com/users/VishDev12", "html_url": "https://github.com/VishDev12", "followers_url": "https://api.github.com/users/VishDev12/followers", "following_url": "https://api.github.com/users/VishDev12/following{/other_user}", "gists_url": "https://api.github.com/users/VishDev12/gists{/gist_id}", "starred_url": "https://api.github.com/users/VishDev12/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VishDev12/subscriptions", "organizations_url": "https://api.github.com/users/VishDev12/orgs", "repos_url": "https://api.github.com/users/VishDev12/repos", "events_url": "https://api.github.com/users/VishDev12/events{/privacy}", "received_events_url": "https://api.github.com/users/VishDev12/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Okay, this feels real, I can't access any of the images from this repo: `ml-pipeline`\r\n\r\nAnd confirmed that other gcr.io pulls work fine, for example: `gcr.io/google-containers/busybox@sha256:d8d3bc2c183ed2f9f10e7258f84971202325ee6011ba137112e01e30f206de67`", "This is definitely an issue our cluster is having the same problem. ", "Related slack thread: https://kubeflow.slack.com/archives/CE10KS9M4/p1655400994211759", "@chensun @IronPan @james-jwu @zijianjoy As you are the [owners](https://github.com/kubeflow/pipelines/blob/master/OWNERS) of this project, can you please help restore ml-pippelines in gcr.io? Thanks!", "I am having the same issue and it just broke my demo I was about to give to my team in an hour. ", "Some steps I'm taking to temporarily unblock our jobs:\r\n\r\n1. SSH to the K8s machines where you know for a fact that the jobs previously succeeded, and run:\r\n`docker image ls`\r\n2. Find the images that are causing your job to fail now, for us, it's `v3.1.14-license-compliance`.\r\n3. Run `docker tag gcr.io/ml-pipeline/argoexec:v3.1.14-license-compliance <private_repo>/repo:v3.1.14-license-compliance`\r\n4. Run `docker push <private_repo>/repo:v3.1.14-license-compliance`\r\n\r\nAfter that, I haven't figured out how to let new jobs know to use this image. But for the jobs that are presently stuck in limbo:\r\n\r\n1. After uploading the image to the repo above, SSH to the machine where the jobs are stuck,\r\n2. Login to the same private repo.\r\n3. `docker pull <private_repo>/repo:v3.1.14-license-compliance`\r\n4. `docker tag <private_repo>/repo:v3.1.14-license-compliance gcr.io/ml-pipeline/argoexec:v3.1.14-license-compliance`\r\n\r\nAfter this, I can confirm that the stuck job succeeded on the UI, but this is only a stop-gap measure, of course.\r\n\r\n---\r\n\r\nAlso, it appears that this issue doesn't really stop our jobs from succeeding, it only seems to be stuck at the final stage with the wait container.", "Seems like it's back up in gcr.io: https://kubeflow.slack.com/archives/CE10KS9M4/p1655407543079189?thread_ts=1655400994.211759&cid=CE10KS9M4\r\n", "Pods are able to pull the image now. ", "Could we please get a short RCA from the developers before we close this issue? I want to evaluate the risks of something like this repeating.", "Should they be using artifact registry? https://cloud.google.com/artifact-registry/docs/transition/transition-from-gcr?_ga=2.199229930.-556289867.1649704015\r\n\r\n+1 about the ask for an RCA.\r\n", "\r\n\r\n> Could we please get a short RCA from the developers before we close this issue? I want to evaluate the risks of something like this repeating.\r\n\r\nApologize for the trouble, our project was mistakenly suspended for a short period due to some process error, and we'll be working on a postmortem to prevent this from happening again.\r\n" ]
"2022-06-16T18:09:31"
"2022-06-16T22:26:40"
"2022-06-16T22:26:40"
NONE
null
### Environment * How did you deploy Kubeflow Pipelines (KFP)? Standalone installation with Kustomize on EKS. * KFP version: 1.7.0 My KFP runs are failing and when I tried to pull this image: `gcr.io/ml-pipeline/argoexec:v3.1.14-license-compliance` manually from another unrelated machine, that failed with the following error: ``` Error response from daemon: Head https://gcr.io/v2/ml-pipeline/argoexec/manifests/v3.1.14-license-compliance: denied: Permission denied: Consumer 'project:ml-pipeline' has been suspended. ``` I apologize if this is something from my side, but it seems serious if it is not. Edit: ## Suggestions 1. Your jobs might not actually be failing. For us, the `main` container runs without issue. Only the `wait` container with the `gcr.io/ml-pipeline/argoexec:v3.1.14-license-compliance` image is stuck. [This comment](https://github.com/kubeflow/pipelines/issues/7904#issuecomment-1158019599) details how to deal with that temporarily. 2. If you have your core Kubeflow Pipelines components on machines that have the potential to roll. On spot instance, for example, then: a. Try freezing them if you can. For example, certain K8s scaling systems allow you to restrict scale down on certain node groups. b. Copy over the core images to one of your private container registries as soon as possible. After that, you should be able to update your Kubeflow Pipelines manifests with these private images. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7904/reactions", "total_count": 13, "+1": 13, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7904/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7894
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7894/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7894/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7894/events
https://github.com/kubeflow/pipelines/issues/7894
1,272,320,517
I_kwDOB-71UM5L1hIF
7,894
[sdk] 1.8.1 git tag is misplaced
{ "login": "davidxia", "id": 480621, "node_id": "MDQ6VXNlcjQ4MDYyMQ==", "avatar_url": "https://avatars.githubusercontent.com/u/480621?v=4", "gravatar_id": "", "url": "https://api.github.com/users/davidxia", "html_url": "https://github.com/davidxia", "followers_url": "https://api.github.com/users/davidxia/followers", "following_url": "https://api.github.com/users/davidxia/following{/other_user}", "gists_url": "https://api.github.com/users/davidxia/gists{/gist_id}", "starred_url": "https://api.github.com/users/davidxia/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/davidxia/subscriptions", "organizations_url": "https://api.github.com/users/davidxia/orgs", "repos_url": "https://api.github.com/users/davidxia/repos", "events_url": "https://api.github.com/users/davidxia/events{/privacy}", "received_events_url": "https://api.github.com/users/davidxia/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Hello @davidxia , the SDK versions in Pypi are not the same as GitHub release tags. KFP package in Pypi (SDK) has its own release cycle. The GitHub release tags are for KFP backend (docker images)." ]
"2022-06-15T14:26:23"
"2022-06-23T22:45:07"
"2022-06-23T22:45:07"
CONTRIBUTOR
null
The 1.8.1 git tag is [here](https://github.com/kubeflow/pipelines/commits/1.8.1/sdk/python/requirements.in) on Nov 23, 2021. The `kfp` package on Pypi [here](https://pypi.org/project/kfp/#history) was published on Sep 10, 2021. I suspect the git tag is misplaced and should be moved back to the correct commit that created the 1.8.1 PyPI package. Having misplaced git tags will create debugging confusion for users. I haven't checked the correctness of other git tags. --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7894/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7894/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7893
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7893/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7893/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7893/events
https://github.com/kubeflow/pipelines/issues/7893
1,271,309,811
I_kwDOB-71UM5LxqXz
7,893
[6/14/22] v2 sample test failure
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-06-14T20:12:56"
"2022-07-07T22:46:34"
"2022-07-07T22:46:34"
COLLABORATOR
null
``` F0614 17:22:43.063393 18 main.go:74] KFP driver: driver.RootDAG(pipelineName=my-test-pipeline-beta, runID=8aa7dfc3-519f-4548-b886-ff1a11a37a4e, runtimeConfig, componentSpec) failed: Failed PutContext(name="8aa7dfc3-519f-4548-b886-ff1a11a37a4e", type="system.PipelineRun", typeid=16): rpc error: code = Unavailable desc = error reading from server: EOF ` ``` v2 sample tests keep failing and block presubmit. Possibly a result of using kfp 1.8.2 in test infra.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7893/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7893/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7891
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7891/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7891/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7891/events
https://github.com/kubeflow/pipelines/issues/7891
1,270,469,758
I_kwDOB-71UM5LudR-
7,891
[bug] Considerable delay in showing steps in Kubeflow Pipelines UI after navigating to Runs
{ "login": "rafalk0", "id": 107420134, "node_id": "U_kgDOBmcZ5g", "avatar_url": "https://avatars.githubusercontent.com/u/107420134?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rafalk0", "html_url": "https://github.com/rafalk0", "followers_url": "https://api.github.com/users/rafalk0/followers", "following_url": "https://api.github.com/users/rafalk0/following{/other_user}", "gists_url": "https://api.github.com/users/rafalk0/gists{/gist_id}", "starred_url": "https://api.github.com/users/rafalk0/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rafalk0/subscriptions", "organizations_url": "https://api.github.com/users/rafalk0/orgs", "repos_url": "https://api.github.com/users/rafalk0/repos", "events_url": "https://api.github.com/users/rafalk0/events{/privacy}", "received_events_url": "https://api.github.com/users/rafalk0/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" } ]
open
false
null
[]
null
[ "Could you check the resource usage in your cluster, and see if any limit was reached? You may also consider upgrading to 1.8.", "@Linchin No limit was reached when it comes to resource usage. " ]
"2022-06-14T08:50:53"
"2022-07-18T10:16:46"
null
NONE
null
### What steps did you take Create a pipeline with working ML algorithm. ### What happened: We encountered considerable delay (lasting more than 2 minutes) in showing steps in Kubeflow UI after navigating to Runs. Initially, after opening Runs, Kubeflow Pipelines UI shows only a blank window. The algorithm's steps in Kubeflow Pipelines appear only after these 2 requests: `ml_metadata.MetadataStoreService/GetContextByTypeAndName` fail. It seems that it's rather a core Kubeflow's internal bug, similar to the one reported here: https://github.com/kubeflow/pipelines/issues/3763. This issue affects only the Kubeflow UI and does not affect the algorithms' deployments nor operations. We can still access pipelines details by using an external software as UI. <img width="1532" alt="Kubeflow_140_170_working_slow" src="https://user-images.githubusercontent.com/107420134/173522832-e0f7cfca-8385-4abb-8dde-d77d998d9e91.png"> ### What did you expect to happen: We didn't expect any significant delay. ### Environment: * How do you deploy Kubeflow Pipelines (KFP)? Kubeflow Pipelines as [part of a full Kubeflow deployment](https://www.kubeflow.org/docs/components/pipelines/installation/overview/#full-kubeflow-deployment). Kubeflow itself is installed using kustomize, according to the [documentation](https://github.com/kubeflow/manifests) (agnostic environment). * KF version: 1.4.0 * KFP version: 1.7.0 * KFP SDK version: 1.8.11 <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Anything else you would like to add: What would be the estimated time of resolution for this issue? ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> <!-- /area sdk --> <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7891/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7891/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7887
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7887/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7887/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7887/events
https://github.com/kubeflow/pipelines/issues/7887
1,269,825,243
I_kwDOB-71UM5Lr_7b
7,887
[feature] google-cloud component for generic container jobs
{ "login": "defoishugo", "id": 22320492, "node_id": "MDQ6VXNlcjIyMzIwNDky", "avatar_url": "https://avatars.githubusercontent.com/u/22320492?v=4", "gravatar_id": "", "url": "https://api.github.com/users/defoishugo", "html_url": "https://github.com/defoishugo", "followers_url": "https://api.github.com/users/defoishugo/followers", "following_url": "https://api.github.com/users/defoishugo/following{/other_user}", "gists_url": "https://api.github.com/users/defoishugo/gists{/gist_id}", "starred_url": "https://api.github.com/users/defoishugo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/defoishugo/subscriptions", "organizations_url": "https://api.github.com/users/defoishugo/orgs", "repos_url": "https://api.github.com/users/defoishugo/repos", "events_url": "https://api.github.com/users/defoishugo/events{/privacy}", "received_events_url": "https://api.github.com/users/defoishugo/received_events", "type": "User", "site_admin": false }
[ { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
{ "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false }
[ { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[ "/assign @connor-mccarthy ", "@defoishugo, thanks for reaching out. This is on our development roadmap as a \"Bring your own container\" feature. I believe this will meet your needs once implemented.", "@connor-mccarthy perfect!\r\n\r\nCurrent workaround: using YML file with a container implementation. The issue with that is mainly about linking a training job to a dataset, which means that in the Datasets UI of VertexAI you would not see the training job attached. Probably something to add in the Google API backend?\r\n\r\nStill, this workaround is easy to implement for anyone who is looking for a quick solution and waiting for the \"Bring your own container\" epic/feature.", "@connor-mccarthy Do we have any news about this topic?\r\n\r\nWhen using the workaround described in my previous message, we could not see the trained model in the \"Datasets\" related trainings. It means that it breaks the lineage. ", "Hi, @defoishugo. This is implemented. The docs, which we are continuing to improve, are [here](https://www.kubeflow.org/docs/components/pipelines/v2/author-a-pipeline/components/#3-custom-container-components)." ]
"2022-06-13T18:53:58"
"2023-08-10T20:19:38"
"2023-08-10T20:19:38"
NONE
null
### Feature Area /area sdk /area samples /area components ### What feature would you like to see? The ability to create custom container jobs, not only for training. In my mind, we should be able to output user-defined artifacts (e.g.: metrics, datasets, two models,Β ...). ### What is the use case or pain point? **USE CASE:** As a user, I have an existing on-premise application with which is a docker container with different pipelines: - split: split the dataset into train/test/val with customized strategy (not random) - train: train a model - evaluate: displays and save some metrics When it comes to calling the training pipeline, I can use the `CustomContainerTrainingJob` which produces a `VertexModel`. It works pretty well and thank you for that! But for the split and evaluate commands, I could not use the `CustomContainerTrainingJob` since it produces only `VertexModel` and not arbitrary `Artifact`. ### Is there a workaround currently? Currently, I do not have a working workaround. However, I am right now playing with `convert_method_to_component` and with the `CustomContainerTrainingJob` and I will find a solution for that. The first idea that comes to my min would be to generate the train, test and val datasets in the _AIP_MODEL_DIR_ folder. Then, instead of creating a model, I will generate an artifact for each file with an URI to the files. Do I like this workaround? NO. It needs a lot of edits in different parts of the code.Β The solution seems dirty and too much homemade. If you have any other idea, please tell me. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7887/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7887/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7880
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7880/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7880/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7880/events
https://github.com/kubeflow/pipelines/issues/7880
1,267,821,647
I_kwDOB-71UM5LkWxP
7,880
[CI] release-1.8 branch build-each-commit error
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "The issue appears to be caused by go 1.18 adds additional info which are not recognized by the license check tool. Found a similar issue: https://github.com/uw-labs/lichen/issues/14\r\n\r\nIn the latest go-licenses tool, we removed the feature of checking licenses from binary file, so this issue shouldn't be a blocker any Will try to update go-licenses tool in release-1.8 branch. ", "Actually, on `master` branch we use go 1.17 to build CRD viewer:\r\nhttps://github.com/kubeflow/pipelines/blob/d05110e0b0c300879614b3bb28c06fee71470de6/backend/Dockerfile.viewercontroller#L15\r\n\r\nBut on `release-1.8` branch we didn't pin the go version:\r\nhttps://github.com/kubeflow/pipelines/blob/8649bac68e070a79d7ba14fb2f6b7623337c18a7/backend/Dockerfile.viewercontroller#L15\r\n\r\nI think probably a quicker and safer change it to pin go version in `release-1.8` branch." ]
"2022-06-10T17:21:55"
"2022-06-16T22:56:22"
"2022-06-16T22:56:22"
COLLABORATOR
null
CRD viewer build failure on the go-licenses check ``` Step 8/17 : RUN go-licenses csv /bin/controller > /tmp/licenses.csv && diff /tmp/licenses.csv backend/third_party_licenses/viewer.csv && go-licenses save /tmp/licenses.csv --save_path /tmp/NOTICES ---> Running in 8c96b00fe722 listModulesInGoBinary(path="/bin/controller"): unrecognised line: build -compiler=gc The command '/bin/sh -c go-licenses csv /bin/controller > /tmp/licenses.csv && diff /tmp/licenses.csv backend/third_party_licenses/viewer.csv && go-licenses save /tmp/licenses.csv --save_path /tmp/NOTICES' returned a non-zero code: 1 ``` https://github.com/kubeflow/pipelines/runs/6498434354
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7880/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7880/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7879
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7879/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7879/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7879/events
https://github.com/kubeflow/pipelines/issues/7879
1,267,052,820
I_kwDOB-71UM5LhbEU
7,879
[feature] TaskSpecs from create_component_from_func to support "after(...)"
{ "login": "terrykong", "id": 7576060, "node_id": "MDQ6VXNlcjc1NzYwNjA=", "avatar_url": "https://avatars.githubusercontent.com/u/7576060?v=4", "gravatar_id": "", "url": "https://api.github.com/users/terrykong", "html_url": "https://github.com/terrykong", "followers_url": "https://api.github.com/users/terrykong/followers", "following_url": "https://api.github.com/users/terrykong/following{/other_user}", "gists_url": "https://api.github.com/users/terrykong/gists{/gist_id}", "starred_url": "https://api.github.com/users/terrykong/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/terrykong/subscriptions", "organizations_url": "https://api.github.com/users/terrykong/orgs", "repos_url": "https://api.github.com/users/terrykong/repos", "events_url": "https://api.github.com/users/terrykong/events{/privacy}", "received_events_url": "https://api.github.com/users/terrykong/received_events", "type": "User", "site_admin": false }
[ { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Actually, I just discovered that this request is not needed. It appears that this is already supported, but I ran into this issue because I was working outside of the compiler. To others who stumble upon this issue: it appears that I do in fact get ContainerOps back from `kfp.components.create_component_from_func` when I instantiate that task within a pipeline func and then pass that into the compiler. Here is a snippet that shows that the above example does work:\r\n```python\r\nimport kfp\r\nimport kfp.compiler\r\n\r\n@kfp.components.create_component_from_func\r\ndef training_but_no_outputs():\r\n # do stuff\r\n pass\r\n\r\ndef pipeline_func():\r\n task_A = kfp.dsl.ContainerOp(name='other', image='python:3.6')\r\n task_B = training_but_no_outputs()\r\n task_A.after(task_B) # AttributeError: 'TaskSpec' object has no attribute 'name'\r\n task_B.after(task_A) # AttributeError: 'TaskSpec' object has no attribute 'after'\r\n\r\nkfp.compiler.Compiler(mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY).compile(pipeline_func=pipeline_func, package_path='/tmp/pipeline.yaml')\r\n```" ]
"2022-06-10T05:37:07"
"2022-06-12T05:00:04"
"2022-06-12T05:00:04"
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> <!-- /area backend --> /area sdk <!-- /area samples --> /area components ### What feature would you like to see? <!-- Provide a description of this feature and the user experience. --> Allow ops created from `kfp.components.create_component_from_func` to support ".after(...)" like `kfp.dsl.ContainerOp`s. This would help declare dependencies that don't rely on output passing. Currently, the following is not possible: ```python import kfp @kfp.components.create_component_from_func def training_but_no_outputs(): # do stuff pass task_A = kfp.dsl.ContainerOp(name='other', image='python:3.6') task_B = training_but_no_outputs() task_A.after(task_B) # AttributeError: 'TaskSpec' object has no attribute 'name' task_B.after(task_A) # AttributeError: 'TaskSpec' object has no attribute 'after' ``` ### What is the use case or pain point? <!-- It helps us understand the benefit of this feature for your use case. --> We're still using pipelines "v1" and rely on `kfp.components.create_component_from_func` and `kfp.dsl.ContainerOp`s. We have been able to set dependencies between ContainerOps with `ContainerOp.after(*ops)`, but we cannot do so with `TaskSpec`s, which appear to be the output of create_component_from_func. ### Is there a workaround currently? <!-- Without this feature, how do you accomplish your task today? --> I haven't tested this, but it seems like a workaround could be to emit an output from the python function decorated with `create_component_from_func` and pass it to a dependent op: ```python import kfp @kfp.components.create_component_from_func def training_but_no_outputs(dummy_input: int) -> int: # do stuff return 4 # dummy output task_A = training_but_no_outputs(4) task_B = kfp.dsl.ContainerOp(name='other', image='python:3.6', command=['sh', '-c'], arguments=f'echo {task_A.output}') task_C = kfp.dsl.ContainerOp(name='other', image='python:3.6', command=['sh', '-c'], arguments=f'echo 4 > /file', file_outputs={'file': '/file'}) task_D = training_but_no_outputs(str(task_C.output)) # oddly the stringify-ing of the pipelineparam is necessary ``` If this works, it seems like it would bloat the code. --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7879/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7879/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7878
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7878/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7878/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7878/events
https://github.com/kubeflow/pipelines/issues/7878
1,266,758,583
I_kwDOB-71UM5LgTO3
7,878
[feature] Seek alternative for object store solution other than MinIO
{ "login": "zijianjoy", "id": 37026441, "node_id": "MDQ6VXNlcjM3MDI2NDQx", "avatar_url": "https://avatars.githubusercontent.com/u/37026441?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zijianjoy", "html_url": "https://github.com/zijianjoy", "followers_url": "https://api.github.com/users/zijianjoy/followers", "following_url": "https://api.github.com/users/zijianjoy/following{/other_user}", "gists_url": "https://api.github.com/users/zijianjoy/gists{/gist_id}", "starred_url": "https://api.github.com/users/zijianjoy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijianjoy/subscriptions", "organizations_url": "https://api.github.com/users/zijianjoy/orgs", "repos_url": "https://api.github.com/users/zijianjoy/repos", "events_url": "https://api.github.com/users/zijianjoy/events{/privacy}", "received_events_url": "https://api.github.com/users/zijianjoy/received_events", "type": "User", "site_admin": false }
[ { "id": 930619516, "node_id": "MDU6TGFiZWw5MzA2MTk1MTY=", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/frontend", "name": "area/frontend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
null
[]
null
[ "I would clarify it a bit more\r\n\r\n- It is open sourced and uses license that can be accepted by KFP, something similar to Apache 2.0.\r\n- Orchestration engine (like Argoworkflow) can interact with this object store (S3 compatibility).\r\n- It has fine-grained S3 permission control e.g. ${aws:username} https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#policy-vars-wheretouse. Please see https://github.com/kubeflow/pipelines/pull/7725 for an example implementation\r\n- Same or similar function set as MinIO. We need at least one bucket called mlpipeline and multiple users and policies to manage which user can access which folders. Easy Scalability would also be good.\r\n- Each container must run as non-root and unpriviledged\r\n\r\n- https://www.zenko.io/cloudserver/ no user management at all in the free version\r\n- https://longhorn.io/ Does it even support S3 ? https://github.com/longhorn/longhorn/issues/353\r\n- https://github.com/openstack/swift i cannot find information about ACLs\r\n- [ceph-rook](https://rook.io/) it seems to support aws:username but only bucketpolicies. so pipeline definitions must not be stored on S3 then. It should reside in a separate namespace since it has a lot of pods. Nevertheless the most promising software\r\n", "@zijianjoy , is the MinIO license change to AGPLv3 the only motivation for looking for alternatives? As you wrote, `AGPL software requires that anything it links to must also be licensed under the AGPL`, but this should not be a limitation for upgrading to the latest MinIO:\r\n- Currently, KFP links to [MinIO Golang client](https://github.com/minio/minio-go), which is still under Apache 2.0 License. Even though they changed the MinIO license, the license of the Golang client was not changed (yet). May be there are concerns that the Golang client license can be changed, but for now there should be no license restrictions to use the latest MinIO.\r\n- As [rook](https://github.com/rook/rook) has no Golang client, currently the only way to use it from KFP is through a standalone S3 client, such as [s5cmd](https://github.com/peak/s5cmd) or [s3cmd](https://github.com/s3tools/s3cmd). I would suggest to implement the existing `ObjectStoreInterface interface` by invoking some S3 client, which can be shipped with the apiserver image. That way KFP will not depend on specific object store service, as long as the service supports the S3 API.", "Minio is only used as a storage gateway. This feature has been deprecated in Minio since over a year https://blog.min.io/deprecation-of-the-minio-gateway/\r\n\r\nTo me the simplest replacement is to use an actual S3 client (which hopefully is also an S3 compatible client because not everyone is on AWS) \r\nIt needs to support the custom endpoints, region, and path-style or virtual host style URLs, as well as the various auth methods.\r\nThe problem with that, is that obviously that requires an actual object store service to run... which is where Minio could still be used as the in-cluster object store solution (but not as a gateway)", "\r\n\r\n\r\n\r\n> Minio is only used as a storage gateway. This feature has been deprecated in Minio since over a year https://blog.min.io/deprecation-of-the-minio-gateway/\r\n> \r\n> To me the simplest replacement is to use an actual S3 client (which hopefully is also an S3 compatible client because not everyone is on AWS) It needs to support the custom endpoints, region, and path-style or virtual host style URLs, as well as the various auth methods. The problem with that, is that obviously that requires an actual object store service to run... which is where Minio could still be used as the in-cluster object store solution (but not as a gateway)\r\n\r\nNo, your assesment is not correct. Minio usage differs by distribution and the most important default one used by Kubeflow is NOT the gateway mode. We need a reliable storage and gateway replacement. For further information read the whole conversation here https://github.com/kubeflow/pipelines/pull/7725", "\r\n> No, your assesment is not correct. Minio usage differs by distribution and the most important default one used by Kubeflow is NOT the gateway mode. We need a reliable storage and gateway replacement. For further information read the whole conversation here #7725\r\n\r\nMy bad, I wasn't clear. \r\nMinio is used as the local storage in the vanilla distro, and liekly that's what is used on-premises, when no storage backend is defined or available, otherwise it is used as a 'gateway' for pretty much every cloud distro. Whether it is used in gateway mode, or using S3-compatible backend. \r\nThe problem is it is old and doesn't support several options that make it hard to work with cloud other than AWS. One of the main issue for me is that it doesn't support the region param.\r\nAnyway, regardless of how it is used, my suggestion was to update the object storage client code in pipelines to use a native, modern, S3-compatible client that supports all options, so there is no need for Minio at all when using a cloud object storage backend.\r\nFor the vanilla distro that still requires a in-cluster storage solution, Minio S3 compat still does the job, and it's only a matter of pointing the S3 client to it.\r\n", "I agree with @streamnsight - the `ObjectStoreInterface interface` can be implemented using S3-compatible client, golang or invoking a standalone binary . There should be no MinIO dependency in the KFP code. We can still use MinIO, rook or whatever object store we choose.", "This issue here is about the server, not the client.", "\"For the vanilla distro that still requires a in-cluster storage solution, Minio S3 compat still does the job, and it's only a matter of pointing the S3 client to it.\" No, that is the reason for this issue. We need a server side replacement.", "> This issue here is about the server, not the client.\r\n\r\nCurrently there is a dependency between the server and the client, the server cannot be changed easily without client modifications. The first step is to replace the client with a generic S3-compatible client, without changing the server. After that we can change the server with any object store that supports S3 API.\r\n\r\nWhy do you think that the MinIO server needs to be replaced ? According to the issue description, `using AGPL software requires that anything it links to must also be licensed under the AGPL` , but obviously the MinIO server does not link to any KFP code. The MinIO client links to KFP, but as it remains under Apache 2.0 License, there is no licensing restrictions to use the latest MinIO server and client versions.\r\n", "@tzstoyanov \r\n\"but as it remains under Apache 2.0 License, there is no licensing restrictions to use the latest MinIO server and client versions.\" Googles Lawyers say otherwise. I have discussed it with their team several times.\r\n\r\nPlease also read everything from https://github.com/kubeflow/pipelines/pull/7725 to fully understand the long-term goals for the artifact storage within Kubeflow.\r\n\r\nYou can already use the minio server with most S3 clients, but yes, maybe it is problematic to use the minio client for other S3 servers.\r\n\r\nWe are always looking for volunteers. I already mentored several people that now have their first commits to the Kubeflow project. ", "@juliusvonkohout, I looked at the comments of the #7725. Thanks for pointing me to that, now I have a better idea of the problem and the work you did in the context of that PR. I have a few questions, will be happy if you or someone from the community can answer:\r\n- Regarding the MinIO license problem, you mentioned that `Googles Lawyers say otherwise. I have discussed it with their team several times.` I wonder if these discussions were public? I'm curious to see their arguments against this specific use case. I'm not a license expert, but the AGPL restrictions are pretty clear. From my understanding, and according to the description of this specific issue I cannot see any license violation in our use case - we do not modify any AGPL code, nor we do link to any AGPL code. Moreover - the KFP image is already based on Debian, there are a lot of *GPL* binaries distributed as part of it.\r\n- In the context of your PR, @zijianjoy mentioned that `New image has to pass license review`. Do you know what is that license review process, is it described somewhere?\r\n- In any case, replacing the MinIO client with some generic S3 client is a good idea. This will make KFP more flexible and is the first step in replacing the MinIO server. Do you know if there is a specific issue about the client?\r\n\r\n I can contribute to that, implement the `ObjectStoreInterface interface` using a generic S3 client. We can use the official [AWS SDK](https://github.com/aws/aws-sdk-go), or use a command line client such as [s5cmd](https://github.com/peak/s5cmd) or [s3cmd](https://github.com/s3tools/s3cmd). ", "@tzstoyanov please reach out on LinkedIn or slack for discussion. I am already working with one of your colleagues @difince https://kccnceu2023.sched.com/event/1HyY8/hardening-kubeflow-security-for-enterprise-environments-julius-von-kohout-dhl-deutsche-telekom-diana-dimitrova-atanasova-vmware\r\n\r\nMaybe these here are are lower hanging fruit. https://github.com/kubeflow/kubeflow/pull/7032#issuecomment-1505277720 we really need to focus on what to work on first, because getting stuff into KFP is difficult.", "Do we have any update on this? :) ", "cubefs looks promising.", "@gsoec can you articulate why cubefs looks promising?\r\n\r\nI think we need an assessment similar to what @juliusvonkohout did in https://github.com/kubeflow/pipelines/issues/7878#issuecomment-1169070899. Based on that, comparing with [ceph-rook](https://rook.io/) appears to be most relevant. From what I see here, the latter appears to be the favorable solution...", "@lehrig please take a look at the last comments of https://github.com/kubeflow/pipelines/pull/7725. i think we need to use istio or something else for the authentication part since only a few s3 providers fully support enterprise-level user management and authorization. Furthermore we could just plug and play any basic-S3 compatible storage backend and get rid of passwords and the necessary rotation altogether. " ]
"2022-06-09T23:15:32"
"2023-08-23T14:06:44"
null
COLLABORATOR
null
### Feature Area /area backend /area frontend ### What is the use case or pain point? Currently, KFP is using MinIO as the default object store for Artifact payload and Pipeline template. However, MinIO has fully changed its license to AGPLv3 since 2021. This change has prevented KFP from upgrading to the latest of MinIO: using AGPL software requires that anything it links to must also be licensed under the AGPL. Since we are not able to adopt the latest change from MinIO, we are seeking alternatives to replace it in future KFP deployment. ### What feature would you like to see? This new object store solution should have the following nature: 1. It is open sourced and uses license that can be accepted by KFP. 2. Orchestration engine (like Argoworkflow) can interact with this object store. 3. It is compatible with S3. 4. It has fine-grained permission control. 5. Same or similar function set with MinIO. ### Exploration We are currently considering following options, we would like to hear your opinions as well. - https://www.zenko.io/cloudserver/ - https://longhorn.io/ - https://github.com/openstack/swift - [ceph-rook](https://rook.io/) - https://github.com/cubeFS/cubefs - https://github.com/openebs/openebs --- cc @chensun @IronPan @james-jwu --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7878/reactions", "total_count": 30, "+1": 30, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7878/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7861
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7861/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7861/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7861/events
https://github.com/kubeflow/pipelines/issues/7861
1,264,805,012
I_kwDOB-71UM5LY2SU
7,861
[feature] google-cloud component for loading existing VertexDataset
{ "login": "defoishugo", "id": 22320492, "node_id": "MDQ6VXNlcjIyMzIwNDky", "avatar_url": "https://avatars.githubusercontent.com/u/22320492?v=4", "gravatar_id": "", "url": "https://api.github.com/users/defoishugo", "html_url": "https://github.com/defoishugo", "followers_url": "https://api.github.com/users/defoishugo/followers", "following_url": "https://api.github.com/users/defoishugo/following{/other_user}", "gists_url": "https://api.github.com/users/defoishugo/gists{/gist_id}", "starred_url": "https://api.github.com/users/defoishugo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/defoishugo/subscriptions", "organizations_url": "https://api.github.com/users/defoishugo/orgs", "repos_url": "https://api.github.com/users/defoishugo/repos", "events_url": "https://api.github.com/users/defoishugo/events{/privacy}", "received_events_url": "https://api.github.com/users/defoishugo/received_events", "type": "User", "site_admin": false }
[ { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1260031624, "node_id": "MDU6TGFiZWwxMjYwMDMxNjI0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/samples", "name": "area/samples", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
{ "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false }
[ { "login": "connor-mccarthy", "id": 55268212, "node_id": "MDQ6VXNlcjU1MjY4MjEy", "avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4", "gravatar_id": "", "url": "https://api.github.com/users/connor-mccarthy", "html_url": "https://github.com/connor-mccarthy", "followers_url": "https://api.github.com/users/connor-mccarthy/followers", "following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}", "gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}", "starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions", "organizations_url": "https://api.github.com/users/connor-mccarthy/orgs", "repos_url": "https://api.github.com/users/connor-mccarthy/repos", "events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}", "received_events_url": "https://api.github.com/users/connor-mccarthy/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks, @defoishugo. This development is currently in progress and should be released with an upcoming v2 alpha release!", "Thank you @connor-mccarthy.\r\n\r\nJust to let you know, I found this workaround for my task:\r\n\r\n```\r\n@component(base_image=\"python:3.9\", packages_to_install=[\"google-cloud-aiplatform\"]) \r\ndef get_data(project: str, region: str, dataset_id: str, dataset: Output[Artifact]):\r\n from google.cloud import aiplatform as aip\r\n vertex_dataset = aip.TabularDataset(dataset_id, project=project, location=region)\r\n dataset.metadata[\"resourceName\"] = vertex_dataset.resource_name\r\n dataset.uri = \"\"\r\n```\r\n\r\nThe output is not a `VertexDataset` object but it will work if you pass it to a custom job like `CustomContainerTrainingJobRunOp` since the job is only using the `resource_name` metadata to get the dataset.\r\n\r\nStill, this is not a clean solution. Thank you for developing this feature, waiting for it! ", "Glad you found a workaround, @defoishugo! Programmatic import of the type annotation import is indeed the crux of this problem for Vertex artifacts. We have a nice solution for it that I think we should be able to ship soon!", "@connor-mccarthy Do you have any update regarding this issue?\r\n\r\n**Current work-arounds are insufficient**\r\nThe work-around I have been using is the use of the `importer_node` as mentioned in [this article](https://cloud.google.com/vertex-ai/docs/pipelines/use-components#use_an_importer_node) - while this should intuitively work for loading Artifacts and functionally does the job, it duplicates entries within the ML Metadata store in the VertexAI project.\r\n\r\n**Loading existing Artifacts is a key MLOps functionality**\r\nAs a user, it seems like there is clearly something missing that would allow one to link an Artifact to multiple pipelines and multiple pipeline runs without duplicating ML Metadata entries within VertexAI. Use cases include running multiple training runs using different models on the same input Dataset, using the same trained model on multiple datasets, re-using the trained model artifact for model evaluation and deploying in separate pipelines, etc.", "> it duplicates entries within the ML Metadata store in the VertexAI\r\n\r\nUsing the `importer` argument `reimport=False` in `kfp==2.x.x` should avoid duplicating entries in ML Metadata. I think this should resolve the issues you are describing. If not, can you let me know what gaps you're still experiencing?\r\n\r\n", "Thank you for replying @connor-mccarthy!\r\n\r\n**Some Artifact types are not duplicated, even before `kfp==2.x.x`**\r\nDatasets and Endpoints seem to show the desired behavior. However, `dsl.Model` is duplicated when importing.\r\n\r\n**Pre-release is not yet supported**\r\nHoping this will be fixed in version 2.x.x, however currently there is no version of `google-cloud-pipeline-components` that supports the pre-release, so I can not experiment with this yet. Looking forward to their next version update.", "Thank you for explaining. I have made a note of this bug.", "> Glad you found a workaround, @defoishugo! Programmatic import of the type annotation import is indeed the crux of this problem for Vertex artifacts. We have a nice solution for it that I think we should be able to ship soon!\r\n\r\nAny update on this, it's been several months? Facing the same issue when trying to pass the output of a `BigqueryQueryJobOp` which is of type `google.BQTable` to another component. ", "@sumanassarmathg, support for google-namespaced artifact types was released with [`2.0.0b5`](https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md#200-beta5).", "@connor-mccarthy \r\n\r\n> **Current work-arounds are insufficient** The work-around I have been using is the use of the `importer_node` as mentioned in [this article](https://cloud.google.com/vertex-ai/docs/pipelines/use-components#use_an_importer_node) - while this should intuitively work for loading Artifacts and functionally does the job, it duplicates entries within the ML Metadata store in the VertexAI project.\r\n> \r\n\r\nBy the way, the current work-arounds does not help for the following use case: considering a dataset ID as a parameter of the pipeline (which is the case for the trainings pipelines). \r\n\r\nThe three solutions to import the dataset would be the followings:\r\n\r\n- **Importer**: using the importer operation would need to provide the URI of the source to use\r\n- **Python components:** using a custom python-defined component as mentioned in [this message](https://github.com/kubeflow/pipelines/issues/7861#issuecomment-1152533517) will create duplicates entries in the ML Metadata store and break the lineage\r\n- **YML components:** same issue with the YML-based components\r\n\r\nCurrently, there is no solution for importing a VertexDataset using only the dataset ID. The same issue could occur with the model, that you could not import based on VertexModel ID. \r\n\r\nDo we have any solution regarding this issue? @adhaene-noimos maybe you found work-arounds?\r\n\r\nAnd a more important question is: considering we could take a parameterized VertexDataset as input? What is the good practice? What is the vision of kubeflow and VertexAI on this very important topic?\r\n\r\nIn my mind, lineage is really the core of MLOps and from what I see, I am not the only one who's training a lot of models on VertexAI and want to have one pipeline for all of them... Which means that we should be able to have a dataset as an input of a pipeline (could be a dataset ID or another way)." ]
"2022-06-08T14:02:34"
"2023-01-26T14:11:27"
null
NONE
null
### Feature Area /area sdk /area samples /area components ### What feature would you like to see? A new component to load existing VertexDataset. Related to #7792 ### What is the use case or pain point? As a user, I have one existing dataset in VertexAI. I am doing several experiments with different models. Each of my experiment is represented by a pipeline. When developing a kubeflow pipeline for VertexAI, I would like to be able to load an existing VertexDataset instead of using the dataset creation component. But today, the dataset reading component is not existing so I am not able to do it. ### Is there a workaround currently? Today, i am not able to do the task. I tried the following: ``` @component(base_image="python:3.9", packages_to_install=["google-cloud-aiplatform"]) def get_data( project: str, region: str, bucket: str, dataset: Output[VertexDataset] ): from google.cloud import aiplatform dataset = aiplatform.datasets._Dataset(TEST_ID, project=project, location=region) ``` This one is dropping the following error: `NameError: name 'VertexDataset' is not defined`. --- Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7861/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7861/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7860
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7860/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7860/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7860/events
https://github.com/kubeflow/pipelines/issues/7860
1,264,647,569
I_kwDOB-71UM5LYP2R
7,860
Passing big data between components SDK[V2]
{ "login": "wafaaabdelhafez", "id": 10349656, "node_id": "MDQ6VXNlcjEwMzQ5NjU2", "avatar_url": "https://avatars.githubusercontent.com/u/10349656?v=4", "gravatar_id": "", "url": "https://api.github.com/users/wafaaabdelhafez", "html_url": "https://github.com/wafaaabdelhafez", "followers_url": "https://api.github.com/users/wafaaabdelhafez/followers", "following_url": "https://api.github.com/users/wafaaabdelhafez/following{/other_user}", "gists_url": "https://api.github.com/users/wafaaabdelhafez/gists{/gist_id}", "starred_url": "https://api.github.com/users/wafaaabdelhafez/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wafaaabdelhafez/subscriptions", "organizations_url": "https://api.github.com/users/wafaaabdelhafez/orgs", "repos_url": "https://api.github.com/users/wafaaabdelhafez/repos", "events_url": "https://api.github.com/users/wafaaabdelhafez/events{/privacy}", "received_events_url": "https://api.github.com/users/wafaaabdelhafez/received_events", "type": "User", "site_admin": false }
[ { "id": 1073153908, "node_id": "MDU6TGFiZWwxMDczMTUzOTA4", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/bug", "name": "kind/bug", "color": "fc2515", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" } ]
open
false
{ "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false }
[ { "login": "chensun", "id": 2043310, "node_id": "MDQ6VXNlcjIwNDMzMTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2043310?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chensun", "html_url": "https://github.com/chensun", "followers_url": "https://api.github.com/users/chensun/followers", "following_url": "https://api.github.com/users/chensun/following{/other_user}", "gists_url": "https://api.github.com/users/chensun/gists{/gist_id}", "starred_url": "https://api.github.com/users/chensun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chensun/subscriptions", "organizations_url": "https://api.github.com/users/chensun/orgs", "repos_url": "https://api.github.com/users/chensun/repos", "events_url": "https://api.github.com/users/chensun/events{/privacy}", "received_events_url": "https://api.github.com/users/chensun/received_events", "type": "User", "site_admin": false } ]
null
[ "Are you sure you've included the whole code?\r\n\r\nI'm not sure this can work: `output_path.path`.\r\nYou should replace it with just `output_path`.\r\n\r\nYour second issue is that your 2nd component is getting the texts without `InputPath`.\r\nReplace `docs: List[str]` with `docs_path: InputPath(List[str])`.\r\n\r\nThen it should work.\r\n\r\nUnder the hood, in KFP, the way the upstream output is compiled depends on how it's consumed downstream.\r\nIn general, outputs using `OutputPath` should be passed to inputs using `InputPath`.\r\n" ]
"2022-06-08T12:12:32"
"2022-08-09T22:13:48"
null
NONE
null
### What steps did you take 1. Read text data from file on minio, write it to `Outputpath` to pass it to next step `@component(base_image='wafaaabdelhafez/tensorflow-2') def load_docs(output_path: OutputPath(List[str])): from minio import Minio import json client = Minio() obj = client.get_object("mlpipeline", "dataset/model_docs.txt") for data in obj: content = json.loads(data) with open(output_path.path, 'w') as f: f.write(content)` 2. Read data from the previous task to process it `@component(base_image='wafaaabdelhafez/tensorflow-2') def encode_docs(docs: List[str], output_path: OutputPath(List[str])): import json import tensorflow as tf from keras.preprocessing.text import one_hot vocab_size = 50000 encoded_docs = [] for d in docs: res = one_hot(d, vocab_size) encoded_docs.append(res) with open(output_path, 'w') as f: f.write(json.dumps(encoded_docs))` <!-- A clear and concise description of what the bug is.--> I got an error in the first step (reading data and write to outputpath) is it's exceed limits , data too long ### What happened: cannot write data to `Outputhpath ` ### What did you expect to happen: can read and write big amount of data and pass it through steps of pipeline link in V1.x ### Environment: Kubeflow 1.4 python 3.7 working on kubeflow notebook <!-- Please fill in those that seem relevant. --> * How do you deploy Kubeflow Pipelines (KFP)? <!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. --> * KFP version: <!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface. To find the version number, See version number shows on bottom of KFP UI left sidenav. --> * KFP SDK version: <!-- Specify the output of the following shell command: $pip list | grep kfp --> ### Anything else you would like to add: <!-- Miscellaneous information that will assist in solving the issue.--> ### Labels <!-- Please include labels below by uncommenting them to help us better triage issues --> <!-- /area frontend --> <!-- /area backend --> /area sdk <!-- /area testing --> <!-- /area samples --> <!-- /area components --> --- <!-- Don't delete message below to encourage users to support your issue! --> Impacted by this bug? Give it a πŸ‘. We prioritise the issues with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7860/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7860/timeline
null
null
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7859
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7859/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7859/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7859/events
https://github.com/kubeflow/pipelines/issues/7859
1,264,512,907
I_kwDOB-71UM5LXu-L
7,859
Deploy a new version of a model
{ "login": "clausagerskov", "id": 13769591, "node_id": "MDQ6VXNlcjEzNzY5NTkx", "avatar_url": "https://avatars.githubusercontent.com/u/13769591?v=4", "gravatar_id": "", "url": "https://api.github.com/users/clausagerskov", "html_url": "https://github.com/clausagerskov", "followers_url": "https://api.github.com/users/clausagerskov/followers", "following_url": "https://api.github.com/users/clausagerskov/following{/other_user}", "gists_url": "https://api.github.com/users/clausagerskov/gists{/gist_id}", "starred_url": "https://api.github.com/users/clausagerskov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/clausagerskov/subscriptions", "organizations_url": "https://api.github.com/users/clausagerskov/orgs", "repos_url": "https://api.github.com/users/clausagerskov/repos", "events_url": "https://api.github.com/users/clausagerskov/events{/privacy}", "received_events_url": "https://api.github.com/users/clausagerskov/received_events", "type": "User", "site_admin": false }
[ { "id": 1118896905, "node_id": "MDU6TGFiZWwxMTE4ODk2OTA1", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/backend", "name": "area/backend", "color": "d2b48c", "default": false, "description": "" }, { "id": 1126834402, "node_id": "MDU6TGFiZWwxMTI2ODM0NDAy", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/components", "name": "area/components", "color": "d2b48c", "default": false, "description": "" }, { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1260031624, "node_id": "MDU6TGFiZWwxMjYwMDMxNjI0", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/samples", "name": "area/samples", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
closed
false
null
[]
null
[ "Hello @clausagerskov \r\nKFP SDK doesn't have client to talk to Vertex AI service. Would you mind creating another issue under this repo: https://github.com/googleapis/python-aiplatform ? Thank you." ]
"2022-06-08T10:12:42"
"2022-06-16T22:48:04"
"2022-06-16T22:48:04"
NONE
null
### Feature Area As per this issue in the vertex-ai-samples repo, https://github.com/GoogleCloudPlatform/vertex-ai-samples/issues/585 Vertex AI has model versions numbers that increase when manually importing models with the GUI. Is updating models to higher versions supported when doing CI/CD using kubeflow pipelines for GCP Vertex AI? /area backend /area sdk /area samples /area components <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7859/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7859/timeline
null
completed
null
null
false
https://api.github.com/repos/kubeflow/pipelines/issues/7858
https://api.github.com/repos/kubeflow/pipelines
https://api.github.com/repos/kubeflow/pipelines/issues/7858/labels{/name}
https://api.github.com/repos/kubeflow/pipelines/issues/7858/comments
https://api.github.com/repos/kubeflow/pipelines/issues/7858/events
https://github.com/kubeflow/pipelines/issues/7858
1,264,490,612
I_kwDOB-71UM5LXph0
7,858
KFP SDK should support other cloud platforms other than GCP for storing image artifacts built via "build_image_from_working_dir"
{ "login": "sujaykulkarn", "id": 31735981, "node_id": "MDQ6VXNlcjMxNzM1OTgx", "avatar_url": "https://avatars.githubusercontent.com/u/31735981?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sujaykulkarn", "html_url": "https://github.com/sujaykulkarn", "followers_url": "https://api.github.com/users/sujaykulkarn/followers", "following_url": "https://api.github.com/users/sujaykulkarn/following{/other_user}", "gists_url": "https://api.github.com/users/sujaykulkarn/gists{/gist_id}", "starred_url": "https://api.github.com/users/sujaykulkarn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sujaykulkarn/subscriptions", "organizations_url": "https://api.github.com/users/sujaykulkarn/orgs", "repos_url": "https://api.github.com/users/sujaykulkarn/repos", "events_url": "https://api.github.com/users/sujaykulkarn/events{/privacy}", "received_events_url": "https://api.github.com/users/sujaykulkarn/received_events", "type": "User", "site_admin": false }
[ { "id": 1136110037, "node_id": "MDU6TGFiZWwxMTM2MTEwMDM3", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/area/sdk", "name": "area/sdk", "color": "d2b48c", "default": false, "description": "" }, { "id": 1289588140, "node_id": "MDU6TGFiZWwxMjg5NTg4MTQw", "url": "https://api.github.com/repos/kubeflow/pipelines/labels/kind/feature", "name": "kind/feature", "color": "2515fc", "default": false, "description": "" } ]
open
false
null
[]
null
[ "`build_image_from_working_dir` is deprecated, you can use the following KFP CLI to build component for KFPv2:\r\n\r\n```\r\nkfp component build --help\r\n```\r\n\r\ncc @chensun @connor-mccarthy " ]
"2022-06-08T09:54:27"
"2022-06-23T23:00:45"
null
NONE
null
### Feature Area <!-- Uncomment the labels below which are relevant to this feature: --> <!-- /area frontend --> <!-- /area backend --> /area sdk <!-- /area samples --> <!-- /area components --> ### What feature would you like to see? <!-- Provide a description of this feature and the user experience. --> ### What is the use case or pain point? Currently, KFP SDK in order to work with containers codebase uses GCP to store temporary files. But our need is to store the same in other cloud providers or on-prem cluster, Please help us in a way to achieve these. If its a new feature request then please help us understand the code base to modify the code accordingly SDK Code trying- `build_image_from_working_dir(working_dir=".", base_image="pytorch/pytorch:1.0-cuda10.0-cudnn7-runtime", image_name="mnistpytorch:proxy")` ERROR log - `ValueError: Cannot get the Google Cloud project ID, please specify the gcs_staging argument.` Kubeflow(v.1.4.0) - On-prem setup KFP - 1.8.1 ### Is there a workaround currently? - No --- <!-- Don't delete message below to encourage users to support your feature request! --> Love this idea? Give it a πŸ‘. We prioritize fulfilling features with the most πŸ‘.
{ "url": "https://api.github.com/repos/kubeflow/pipelines/issues/7858/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/kubeflow/pipelines/issues/7858/timeline
null
null
null
null
false