from uuid import uuid1

import pytest

from aidisdk import AIDIClient
from aidisdk.algo_house.algorithm_module import AlgoConfig, AlgoFieldEnum
from aidisdk.compute.job_abstract import (
    JobType,
    RunningResourceConfig,
    StartUpConfig,
)
from aidisdk.compute.package_abstract import (
    CodePackageConfig,
    LocalPackageItem,
)
from aidisdk.model import ModelFramework
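
# The tests below exercise the algo_house evaluation workflow: registering an
# evaluation algorithm, updating it, and launching a single evaluation job
# built from its docker image, startup command and code package.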


@pytest.mark.skip("unused")
def test_create_algo_for_eval_detection3d(unittest_client):
    client: AIDIClient = unittest_client
    # create an algorithm with a raw config file

    algorithm = client.algo_house.create(
        algo_name="eval_for_detection3d_" + str(uuid1()).replace("-", "_"),
        field=AlgoFieldEnum.AUTO,
        scene="高速",  # "highway"
        module="感知",  # "perception"
        task_types=["2D检测"],  # "2D detection"
        framework=ModelFramework.pytorch,
        startup="cd ${WORKING_PATH} && python3 local_example.py ",  # noqa
        code_package="test/test_data/eval_experiment",
        docker_image="docker.hobot.cc/auto/eval-traincli:v1.0.36test",
        # desc: "used by the algo house to launch evaluations; do not delete"
        desc="算法仓库发起评测使用,请勿删除.",
        tags=["test", "unittest"],
        config_files=[
            AlgoConfig(
                name="eval_setting",
                local_path="test/test_data/"
                + "eval_experiment/setting_example.yaml",
            ),
        ],
    )
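    # clean up the algorithm created above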
    client.algo_house.__delete__(algorithm.algo_id)


@pytest.mark.skip("unused")
def test_update_algo(unittest_client):
    client: AIDIClient = unittest_client
    algo_name = "eval_for_Semantic_Segmentation"
    algorithm = client.algo_house.update(
        algo_name=algo_name,
        field=AlgoFieldEnum.AUTO,
        scene="高速",  # "highway"
        module="感知",  # "perception"
        task_types=["2D检测"],  # "2D detection"
        framework=ModelFramework.pytorch,
        startup="python3 local_example.py --task_type ${TASK_TYPE} "
        + "--endpoint"
        + " 'http://aidi-test.hobot.cc' "
        + "--group_name ${GROUP_NAME} "
        + "--experiment_name ${EXPERIMENT_NAME} --run_name '${RUN_NAME}' "
        + "--gt_dataset_id "
        + "'${GT_DATASET_ID}' "
        + "--images_dataset_id "  # detection 3d & 分割
        + "'${IMAGES_DATASET_ID}' "
        + "--prediction_name '${PREDICTION_NAME}' "
        + "--predictions_dataset_id '${PREDICTIONS_DATASET_ID}' "  # 分割
        + "--labels_dataset_id '${LABELS_DATASET_ID}' "  # 分割
        + "--setting_file_name ${EVAL_SETTING}",


        code_package="test/test_data/eval_experiment",
        docker_image="docker.hobot.cc/auto/eval-traincli:v1.0.36test",
        # desc: "used by the algo house to launch evaluations; do not delete"
        desc="算法仓库发起评测使用,请勿删除.",
        tags=["test", "unittest"],
        config_files=[
            AlgoConfig(
                name="eval_setting",
                # segmentation setting file
                local_path="test/test_data/eval_experiment/wk_setting.yaml",
                # local_path="test/test_data/eval_experiment/setting_example.yaml",
                placeholder="${EVAL_SETTING}",
            ),
        ],
    )
    print(algorithm)


# @pytest.mark.skip("unused")
def test_create_eval_task_env_test(unittest_client):
    client: AIDIClient = unittest_client
    # algo_name = "eval_for_detection3d"
    algo_name = "eval_for_Semantic_Segmentation"
    algo = client.algo_house.get(
        algo_name=algo_name, download_config=True, download_package=True
    )
    # experiment group name + experiment name + prediction in experiment
    # These arguments replace the placeholders in the startup command; by
    # default each key in cmd_args_dict, upper-cased, becomes the placeholder,
    # e.g. task_type -> ${TASK_TYPE}.
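    # For example, {"task_type": "Semantic_Segmentation"} turns
    # "--task_type ${TASK_TYPE}" into "--task_type Semantic_Segmentation".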
    cmd_args_dict = {
        "task_type": "Semantic_Segmentation",
        "predictions_dataset_id": "dataset://25616",
        "labels_dataset_id": "dataset://25615",

        # "gt_dataset": "dataset://25575",
        "gt_dataset": "",
        "images_dataset_id": "dataset://25613",
        "group_name": "train-withBN",
        "experiment_name": "wjx_test_095",
        "run_name": "test_run_name_wjx_003",
        # "prediction_name": "wjx_test_023/prediction.json",
        "prediction_name": "",
    }

    config_files = [
        AlgoConfig(
            name="eval_setting",
            # segmentation setting file
            local_path="test/test_data/eval_experiment/wk_setting.yaml",
            # local_path="test/test_data/eval_experiment/setting_example.yaml",
            placeholder="${EVAL_SETTING}",
        ),
    ]

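    # Bind the local setting file to the ${EVAL_SETTING} placeholder, then fill
    # the remaining command placeholders from cmd_args_dict.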
    algo.update_config(config_files)
    algo.update_cmd(cmd_args_dict)

    # TODO gen job obj
    # job = algo.gen_job()
    cpu_count = 6
    cpu_mem_ratio = 6
    queue = "svc-aip-cpu"
    project = "PD20210425"
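    # Submit a one-off CPU-only evaluation job that reuses the algorithm's
    # docker image, startup command and downloaded code package.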
    job = client.single_job.create(
        job_name="eval_from_algo_%s_%s"
        % (algo.name, str(uuid1()).replace("-", "_")),
        job_type=JobType.APP_EVAL,
        ipd_number=project,
        queue_name=queue,
        running_resource=RunningResourceConfig(
            docker_image=algo.docker_image,
            instance=1,
            cpu=cpu_count,
            gpu=0,
            cpu_mem_ratio=cpu_mem_ratio,
        ),
        mount=[],
        startup=StartUpConfig(
            command=algo.startup_command,  # noqa
        ),
        code_package=CodePackageConfig(
            raw_package=LocalPackageItem(
                lpath=algo.package_path,
                encrypt_passwd="12345",
                follow_softlink=True,
            ).set_as_startup_dir(),
        ),
        # subscribers=["dan.song", "shulan.shen"],
    )
    print(job)